Diffstat (limited to 'src')
-rw-r--r--  src/ceph-volume/ceph_volume/activate/main.py | 31
-rw-r--r--  src/ceph-volume/ceph_volume/devices/lvm/activate.py | 226
-rw-r--r--  src/ceph-volume/ceph_volume/devices/lvm/batch.py | 50
-rw-r--r--  src/ceph-volume/ceph_volume/devices/lvm/common.py | 9
-rw-r--r--  src/ceph-volume/ceph_volume/devices/lvm/create.py | 38
-rw-r--r--  src/ceph-volume/ceph_volume/devices/lvm/prepare.py | 292
-rw-r--r--  src/ceph-volume/ceph_volume/devices/raw/activate.py | 124
-rw-r--r--  src/ceph-volume/ceph_volume/devices/raw/common.py | 11
-rw-r--r--  src/ceph-volume/ceph_volume/devices/raw/prepare.py | 120
-rw-r--r--  src/ceph-volume/ceph_volume/objectstore/__init__.py | 11
-rw-r--r--  src/ceph-volume/ceph_volume/objectstore/baseobjectstore.py | 154
-rw-r--r--  src/ceph-volume/ceph_volume/objectstore/bluestore.py | 61
-rw-r--r--  src/ceph-volume/ceph_volume/objectstore/lvmbluestore.py | 491
-rw-r--r--  src/ceph-volume/ceph_volume/objectstore/rawbluestore.py | 181
-rw-r--r--  src/ceph-volume/ceph_volume/tests/api/test_lvm.py | 18
-rw-r--r--  src/ceph-volume/ceph_volume/tests/conftest.py | 201
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py | 196
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py | 93
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py | 51
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py | 83
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py | 12
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py | 66
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py | 64
-rw-r--r--  src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py | 6
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/group_vars/all (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/group_vars/all) | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/hosts (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/hosts) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/setup.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/setup.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/test.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/Vagrantfile (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/Vagrantfile) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/group_vars/all (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/group_vars/all) | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/hosts (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/hosts) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/setup.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/setup.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/test.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/test_zap.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test_zap.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/vagrant_variables.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/vagrant_variables.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/Vagrantfile (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/Vagrantfile) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/group_vars/all (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/group_vars/all) | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/hosts (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/hosts) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/setup.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/setup.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/test.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/test_zap.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test_zap.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/vagrant_variables.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/vagrant_variables.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/Vagrantfile (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/Vagrantfile) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/group_vars/all (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/group_vars/all) | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/hosts (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/hosts) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/setup.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/setup.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/test.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/test_zap.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test_zap.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/vagrant_variables.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/vagrant_variables.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/Vagrantfile (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/Vagrantfile) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/group_vars/all (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/group_vars/all) | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/hosts (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/hosts) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/setup.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/setup.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/test.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/test_zap.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test_zap.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/vagrant_variables.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/vagrant_variables.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/Vagrantfile (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/Vagrantfile) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/group_vars/all (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/group_vars/all) | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/hosts (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/hosts) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/setup.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/setup.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/test.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/test_zap.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test_zap.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/vagrant_variables.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/vagrant_variables.yml) | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml | 12
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml | 12
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini | 20
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore | 2
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm | 2
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm_dmcrypt | 2
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_single | 2
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/Vagrantfile (renamed from src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/Vagrantfile) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/group_vars/all (renamed from src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/group_vars/all) | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/hosts (renamed from src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/hosts) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/setup.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/setup.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/test.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/test.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/vagrant_variables.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/vagrant_variables.yml) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/Vagrantfile (renamed from src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/Vagrantfile) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/group_vars/all (renamed from src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/group_vars/all) | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/hosts (renamed from src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/hosts) | 0
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/setup.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/setup.yml) | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/test.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/test.yml) | 22
l---------  src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/vagrant_variables.yml (renamed from src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/vagrant_variables.yml) | 0
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml | 18
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini | 19
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml | 32
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh | 9
l---------  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/group_vars/all | 19
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml | 7
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/hosts | 9
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml | 31
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml | 73
l---------  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/group_vars/all | 22
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml | 7
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/hosts | 9
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml | 15
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml | 73
l---------  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/Vagrantfile | 1
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/group_vars/all | 22
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml | 7
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/hosts | 9
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml | 31
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml | 73
-rw-r--r--  src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini | 56
-rw-r--r--  src/ceph-volume/ceph_volume/tests/objectstore/test_baseobjectstore.py | 162
-rw-r--r--  src/ceph-volume/ceph_volume/tests/objectstore/test_bluestore.py | 27
-rw-r--r--  src/ceph-volume/ceph_volume/tests/objectstore/test_lvmbluestore.py | 571
-rw-r--r--  src/ceph-volume/ceph_volume/tests/objectstore/test_rawbluestore.py | 159
-rw-r--r--  src/ceph-volume/ceph_volume/tests/systemd/test_main.py | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/test_main.py | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/test_terminal.py | 10
-rw-r--r--  src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py | 18
-rw-r--r--  src/ceph-volume/ceph_volume/tests/util/test_disk.py | 47
-rw-r--r--  src/ceph-volume/ceph_volume/tests/util/test_encryption.py | 6
-rw-r--r--  src/ceph-volume/ceph_volume/tests/util/test_prepare.py | 53
-rw-r--r--  src/ceph-volume/ceph_volume/util/arg_validators.py | 2
-rw-r--r--  src/ceph-volume/ceph_volume/util/prepare.py | 81
-rw-r--r--  src/ceph-volume/tox.ini | 2
-rw-r--r--  src/cephadm/tests/test_util_funcs.py | 2
-rw-r--r--  src/cephadm/tox.ini | 2
-rw-r--r--  src/cls/user/cls_user.cc | 238
-rw-r--r--  src/cls/user/cls_user_client.cc | 121
-rw-r--r--  src/cls/user/cls_user_client.h | 27
-rw-r--r--  src/cls/user/cls_user_ops.cc | 86
-rw-r--r--  src/cls/user/cls_user_ops.h | 132
-rw-r--r--  src/cls/user/cls_user_types.cc | 32
-rw-r--r--  src/cls/user/cls_user_types.h | 48
-rw-r--r--  src/common/dns_resolve.cc | 1
-rw-r--r--  src/common/options/crimson.yaml.in | 5
-rw-r--r--  src/common/options/mon.yaml.in | 18
-rw-r--r--  src/common/random_string.cc | 16
-rw-r--r--  src/common/random_string.h | 2
-rw-r--r--  src/crimson/common/operation.h | 4
-rw-r--r--  src/crimson/common/utility.h | 15
-rw-r--r--  src/crimson/osd/osd.cc | 21
-rw-r--r--  src/crimson/osd/osd.h | 3
-rw-r--r--  src/crimson/osd/shard_services.h | 7
-rw-r--r--  src/include/rbd/librbd.hpp | 11
-rw-r--r--  src/librbd/librbd.cc | 12
-rw-r--r--  src/mds/MDCache.cc | 3
-rw-r--r--  src/mds/MDCache.h | 2
-rw-r--r--  src/mds/MDSAuthCaps.cc | 64
-rw-r--r--  src/mds/MDSAuthCaps.h | 3
-rw-r--r--  src/mds/ScrubStack.cc | 1
-rw-r--r--  src/mon/AuthMonitor.cc | 27
-rw-r--r--  src/mon/LogMonitor.cc | 37
-rw-r--r--  src/mon/LogMonitor.h | 18
-rw-r--r--  src/os/bluestore/bluestore_types.h | 2
-rw-r--r--  src/osd/scrubber/osd_scrub.cc | 4
-rw-r--r--  src/osd/scrubber/pg_scrubber.cc | 2
-rw-r--r--  src/osd/scrubber/scrub_machine.cc | 8
-rw-r--r--  src/osd/scrubber/scrub_reservations.cc | 3
-rw-r--r--  src/pybind/mgr/cephadm/templates/services/prometheus/prometheus.yml.j2 | 7
-rw-r--r--  src/pybind/mgr/cephadm/tests/test_services.py | 7
-rw-r--r--  src/pybind/mgr/dashboard/controllers/multi_cluster.py | 54
-rw-r--r--  src/pybind/mgr/dashboard/controllers/rgw.py | 9
-rw-r--r--  src/pybind/mgr/dashboard/frontend/cypress/e2e/pools/pools.po.ts | 2
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-configuration-form/rbd-configuration-form.component.ts | 2
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form.component.html | 47
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-snapshotschedule-form/cephfs-snapshotschedule-form.component.spec.ts | 23
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-snapshotschedule-form/cephfs-snapshotschedule-form.component.ts | 162
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-snapshotschedule-list/cephfs-snapshotschedule-list.component.ts | 3
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs-snapshot-schedule.service.ts | 11
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/shared/components/grafana/grafana.component.ts | 2
-rw-r--r--  src/pybind/mgr/dashboard/frontend/src/app/shared/enum/retention-frequency.enum.ts | 1
-rw-r--r--  src/pybind/mgr/dashboard/openapi.yaml | 22
-rw-r--r--  src/pybind/mgr/tox.ini | 3
-rw-r--r--  src/rgw/CMakeLists.txt | 24
-rw-r--r--  src/rgw/driver/d4n/rgw_sal_d4n.cc | 13
-rw-r--r--  src/rgw/driver/d4n/rgw_sal_d4n.h | 6
-rw-r--r--  src/rgw/driver/daos/rgw_sal_daos.cc | 81
-rw-r--r--  src/rgw/driver/daos/rgw_sal_daos.h | 72
-rw-r--r--  src/rgw/driver/dbstore/common/dbstore.cc | 33
-rw-r--r--  src/rgw/driver/dbstore/common/dbstore.h | 25
-rw-r--r--  src/rgw/driver/dbstore/sqlite/sqliteDB.cc | 15
-rw-r--r--  src/rgw/driver/dbstore/tests/dbstore_tests.cc | 16
-rw-r--r--  src/rgw/driver/motr/rgw_sal_motr.cc | 88
-rw-r--r--  src/rgw/driver/motr/rgw_sal_motr.h | 89
-rw-r--r--  src/rgw/driver/posix/rgw_sal_posix.cc | 32
-rw-r--r--  src/rgw/driver/posix/rgw_sal_posix.h | 38
-rw-r--r--  src/rgw/driver/rados/account.cc | 674
-rw-r--r--  src/rgw/driver/rados/account.h | 130
-rw-r--r--  src/rgw/driver/rados/buckets.cc | 275
-rw-r--r--  src/rgw/driver/rados/buckets.h | 96
-rw-r--r--  src/rgw/driver/rados/cls_fifo_legacy.h | 2
-rw-r--r--  src/rgw/driver/rados/group.cc | 522
-rw-r--r--  src/rgw/driver/rados/group.h | 90
-rw-r--r--  src/rgw/driver/rados/groups.cc | 135
-rw-r--r--  src/rgw/driver/rados/groups.h | 79
-rw-r--r--  src/rgw/driver/rados/rgw_bucket.cc | 302
-rw-r--r--  src/rgw/driver/rados/rgw_bucket.h | 40
-rw-r--r--  src/rgw/driver/rados/rgw_bucket_sync.cc | 9
-rw-r--r--  src/rgw/driver/rados/rgw_cr_rados.cc | 2
-rw-r--r--  src/rgw/driver/rados/rgw_data_sync.cc | 33
-rw-r--r--  src/rgw/driver/rados/rgw_notify.cc | 115
-rw-r--r--  src/rgw/driver/rados/rgw_notify.h | 14
-rw-r--r--  src/rgw/driver/rados/rgw_putobj_processor.cc | 3
-rw-r--r--  src/rgw/driver/rados/rgw_putobj_processor.h | 12
-rw-r--r--  src/rgw/driver/rados/rgw_rados.cc | 138
-rw-r--r--  src/rgw/driver/rados/rgw_rados.h | 21
-rw-r--r--  src/rgw/driver/rados/rgw_reshard.cc | 2
-rw-r--r--  src/rgw/driver/rados/rgw_rest_user.cc | 24
-rw-r--r--  src/rgw/driver/rados/rgw_sal_rados.cc | 1010
-rw-r--r--  src/rgw/driver/rados/rgw_sal_rados.h | 238
-rw-r--r--  src/rgw/driver/rados/rgw_service.cc | 37
-rw-r--r--  src/rgw/driver/rados/rgw_service.h | 8
-rw-r--r--  src/rgw/driver/rados/rgw_sync_module.cc | 4
-rw-r--r--  src/rgw/driver/rados/rgw_sync_module.h | 3
-rw-r--r--  src/rgw/driver/rados/rgw_sync_module_aws.cc | 6
-rw-r--r--  src/rgw/driver/rados/rgw_sync_module_es.cc | 6
-rw-r--r--  src/rgw/driver/rados/rgw_user.cc | 342
-rw-r--r--  src/rgw/driver/rados/rgw_user.h | 64
-rw-r--r--  src/rgw/driver/rados/rgw_zone.h | 8
-rw-r--r--  src/rgw/driver/rados/roles.cc | 174
-rw-r--r--  src/rgw/driver/rados/roles.h | 86
-rw-r--r--  src/rgw/driver/rados/topic.cc | 93
-rw-r--r--  src/rgw/driver/rados/topic.h | 9
-rw-r--r--  src/rgw/driver/rados/topics.cc | 109
-rw-r--r--  src/rgw/driver/rados/topics.h | 57
-rw-r--r--  src/rgw/driver/rados/users.cc | 174
-rw-r--r--  src/rgw/driver/rados/users.h | 87
-rw-r--r--  src/rgw/rgw_account.cc | 529
-rw-r--r--  src/rgw/rgw_account.h | 90
-rw-r--r--  src/rgw/rgw_acl.cc | 27
-rw-r--r--  src/rgw/rgw_acl.h | 27
-rw-r--r--  src/rgw/rgw_acl_s3.cc | 99
-rw-r--r--  src/rgw/rgw_acl_s3.h | 1
-rw-r--r--  src/rgw/rgw_acl_swift.cc | 14
-rw-r--r--  src/rgw/rgw_acl_swift.h | 7
-rw-r--r--  src/rgw/rgw_acl_types.h | 9
-rw-r--r--  src/rgw/rgw_admin.cc | 681
-rw-r--r--  src/rgw/rgw_appmain.cc | 2
-rw-r--r--  src/rgw/rgw_auth.cc | 645
-rw-r--r--  src/rgw/rgw_auth.h | 162
-rw-r--r--  src/rgw/rgw_auth_filters.h | 63
-rw-r--r--  src/rgw/rgw_auth_s3.cc | 93
-rw-r--r--  src/rgw/rgw_auth_s3.h | 20
-rw-r--r--  src/rgw/rgw_basic_types.cc | 57
-rw-r--r--  src/rgw/rgw_basic_types.h | 12
-rw-r--r--  src/rgw/rgw_common.cc | 592
-rw-r--r--  src/rgw/rgw_common.h | 190
-rw-r--r--  src/rgw/rgw_crypt.cc | 9
-rw-r--r--  src/rgw/rgw_data_access.cc | 2
-rw-r--r--  src/rgw/rgw_file.cc | 2
-rw-r--r--  src/rgw/rgw_iam_managed_policy.cc | 191
-rw-r--r--  src/rgw/rgw_iam_managed_policy.h | 39
-rw-r--r--  src/rgw/rgw_iam_policy.cc | 253
-rw-r--r--  src/rgw/rgw_iam_policy.h | 285
-rw-r--r--  src/rgw/rgw_lc.cc | 2
-rw-r--r--  src/rgw/rgw_lib.cc | 21
-rw-r--r--  src/rgw/rgw_lib.h | 1
-rw-r--r--  src/rgw/rgw_log.cc | 35
-rw-r--r--  src/rgw/rgw_log.h | 43
-rw-r--r--  src/rgw/rgw_lua_request.cc | 22
-rw-r--r--  src/rgw/rgw_main.cc | 2
-rw-r--r--  src/rgw/rgw_oidc_provider.cc | 193
-rw-r--r--  src/rgw/rgw_oidc_provider.h | 85
-rw-r--r--  src/rgw/rgw_op.cc | 1521
-rw-r--r--  src/rgw/rgw_op.h | 9
-rw-r--r--  src/rgw/rgw_op_type.h | 65
-rw-r--r--  src/rgw/rgw_polparser.cc | 12
-rw-r--r--  src/rgw/rgw_process.cc | 8
-rw-r--r--  src/rgw/rgw_pubsub.cc | 106
-rw-r--r--  src/rgw/rgw_pubsub.h | 76
-rw-r--r--  src/rgw/rgw_quota.cc | 339
-rw-r--r--  src/rgw/rgw_quota.h | 6
-rw-r--r--  src/rgw/rgw_rest.cc | 35
-rw-r--r--  src/rgw/rgw_rest.h | 4
-rw-r--r--  src/rgw/rgw_rest_account.cc | 241
-rw-r--r--  src/rgw/rgw_rest_account.h | 46
-rw-r--r--  src/rgw/rgw_rest_client.cc | 2
-rw-r--r--  src/rgw/rgw_rest_conn.cc | 11
-rw-r--r--  src/rgw/rgw_rest_conn.h | 20
-rw-r--r--  src/rgw/rgw_rest_iam.cc | 234
-rw-r--r--  src/rgw/rgw_rest_iam.h | 84
-rw-r--r--  src/rgw/rgw_rest_iam_group.cc | 2122
-rw-r--r--  src/rgw/rgw_rest_iam_group.h | 40
-rw-r--r--  src/rgw/rgw_rest_iam_user.cc | 1468
-rw-r--r--  src/rgw/rgw_rest_iam_user.h | 33
-rw-r--r--  src/rgw/rgw_rest_metadata.cc | 3
-rw-r--r--  src/rgw/rgw_rest_oidc_provider.cc | 314
-rw-r--r--  src/rgw/rgw_rest_oidc_provider.h | 65
-rw-r--r--  src/rgw/rgw_rest_pubsub.cc | 1011
-rw-r--r--  src/rgw/rgw_rest_role.cc | 1037
-rw-r--r--  src/rgw/rgw_rest_role.h | 195
-rw-r--r--  src/rgw/rgw_rest_s3.cc | 119
-rw-r--r--  src/rgw/rgw_rest_sts.cc | 78
-rw-r--r--  src/rgw/rgw_rest_sts.h | 12
-rw-r--r--  src/rgw/rgw_rest_swift.cc | 55
-rw-r--r--  src/rgw/rgw_rest_swift.h | 4
-rw-r--r--  src/rgw/rgw_rest_user_policy.cc | 789
-rw-r--r--  src/rgw/rgw_rest_user_policy.h | 69
-rw-r--r--  src/rgw/rgw_role.cc | 39
-rw-r--r--  src/rgw/rgw_role.h | 26
-rw-r--r--  src/rgw/rgw_sal.h | 304
-rw-r--r--  src/rgw/rgw_sal_dbstore.cc | 339
-rw-r--r--  src/rgw/rgw_sal_dbstore.h | 225
-rw-r--r--  src/rgw/rgw_sal_filter.cc | 342
-rw-r--r--  src/rgw/rgw_sal_filter.h | 217
-rw-r--r--  src/rgw/rgw_sal_fwd.h | 7
-rw-r--r--  src/rgw/rgw_sal_store.h | 2
-rw-r--r--  src/rgw/rgw_signal.cc | 4
-rw-r--r--  src/rgw/rgw_signal.h | 1
-rw-r--r--  src/rgw/rgw_sts.cc | 11
-rw-r--r--  src/rgw/rgw_swift_auth.cc | 37
-rw-r--r--  src/rgw/rgw_swift_auth.h | 20
-rw-r--r--  src/rgw/rgw_user.cc | 22
-rw-r--r--  src/rgw/rgw_user_types.h | 29
-rw-r--r--  src/rgw/rgw_xml.cc | 14
-rw-r--r--  src/rgw/rgw_xml.h | 2
-rw-r--r--  src/rgw/rgw_zone.cc | 10
-rw-r--r--  src/rgw/services/svc_bucket.h | 2
-rw-r--r--  src/rgw/services/svc_bucket_sobj.cc | 8
-rw-r--r--  src/rgw/services/svc_bucket_sobj.h | 2
-rw-r--r--  src/rgw/services/svc_user.h | 45
-rw-r--r--  src/rgw/services/svc_user_rados.cc | 608
-rw-r--r--  src/rgw/services/svc_user_rados.h | 72
-rwxr-xr-x  src/script/ptl-tool.py | 228
-rw-r--r--  src/test/CMakeLists.txt | 7
-rw-r--r--  src/test/cli/radosgw-admin/help.t | 26
-rw-r--r--  src/test/cls_rgw/CMakeLists.txt | 43
-rw-r--r--  src/test/cls_rgw_gc/CMakeLists.txt | 33
-rw-r--r--  src/test/cls_user/CMakeLists.txt | 5
-rw-r--r--  src/test/cls_user/test_cls_user.cc | 211
-rw-r--r--  src/test/mds/TestQuiesceAgent.cc | 8
-rw-r--r--  src/test/osd/TestOSDScrub.cc | 1
-rw-r--r--  src/test/rgw/bucket_notification/test_bn.py | 103
-rw-r--r--  src/test/rgw/rgw_multi/conn.py | 13
-rw-r--r--  src/test/rgw/rgw_multi/multisite.py | 12
-rw-r--r--  src/test/rgw/rgw_multi/tests.py | 221
-rw-r--r--  src/test/rgw/test_multi.py | 7
-rw-r--r--  src/test/rgw/test_rgw_crypto.cc | 1
-rw-r--r--  src/test/rgw/test_rgw_iam_policy.cc | 304
-rw-r--r--  src/test/rgw/test_rgw_lua.cc | 49
-rw-r--r--  src/tools/ceph-dencoder/rgw_types.h | 22
-rwxr-xr-x  src/vstart.sh | 27
344 files changed, 22147 insertions, 7778 deletions
diff --git a/src/ceph-volume/ceph_volume/activate/main.py b/src/ceph-volume/ceph_volume/activate/main.py
index 1cef038b62f..161cd1cf436 100644
--- a/src/ceph-volume/ceph_volume/activate/main.py
+++ b/src/ceph-volume/ceph_volume/activate/main.py
@@ -3,8 +3,8 @@
import argparse
from ceph_volume import terminal
-from ceph_volume.devices.lvm.activate import Activate as LVMActivate
-from ceph_volume.devices.raw.activate import Activate as RAWActivate
+from ceph_volume.objectstore.lvmbluestore import LvmBlueStore as LVMActivate
+from ceph_volume.objectstore.rawbluestore import RawBlueStore as RAWActivate
from ceph_volume.devices.simple.activate import Activate as SimpleActivate
@@ -44,27 +44,24 @@ class Activate(object):
# first try raw
try:
- RAWActivate([]).activate(
- devs=None,
- start_osd_id=self.args.osd_id,
- start_osd_uuid=self.args.osd_uuid,
- tmpfs=not self.args.no_tmpfs,
- systemd=not self.args.no_systemd,
- )
+ raw_activate = RAWActivate([])
+ raw_activate.activate(None,
+ self.args.osd_id,
+ self.args.osd_uuid,
+ not self.args.no_tmpfs)
return
except Exception as e:
terminal.info(f'Failed to activate via raw: {e}')
# then try lvm
try:
- LVMActivate([]).activate(
- argparse.Namespace(
- osd_id=self.args.osd_id,
- osd_fsid=self.args.osd_uuid,
- no_tmpfs=self.args.no_tmpfs,
- no_systemd=self.args.no_systemd,
- )
- )
+ lvm_activate = LVMActivate(argparse.Namespace(
+ no_tmpfs=self.args.no_tmpfs,
+ no_systemd=self.args.no_systemd,
+ osd_fsid=self.args.osd_uuid))
+ lvm_activate.activate(None,
+ self.args.osd_id,
+ self.args.osd_uuid)
return
except Exception as e:
terminal.info(f'Failed to activate via LVM: {e}')
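
Note: both activation back ends above now come from the new objectstore package instead of the per-device Activate classes. The table is indexed elsewhere in this commit as objectstore.mapping['LVM'|'RAW'][args.objectstore](args=args), so objectstore/__init__.py (11 lines per the diffstat, contents not shown in this diff) plausibly boils down to something like the sketch below; the class names are taken from the imports above, and the exact layout is an assumption.

    # Hypothetical sketch of ceph_volume/objectstore/__init__.py
    from ceph_volume.objectstore import lvmbluestore, rawbluestore

    # Dispatch table: device layout ('LVM' or 'RAW') -> objectstore name
    # -> implementing class, instantiated as mapping['LVM'][name](args=args).
    mapping = {
        'LVM': {
            'bluestore': lvmbluestore.LvmBlueStore,
        },
        'RAW': {
            'bluestore': rawbluestore.RawBlueStore,
        },
    }
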
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/activate.py b/src/ceph-volume/ceph_volume/devices/lvm/activate.py
index 17c66194c67..7b4d57c9509 100644
--- a/src/ceph-volume/ceph_volume/devices/lvm/activate.py
+++ b/src/ceph-volume/ceph_volume/devices/lvm/activate.py
@@ -1,216 +1,20 @@
from __future__ import print_function
import argparse
import logging
-import os
from textwrap import dedent
-from ceph_volume import process, conf, decorators, terminal, configuration
-from ceph_volume.util import system, disk
-from ceph_volume.util import prepare as prepare_utils
-from ceph_volume.util import encryption as encryption_utils
-from ceph_volume.systemd import systemctl
-from ceph_volume.api import lvm as api
-from .listing import direct_report
+from ceph_volume import objectstore
logger = logging.getLogger(__name__)
-
-def get_osd_device_path(osd_lvs, device_type, dmcrypt_secret=None):
- """
- ``device_type`` can be one of ``db``, ``wal`` or ``block`` so that we can
- query LVs on system and fallback to querying the uuid if that is not
- present.
-
- Return a path if possible, failing to do that a ``None``, since some of
- these devices are optional.
- """
- osd_block_lv = None
- for lv in osd_lvs:
- if lv.tags.get('ceph.type') == 'block':
- osd_block_lv = lv
- break
- if osd_block_lv:
- is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
- logger.debug('Found block device (%s) with encryption: %s', osd_block_lv.name, is_encrypted)
- uuid_tag = 'ceph.%s_uuid' % device_type
- device_uuid = osd_block_lv.tags.get(uuid_tag)
- if not device_uuid:
- return None
-
- device_lv = None
- for lv in osd_lvs:
- if lv.tags.get('ceph.type') == device_type:
- device_lv = lv
- break
- if device_lv:
- if is_encrypted:
- encryption_utils.luks_open(dmcrypt_secret, device_lv.lv_path, device_uuid)
- return '/dev/mapper/%s' % device_uuid
- return device_lv.lv_path
-
- # this could be a regular device, so query it with blkid
- physical_device = disk.get_device_from_partuuid(device_uuid)
- if physical_device:
- if is_encrypted:
- encryption_utils.luks_open(dmcrypt_secret, physical_device, device_uuid)
- return '/dev/mapper/%s' % device_uuid
- return physical_device
-
- raise RuntimeError('could not find %s with uuid %s' % (device_type, device_uuid))
-
-
-def activate_bluestore(osd_lvs, no_systemd=False, no_tmpfs=False):
- for lv in osd_lvs:
- if lv.tags.get('ceph.type') == 'block':
- osd_block_lv = lv
- break
- else:
- raise RuntimeError('could not find a bluestore OSD to activate')
-
- is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
- if is_encrypted and conf.dmcrypt_no_workqueue is None:
- encryption_utils.set_dmcrypt_no_workqueue()
- dmcrypt_secret = None
- osd_id = osd_block_lv.tags['ceph.osd_id']
- conf.cluster = osd_block_lv.tags['ceph.cluster_name']
- osd_fsid = osd_block_lv.tags['ceph.osd_fsid']
- configuration.load_ceph_conf_path(osd_block_lv.tags['ceph.cluster_name'])
- configuration.load()
-
- # mount on tmpfs the osd directory
- osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
- if not system.path_is_mounted(osd_path):
- # mkdir -p and mount as tmpfs
- prepare_utils.create_osd_path(osd_id, tmpfs=not no_tmpfs)
- # XXX This needs to be removed once ceph-bluestore-tool can deal with
- # symlinks that exist in the osd dir
- for link_name in ['block', 'block.db', 'block.wal']:
- link_path = os.path.join(osd_path, link_name)
- if os.path.exists(link_path):
- os.unlink(os.path.join(osd_path, link_name))
- # encryption is handled here, before priming the OSD dir
- if is_encrypted:
- osd_lv_path = '/dev/mapper/%s' % osd_block_lv.lv_uuid
- lockbox_secret = osd_block_lv.tags['ceph.cephx_lockbox_secret']
- encryption_utils.write_lockbox_keyring(osd_id, osd_fsid, lockbox_secret)
- dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
- encryption_utils.luks_open(dmcrypt_secret, osd_block_lv.lv_path, osd_block_lv.lv_uuid)
- else:
- osd_lv_path = osd_block_lv.lv_path
-
- db_device_path = get_osd_device_path(osd_lvs, 'db', dmcrypt_secret=dmcrypt_secret)
- wal_device_path = get_osd_device_path(osd_lvs, 'wal', dmcrypt_secret=dmcrypt_secret)
-
- # Once symlinks are removed, the osd dir can be 'primed again. chown first,
- # regardless of what currently exists so that ``prime-osd-dir`` can succeed
- # even if permissions are somehow messed up
- system.chown(osd_path)
- prime_command = [
- 'ceph-bluestore-tool', '--cluster=%s' % conf.cluster,
- 'prime-osd-dir', '--dev', osd_lv_path,
- '--path', osd_path, '--no-mon-config']
-
- process.run(prime_command)
- # always re-do the symlink regardless if it exists, so that the block,
- # block.wal, and block.db devices that may have changed can be mapped
- # correctly every time
- process.run(['ln', '-snf', osd_lv_path, os.path.join(osd_path, 'block')])
- system.chown(os.path.join(osd_path, 'block'))
- system.chown(osd_path)
- if db_device_path:
- destination = os.path.join(osd_path, 'block.db')
- process.run(['ln', '-snf', db_device_path, destination])
- system.chown(db_device_path)
- system.chown(destination)
- if wal_device_path:
- destination = os.path.join(osd_path, 'block.wal')
- process.run(['ln', '-snf', wal_device_path, destination])
- system.chown(wal_device_path)
- system.chown(destination)
-
- if no_systemd is False:
- # enable the ceph-volume unit for this OSD
- systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
-
- # enable the OSD
- systemctl.enable_osd(osd_id)
-
- # start the OSD
- systemctl.start_osd(osd_id)
- terminal.success("ceph-volume lvm activate successful for osd ID: %s" % osd_id)
-
-
class Activate(object):
-
help = 'Discover and mount the LVM device associated with an OSD ID and start the Ceph OSD'
- def __init__(self, argv):
+ def __init__(self, argv, args=None):
+ self.objectstore = None
self.argv = argv
-
- @decorators.needs_root
- def activate_all(self, args):
- listed_osds = direct_report()
- osds = {}
- for osd_id, devices in listed_osds.items():
- # the metadata for all devices in each OSD will contain
- # the FSID which is required for activation
- for device in devices:
- fsid = device.get('tags', {}).get('ceph.osd_fsid')
- if fsid:
- osds[fsid] = osd_id
- break
- if not osds:
- terminal.warning('Was unable to find any OSDs to activate')
- terminal.warning('Verify OSDs are present with "ceph-volume lvm list"')
- return
- for osd_fsid, osd_id in osds.items():
- if not args.no_systemd and systemctl.osd_is_active(osd_id):
- terminal.warning(
- 'OSD ID %s FSID %s process is active. Skipping activation' % (osd_id, osd_fsid)
- )
- else:
- terminal.info('Activating OSD ID %s FSID %s' % (osd_id, osd_fsid))
- self.activate(args, osd_id=osd_id, osd_fsid=osd_fsid)
-
- @decorators.needs_root
- def activate(self, args, osd_id=None, osd_fsid=None):
- """
- :param args: The parsed arguments coming from the CLI
- :param osd_id: When activating all, this gets populated with an
- existing OSD ID
- :param osd_fsid: When activating all, this gets populated with an
- existing OSD FSID
- """
- osd_id = osd_id if osd_id else args.osd_id
- osd_fsid = osd_fsid if osd_fsid else args.osd_fsid
-
- if osd_id and osd_fsid:
- tags = {'ceph.osd_id': osd_id, 'ceph.osd_fsid': osd_fsid}
- elif not osd_id and osd_fsid:
- tags = {'ceph.osd_fsid': osd_fsid}
- elif osd_id and not osd_fsid:
- raise RuntimeError('could not activate osd.{}, please provide the '
- 'osd_fsid too'.format(osd_id))
- else:
- raise RuntimeError('Please provide both osd_id and osd_fsid')
- lvs = api.get_lvs(tags=tags)
- if not lvs:
- raise RuntimeError('could not find osd.%s with osd_fsid %s' %
- (osd_id, osd_fsid))
-
- # This argument is only available when passed in directly or via
- # systemd, not when ``create`` is being used
- # placeholder when a new objectstore support will be added
- if getattr(args, 'auto_detect_objectstore', False):
- logger.info('auto detecting objectstore')
- return activate_bluestore(lvs, args.no_systemd)
-
- # explicit 'objectstore' flags take precedence
- if getattr(args, 'bluestore', False):
- activate_bluestore(lvs, args.no_systemd, getattr(args, 'no_tmpfs', False))
- elif any('ceph.block_device' in lv.tags for lv in lvs):
- activate_bluestore(lvs, args.no_systemd, getattr(args, 'no_tmpfs', False))
+ self.args = args
def main(self):
sub_command_help = dedent("""
@@ -257,6 +61,14 @@ class Activate(object):
help='force bluestore objectstore activation',
)
parser.add_argument(
+ '--objectstore',
+ dest='objectstore',
+ help='The OSD objectstore.',
+ default='bluestore',
+ choices=['bluestore', 'seastore'],
+ type=str,
+ )
+ parser.add_argument(
'--all',
dest='activate_all',
action='store_true',
@@ -273,11 +85,15 @@ class Activate(object):
action='store_true',
help='Do not use a tmpfs mount for OSD data dir'
)
- if len(self.argv) == 0:
+ if len(self.argv) == 0 and self.args is None:
print(sub_command_help)
return
- args = parser.parse_args(self.argv)
- if args.activate_all:
- self.activate_all(args)
+ if self.args is None:
+ self.args = parser.parse_args(self.argv)
+ if self.args.bluestore:
+ self.args.objectstore = 'bluestore'
+ self.objectstore = objectstore.mapping['LVM'][self.args.objectstore](args=self.args)
+ if self.args.activate_all:
+ self.objectstore.activate_all()
else:
- self.activate(args)
+ self.objectstore.activate()
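
Note: the rewritten main() above reduces activation to a table lookup plus a method call. A minimal sketch of that path, assuming a parsed namespace with the fields this parser defines (osd_id/osd_fsid come from parts of the parser not shown in this hunk, and the values below are placeholders; running it for real requires root and an existing OSD):

    import argparse

    from ceph_volume import objectstore

    args = argparse.Namespace(
        objectstore='bluestore', bluestore=False, activate_all=False,
        osd_id='0', osd_fsid='00000000-0000-0000-0000-000000000000',
        no_systemd=False, no_tmpfs=False)
    store = objectstore.mapping['LVM'][args.objectstore](args=args)
    store.activate()      # or store.activate_all() when --all is given
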
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/batch.py b/src/ceph-volume/ceph_volume/devices/lvm/batch.py
index 2118ce47aee..6cd3bc39817 100644
--- a/src/ceph-volume/ceph_volume/devices/lvm/batch.py
+++ b/src/ceph-volume/ceph_volume/devices/lvm/batch.py
@@ -234,9 +234,17 @@ class Batch(object):
'are passed in DEVICES'),
)
parser.add_argument(
+ '--objectstore',
+ dest='objectstore',
+ help='The OSD objectstore.',
+ default='bluestore',
+ choices=['bluestore', 'seastore'],
+ type=str,
+ )
+ parser.add_argument(
'--bluestore',
action='store_true',
- help='bluestore objectstore (default)',
+ help='bluestore objectstore (default). (DEPRECATED: use --objectstore instead)',
)
parser.add_argument(
'--report',
@@ -323,6 +331,8 @@ class Batch(object):
type=arg_validators.valid_osd_id
)
self.args = parser.parse_args(argv)
+ if self.args.bluestore:
+ self.args.objectstore = 'bluestore'
self.parser = parser
for dev_list in ['', 'db_', 'wal_']:
setattr(self, '{}usable'.format(dev_list), [])
@@ -383,11 +393,6 @@ class Batch(object):
if not self.args.devices:
return self.parser.print_help()
- # Default to bluestore here since defaulting it in add_argument may
- # cause both to be True
- if not self.args.bluestore:
- self.args.bluestore = True
-
if (self.args.auto and not self.args.db_devices and not
self.args.wal_devices):
self._sort_rotational_disks()
@@ -398,7 +403,7 @@ class Batch(object):
self.args.db_devices,
self.args.wal_devices)
- plan = self.get_plan(self.args)
+ plan = self.get_deployment_layout()
if self.args.report:
self.report(plan)
@@ -425,43 +430,38 @@ class Batch(object):
for osd in plan:
args = osd.get_args(defaults)
if self.args.prepare:
- p = Prepare([])
- p.safe_prepare(argparse.Namespace(**args))
+ p = Prepare([], args=argparse.Namespace(**args))
+ p.main()
else:
- c = Create([])
- c.create(argparse.Namespace(**args))
-
-
- def get_plan(self, args):
- if args.bluestore:
- plan = self.get_deployment_layout(args, args.devices, args.db_devices,
- args.wal_devices)
- return plan
+ c = Create([], args=argparse.Namespace(**args))
+ c.create()
- def get_deployment_layout(self, args, devices, fast_devices=[],
- very_fast_devices=[]):
+ def get_deployment_layout(self):
'''
The methods here are mostly just organization, error reporting and
setting up of (default) args. The heavy lifting code for the deployment
layout can be found in the static get_*_osds and get_*_fast_allocs
functions.
'''
+ devices = self.args.devices
+ fast_devices = self.args.db_devices
+ very_fast_devices = self.args.wal_devices
plan = []
phys_devs, lvm_devs = separate_devices_from_lvs(devices)
mlogger.debug(('passed data devices: {} physical,'
' {} LVM').format(len(phys_devs), len(lvm_devs)))
- plan.extend(get_physical_osds(phys_devs, args))
+ plan.extend(get_physical_osds(phys_devs, self.args))
- plan.extend(get_lvm_osds(lvm_devs, args))
+ plan.extend(get_lvm_osds(lvm_devs, self.args))
num_osds = len(plan)
if num_osds == 0:
mlogger.info('All data devices are unavailable')
return plan
- requested_osds = args.osds_per_device * len(phys_devs) + len(lvm_devs)
+ requested_osds = self.args.osds_per_device * len(phys_devs) + len(lvm_devs)
- if args.bluestore:
+ if self.args.objectstore == 'bluestore':
fast_type = 'block_db'
fast_allocations = self.fast_allocations(fast_devices,
requested_osds,
@@ -491,7 +491,7 @@ class Batch(object):
if fast_devices:
osd.add_fast_device(*fast_allocations.pop(),
type_=fast_type)
- if very_fast_devices and args.bluestore:
+ if very_fast_devices and self.args.objectstore == 'bluestore':
osd.add_very_fast_device(*very_fast_allocations.pop())
return plan
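
Note: the --bluestore-to---objectstore shim used above (and repeated in the other sub-commands touched by this commit) replaces the old post-parse defaulting trick that the deleted comment warned about. A self-contained sketch of the pattern:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--objectstore', default='bluestore',
                        choices=['bluestore', 'seastore'])
    parser.add_argument('--bluestore', action='store_true',
                        help='(DEPRECATED: use --objectstore instead)')

    args = parser.parse_args(['--bluestore'])
    if args.bluestore:
        # The legacy flag simply forces the new option, so downstream
        # code only ever has to inspect args.objectstore.
        args.objectstore = 'bluestore'
    assert args.objectstore == 'bluestore'
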
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/common.py b/src/ceph-volume/ceph_volume/devices/lvm/common.py
index 198ba9417a1..90bed61a3bd 100644
--- a/src/ceph-volume/ceph_volume/devices/lvm/common.py
+++ b/src/ceph-volume/ceph_volume/devices/lvm/common.py
@@ -36,6 +36,13 @@ def rollback_osd(args, osd_id=None):
common_args = {
+ '--objectstore': {
+ 'dest': 'objectstore',
+ 'help': 'The OSD objectstore.',
+ 'default': 'bluestore',
+ 'choices': ['bluestore', 'seastore'],
+ 'type': str,
+ },
'--data': {
'help': 'OSD data path. A physical device or logical volume',
'required': True,
@@ -86,7 +93,7 @@ common_args = {
bluestore_args = {
'--bluestore': {
'action': 'store_true',
- 'help': 'Use the bluestore objectstore',
+ 'help': 'Use the bluestore objectstore. (DEPRECATED: use --objectstore instead)',
},
'--block.db': {
'dest': 'block_db',
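
Note: common_args is a table of argparse specs keyed by flag name. The create_parser/prepare_parser helpers that consume it are not part of this hunk, but the usual pattern for such a table is simply the following (an illustrative sketch, not their exact code):

    import argparse

    def build_parser(arg_specs):
        parser = argparse.ArgumentParser()
        for flag, opts in arg_specs.items():
            # e.g. parser.add_argument('--objectstore', dest='objectstore',
            #                          default='bluestore', ...)
            parser.add_argument(flag, **opts)
        return parser
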
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/create.py b/src/ceph-volume/ceph_volume/devices/lvm/create.py
index 631a21b239d..6a4d11b99bf 100644
--- a/src/ceph-volume/ceph_volume/devices/lvm/create.py
+++ b/src/ceph-volume/ceph_volume/devices/lvm/create.py
@@ -3,10 +3,8 @@ from textwrap import dedent
import logging
from ceph_volume.util import system
from ceph_volume.util.arg_validators import exclude_group_options
-from ceph_volume import decorators, terminal
+from ceph_volume import decorators, terminal, objectstore
from .common import create_parser, rollback_osd
-from .prepare import Prepare
-from .activate import Activate
logger = logging.getLogger(__name__)
@@ -15,27 +13,29 @@ class Create(object):
help = 'Create a new OSD from an LVM device'
- def __init__(self, argv):
+ def __init__(self, argv, args=None):
+ self.objectstore = None
self.argv = argv
+ self.args = args
@decorators.needs_root
- def create(self, args):
- if not args.osd_fsid:
- args.osd_fsid = system.generate_uuid()
- prepare_step = Prepare([])
- prepare_step.safe_prepare(args)
- osd_id = prepare_step.osd_id
+ def create(self):
+ if not self.args.osd_fsid:
+ self.args.osd_fsid = system.generate_uuid()
+ self.objectstore = objectstore.mapping['LVM'][self.args.objectstore](args=self.args)
+ self.objectstore.safe_prepare()
+ osd_id = self.objectstore.osd_id
try:
# we try this for activate only when 'creating' an OSD, because a rollback should not
# happen when doing normal activation. For example when starting an OSD, systemd will call
# activate, which would never need to be rolled back.
- Activate([]).activate(args)
+ self.objectstore.activate()
except Exception:
logger.exception('lvm activate was unable to complete, while creating the OSD')
logger.info('will rollback OSD ID creation')
- rollback_osd(args, osd_id)
+ rollback_osd(self.args, osd_id)
raise
- terminal.success("ceph-volume lvm create successful for: %s" % args.data)
+ terminal.success("ceph-volume lvm create successful for: %s" % self.args.data)
def main(self):
sub_command_help = dedent("""
@@ -69,9 +69,9 @@ class Create(object):
print(sub_command_help)
return
exclude_group_options(parser, groups=['bluestore'], argv=self.argv)
- args = parser.parse_args(self.argv)
- # Default to bluestore here since defaulting it in add_argument may
- # cause both to be True
- if not args.bluestore:
- args.bluestore = True
- self.create(args)
+ if self.args is None:
+ self.args = parser.parse_args(self.argv)
+ if self.args.bluestore:
+ self.args.objectstore = 'bluestore'
+ self.objectstore = objectstore.mapping['LVM'][self.args.objectstore]
+ self.create()
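
Note: with the injected-args constructor, batch.py (see its hunk above) can drive Create without re-parsing a CLI string. A hedged usage sketch; the field values are hypothetical, a real invocation carries the full set of prepare arguments (cf. osd.get_args(defaults) in batch.py), and running it requires root plus an actual vg/lv or device:

    import argparse

    from ceph_volume.devices.lvm.create import Create

    osd_args = argparse.Namespace(data='vg_hdd/lv_osd0',   # hypothetical lv
                                  objectstore='bluestore',
                                  bluestore=False,
                                  osd_fsid=None)
    Create([], args=osd_args).create()   # prepares, then activates;
                                         # rolls back the OSD id on failure
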
diff --git a/src/ceph-volume/ceph_volume/devices/lvm/prepare.py b/src/ceph-volume/ceph_volume/devices/lvm/prepare.py
index 85c8a146771..18fc1df03d8 100644
--- a/src/ceph-volume/ceph_volume/devices/lvm/prepare.py
+++ b/src/ceph-volume/ceph_volume/devices/lvm/prepare.py
@@ -1,290 +1,23 @@
from __future__ import print_function
-import json
import logging
from textwrap import dedent
-from ceph_volume.util import prepare as prepare_utils
-from ceph_volume.util import encryption as encryption_utils
-from ceph_volume.util import system, disk
-from ceph_volume.util.arg_validators import exclude_group_options
-from ceph_volume import conf, decorators, terminal
-from ceph_volume.api import lvm as api
-from .common import prepare_parser, rollback_osd
+from ceph_volume import objectstore
+from .common import prepare_parser
logger = logging.getLogger(__name__)
-def prepare_dmcrypt(key, device, device_type, tags):
- """
- Helper for devices that are encrypted. The operations needed for
- block, db, wal devices are all the same
- """
- if not device:
- return ''
- tag_name = 'ceph.%s_uuid' % device_type
- uuid = tags[tag_name]
- return encryption_utils.prepare_dmcrypt(key, device, uuid)
-
-def prepare_bluestore(block, wal, db, secrets, tags, osd_id, fsid):
- """
- :param block: The name of the logical volume for the bluestore data
- :param wal: a regular/plain disk or logical volume, to be used for block.wal
- :param db: a regular/plain disk or logical volume, to be used for block.db
- :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
- :param id_: The OSD id
- :param fsid: The OSD fsid, also known as the OSD UUID
- """
- cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
- # encryption-only operations
- if secrets.get('dmcrypt_key'):
- # If encrypted, there is no need to create the lockbox keyring file because
- # bluestore re-creates the files and does not have support for other files
- # like the custom lockbox one. This will need to be done on activation.
- # format and open ('decrypt' devices) and re-assign the device and journal
- # variables so that the rest of the process can use the mapper paths
- key = secrets['dmcrypt_key']
- block = prepare_dmcrypt(key, block, 'block', tags)
- wal = prepare_dmcrypt(key, wal, 'wal', tags)
- db = prepare_dmcrypt(key, db, 'db', tags)
-
- # create the directory
- prepare_utils.create_osd_path(osd_id, tmpfs=True)
- # symlink the block
- prepare_utils.link_block(block, osd_id)
- # get the latest monmap
- prepare_utils.get_monmap(osd_id)
- # write the OSD keyring if it doesn't exist already
- prepare_utils.write_keyring(osd_id, cephx_secret)
- # prepare the osd filesystem
- prepare_utils.osd_mkfs_bluestore(
- osd_id, fsid,
- keyring=cephx_secret,
- wal=wal,
- db=db
- )
-
-
class Prepare(object):
help = 'Format an LVM device and associate it with an OSD'
- def __init__(self, argv):
+ def __init__(self, argv, args=None):
+ self.objectstore = None
self.argv = argv
+ self.args = args
self.osd_id = None
- def get_ptuuid(self, argument):
- uuid = disk.get_partuuid(argument)
- if not uuid:
- terminal.error('blkid could not detect a PARTUUID for device: %s' % argument)
- raise RuntimeError('unable to use device')
- return uuid
-
- def setup_device(self, device_type, device_name, tags, size, slots):
- """
- Check if ``device`` is an lv, if so, set the tags, making sure to
- update the tags with the lv_uuid and lv_path which the incoming tags
- will not have.
-
- If the device is not a logical volume, then retrieve the partition UUID
- by querying ``blkid``
- """
- if device_name is None:
- return '', '', tags
- tags['ceph.type'] = device_type
- tags['ceph.vdo'] = api.is_vdo(device_name)
-
- try:
- vg_name, lv_name = device_name.split('/')
- lv = api.get_single_lv(filters={'lv_name': lv_name,
- 'vg_name': vg_name})
- except ValueError:
- lv = None
-
- if lv:
- lv_uuid = lv.lv_uuid
- path = lv.lv_path
- tags['ceph.%s_uuid' % device_type] = lv_uuid
- tags['ceph.%s_device' % device_type] = path
- lv.set_tags(tags)
- elif disk.is_device(device_name):
- # We got a disk, create an lv
- lv_type = "osd-{}".format(device_type)
- name_uuid = system.generate_uuid()
- kwargs = {
- 'device': device_name,
- 'tags': tags,
- 'slots': slots
- }
- #TODO use get_block_db_size and co here to get configured size in
- #conf file
- if size != 0:
- kwargs['size'] = size
- lv = api.create_lv(
- lv_type,
- name_uuid,
- **kwargs)
- path = lv.lv_path
- tags['ceph.{}_device'.format(device_type)] = path
- tags['ceph.{}_uuid'.format(device_type)] = lv.lv_uuid
- lv_uuid = lv.lv_uuid
- lv.set_tags(tags)
- else:
- # otherwise assume this is a regular disk partition
- name_uuid = self.get_ptuuid(device_name)
- path = device_name
- tags['ceph.%s_uuid' % device_type] = name_uuid
- tags['ceph.%s_device' % device_type] = path
- lv_uuid = name_uuid
- return path, lv_uuid, tags
-
- def prepare_data_device(self, device_type, osd_uuid):
- """
- Check if ``arg`` is a device or partition to create an LV out of it
- with a distinct volume group name, assigning LV tags on it and
- ultimately, returning the logical volume object. Failing to detect
- a device or partition will result in error.
-
- :param arg: The value of ``--data`` when parsing args
- :param device_type: Usually ``block``
- :param osd_uuid: The OSD uuid
- """
- device = self.args.data
- if disk.is_partition(device) or disk.is_device(device):
- # we must create a vg, and then a single lv
- lv_name_prefix = "osd-{}".format(device_type)
- kwargs = {'device': device,
- 'tags': {'ceph.type': device_type},
- 'slots': self.args.data_slots,
- }
- logger.debug('data device size: {}'.format(self.args.data_size))
- if self.args.data_size != 0:
- kwargs['size'] = self.args.data_size
- return api.create_lv(
- lv_name_prefix,
- osd_uuid,
- **kwargs)
- else:
- error = [
- 'Cannot use device ({}).'.format(device),
- 'A vg/lv path or an existing device is needed']
- raise RuntimeError(' '.join(error))
-
- raise RuntimeError('no data logical volume found with: {}'.format(device))
-
- def safe_prepare(self, args=None):
- """
- An intermediate step between `main()` and `prepare()` so that we can
- capture the `self.osd_id` in case we need to rollback
-
- :param args: Injected args, usually from `lvm create` which compounds
- both `prepare` and `create`
- """
- if args is not None:
- self.args = args
-
- try:
- vgname, lvname = self.args.data.split('/')
- lv = api.get_single_lv(filters={'lv_name': lvname,
- 'vg_name': vgname})
- except ValueError:
- lv = None
-
- if api.is_ceph_device(lv):
- logger.info("device {} is already used".format(self.args.data))
- raise RuntimeError("skipping {}, it is already prepared".format(self.args.data))
- try:
- self.prepare()
- except Exception:
- logger.exception('lvm prepare was unable to complete')
- logger.info('will rollback OSD ID creation')
- rollback_osd(self.args, self.osd_id)
- raise
- terminal.success("ceph-volume lvm prepare successful for: %s" % self.args.data)
-
- def get_cluster_fsid(self):
- """
- Allows using --cluster-fsid as an argument, but can fallback to reading
- from ceph.conf if that is unset (the default behavior).
- """
- if self.args.cluster_fsid:
- return self.args.cluster_fsid
- else:
- return conf.ceph.get('global', 'fsid')
-
- @decorators.needs_root
- def prepare(self):
- # FIXME we don't allow re-using a keyring, we always generate one for the
- # OSD, this needs to be fixed. This could either be a file (!) or a string
- # (!!) or some flags that we would need to compound into a dict so that we
- # can convert to JSON (!!!)
- secrets = {'cephx_secret': prepare_utils.create_key()}
- cephx_lockbox_secret = ''
- encrypted = 1 if self.args.dmcrypt else 0
- cephx_lockbox_secret = '' if not encrypted else prepare_utils.create_key()
-
- if encrypted:
- secrets['dmcrypt_key'] = encryption_utils.create_dmcrypt_key()
- secrets['cephx_lockbox_secret'] = cephx_lockbox_secret
-
- cluster_fsid = self.get_cluster_fsid()
-
- osd_fsid = self.args.osd_fsid or system.generate_uuid()
- crush_device_class = self.args.crush_device_class
- if crush_device_class:
- secrets['crush_device_class'] = crush_device_class
- # reuse a given ID if it exists, otherwise create a new ID
- self.osd_id = prepare_utils.create_id(osd_fsid, json.dumps(secrets), osd_id=self.args.osd_id)
- tags = {
- 'ceph.osd_fsid': osd_fsid,
- 'ceph.osd_id': self.osd_id,
- 'ceph.cluster_fsid': cluster_fsid,
- 'ceph.cluster_name': conf.cluster,
- 'ceph.crush_device_class': crush_device_class,
- 'ceph.osdspec_affinity': prepare_utils.get_osdspec_affinity()
- }
- if self.args.bluestore:
- try:
- vg_name, lv_name = self.args.data.split('/')
- block_lv = api.get_single_lv(filters={'lv_name': lv_name,
- 'vg_name': vg_name})
- except ValueError:
- block_lv = None
-
- if not block_lv:
- block_lv = self.prepare_data_device('block', osd_fsid)
-
- tags['ceph.block_device'] = block_lv.lv_path
- tags['ceph.block_uuid'] = block_lv.lv_uuid
- tags['ceph.cephx_lockbox_secret'] = cephx_lockbox_secret
- tags['ceph.encrypted'] = encrypted
- tags['ceph.vdo'] = api.is_vdo(block_lv.lv_path)
-
- wal_device, wal_uuid, tags = self.setup_device(
- 'wal',
- self.args.block_wal,
- tags,
- self.args.block_wal_size,
- self.args.block_wal_slots)
- db_device, db_uuid, tags = self.setup_device(
- 'db',
- self.args.block_db,
- tags,
- self.args.block_db_size,
- self.args.block_db_slots)
-
- tags['ceph.type'] = 'block'
- block_lv.set_tags(tags)
-
- prepare_bluestore(
- block_lv.lv_path,
- wal_device,
- db_device,
- secrets,
- tags,
- self.osd_id,
- osd_fsid,
- )
-
def main(self):
sub_command_help = dedent("""
Prepare an OSD by assigning an ID and FSID, registering them with the
@@ -315,13 +48,12 @@ class Prepare(object):
prog='ceph-volume lvm prepare',
description=sub_command_help,
)
- if len(self.argv) == 0:
+ if len(self.argv) == 0 and self.args is None:
print(sub_command_help)
return
- exclude_group_options(parser, argv=self.argv, groups=['bluestore'])
- self.args = parser.parse_args(self.argv)
- # Default to bluestore here since defaulting it in add_argument may
- # cause both to be True
- if not self.args.bluestore:
- self.args.bluestore = True
- self.safe_prepare()
+ if self.args is None:
+ self.args = parser.parse_args(self.argv)
+ if self.args.bluestore:
+ self.args.objectstore = 'bluestore'
+ self.objectstore = objectstore.mapping['LVM'][self.args.objectstore](args=self.args)
+ self.objectstore.safe_prepare()
diff --git a/src/ceph-volume/ceph_volume/devices/raw/activate.py b/src/ceph-volume/ceph_volume/devices/raw/activate.py
index 17be57dfeaa..38c74ef9829 100644
--- a/src/ceph-volume/ceph_volume/devices/raw/activate.py
+++ b/src/ceph-volume/ceph_volume/devices/raw/activate.py
@@ -1,95 +1,20 @@
from __future__ import print_function
import argparse
import logging
-import os
from textwrap import dedent
-from ceph_volume import process, conf, decorators, terminal
-from ceph_volume.util import system
-from ceph_volume.util import prepare as prepare_utils
-from .list import direct_report
+from ceph_volume import objectstore
logger = logging.getLogger(__name__)
-def activate_bluestore(meta, tmpfs, systemd):
- # find the osd
- osd_id = meta['osd_id']
- osd_uuid = meta['osd_uuid']
-
- # mount on tmpfs the osd directory
- osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
- if not system.path_is_mounted(osd_path):
- # mkdir -p and mount as tmpfs
- prepare_utils.create_osd_path(osd_id, tmpfs=tmpfs)
-
- # XXX This needs to be removed once ceph-bluestore-tool can deal with
- # symlinks that exist in the osd dir
- for link_name in ['block', 'block.db', 'block.wal']:
- link_path = os.path.join(osd_path, link_name)
- if os.path.exists(link_path):
- os.unlink(os.path.join(osd_path, link_name))
-
- # Once symlinks are removed, the osd dir can be 'primed again. chown first,
- # regardless of what currently exists so that ``prime-osd-dir`` can succeed
- # even if permissions are somehow messed up
- system.chown(osd_path)
- prime_command = [
- 'ceph-bluestore-tool',
- 'prime-osd-dir',
- '--path', osd_path,
- '--no-mon-config',
- '--dev', meta['device'],
- ]
- process.run(prime_command)
-
- # always re-do the symlink regardless if it exists, so that the block,
- # block.wal, and block.db devices that may have changed can be mapped
- # correctly every time
- prepare_utils.link_block(meta['device'], osd_id)
-
- if 'device_db' in meta:
- prepare_utils.link_db(meta['device_db'], osd_id, osd_uuid)
-
- if 'device_wal' in meta:
- prepare_utils.link_wal(meta['device_wal'], osd_id, osd_uuid)
-
- system.chown(osd_path)
- terminal.success("ceph-volume raw activate successful for osd ID: %s" % osd_id)
-
-
class Activate(object):
help = 'Discover and prepare a data directory for a (BlueStore) OSD on a raw device'
- def __init__(self, argv):
+ def __init__(self, argv, args=None):
+ self.objectstore = None
self.argv = argv
- self.args = None
-
- @decorators.needs_root
- def activate(self, devs, start_osd_id, start_osd_uuid,
- tmpfs, systemd):
- """
- :param args: The parsed arguments coming from the CLI
- """
- assert devs or start_osd_id or start_osd_uuid
- found = direct_report(devs)
-
- activated_any = False
- for osd_uuid, meta in found.items():
- osd_id = meta['osd_id']
- if start_osd_id is not None and str(osd_id) != str(start_osd_id):
- continue
- if start_osd_uuid is not None and osd_uuid != start_osd_uuid:
- continue
- logger.info('Activating osd.%s uuid %s cluster %s' % (
- osd_id, osd_uuid, meta['ceph_fsid']))
- activate_bluestore(meta,
- tmpfs=tmpfs,
- systemd=systemd)
- activated_any = True
-
- if not activated_any:
- raise RuntimeError('did not find any matching OSD to activate')
+ self.args = args
def main(self):
sub_command_help = dedent("""
@@ -126,7 +51,15 @@ class Activate(object):
'--no-systemd',
dest='no_systemd',
action='store_true',
- help='Skip creating and enabling systemd units and starting OSD services'
+ help='This argument has no effect; it is kept for backward compatibility.'
+ )
+ parser.add_argument(
+ '--objectstore',
+ dest='objectstore',
+ help='The OSD objectstore.',
+ default='bluestore',
+ choices=['bluestore', 'seastore'],
+ type=str,
)
parser.add_argument(
'--block.db',
@@ -147,20 +80,17 @@ class Activate(object):
if not self.argv:
print(sub_command_help)
return
- args = parser.parse_args(self.argv)
- self.args = args
- if not args.no_systemd:
- terminal.error('systemd support not yet implemented')
- raise SystemExit(1)
-
- devs = [args.device]
- if args.block_wal:
- devs.append(args.block_wal)
- if args.block_db:
- devs.append(args.block_db)
-
- self.activate(devs=devs,
- start_osd_id=args.osd_id,
- start_osd_uuid=args.osd_uuid,
- tmpfs=not args.no_tmpfs,
- systemd=not self.args.no_systemd)
+ self.args = parser.parse_args(self.argv)
+
+ devs = []
+ if self.args.device:
+ devs = [self.args.device]
+ if self.args.block_wal:
+ devs.append(self.args.block_wal)
+ if self.args.block_db:
+ devs.append(self.args.block_db)
+ self.objectstore = objectstore.mapping['RAW'][self.args.objectstore](args=self.args)
+ self.objectstore.activate(devs=devs,
+ start_osd_id=self.args.osd_id,
+ start_osd_uuid=self.args.osd_uuid,
+ tmpfs=not self.args.no_tmpfs)
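
For reference, the device-list assembly above reduces to a small pure function; this stand-alone sketch (illustrative names) shows the order in which `direct_report` will receive the devices:

    from types import SimpleNamespace

    def build_devs(args):
        # data device first, then optional wal/db, mirroring main() above
        devs = [args.device] if args.device else []
        if args.block_wal:
            devs.append(args.block_wal)
        if args.block_db:
            devs.append(args.block_db)
        return devs

    a = SimpleNamespace(device='/dev/vdb', block_wal=None, block_db='/dev/vdd')
    assert build_devs(a) == ['/dev/vdb', '/dev/vdd']
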
diff --git a/src/ceph-volume/ceph_volume/devices/raw/common.py b/src/ceph-volume/ceph_volume/devices/raw/common.py
index 4863b9e18e0..e3aea2c7250 100644
--- a/src/ceph-volume/ceph_volume/devices/raw/common.py
+++ b/src/ceph-volume/ceph_volume/devices/raw/common.py
@@ -12,6 +12,14 @@ def create_parser(prog, description):
description=description,
)
parser.add_argument(
+ '--objectstore',
+ dest='objectstore',
+ help='The OSD objectstore.',
+ default='bluestore',
+ choices=['bluestore', 'seastore'],
+ type=str,
+ )
+ parser.add_argument(
'--data',
required=True,
type=arg_validators.ValidRawDevice(as_string=True),
@@ -20,7 +28,8 @@ def create_parser(prog, description):
parser.add_argument(
'--bluestore',
action='store_true',
- help='Use BlueStore backend')
+ help='Use BlueStore backend. (DEPRECATED: use --objectstore instead)'
+ )
parser.add_argument(
'--crush-device-class',
dest='crush_device_class',
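
The parser now advertises `--objectstore` while keeping `--bluestore` as a deprecated alias. A hedged, self-contained sketch of how the two interact; note the folding happens in each subcommand's `main()`, not in the parser itself:

    import argparse

    parser = argparse.ArgumentParser(prog='sketch')
    parser.add_argument('--objectstore', default='bluestore',
                        choices=['bluestore', 'seastore'])
    parser.add_argument('--bluestore', action='store_true',
                        help='deprecated alias for --objectstore bluestore')

    args = parser.parse_args(['--bluestore'])
    if args.bluestore:                  # same folding as prepare.py's main()
        args.objectstore = 'bluestore'
    assert args.objectstore == 'bluestore'
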
diff --git a/src/ceph-volume/ceph_volume/devices/raw/prepare.py b/src/ceph-volume/ceph_volume/devices/raw/prepare.py
index b3201a89daf..e4308e55036 100644
--- a/src/ceph-volume/ceph_volume/devices/raw/prepare.py
+++ b/src/ceph-volume/ceph_volume/devices/raw/prepare.py
@@ -1,62 +1,12 @@
from __future__ import print_function
-import json
import logging
import os
from textwrap import dedent
-from ceph_volume.util import prepare as prepare_utils
-from ceph_volume.util import encryption as encryption_utils
-from ceph_volume.util import disk
-from ceph_volume.util import system
-from ceph_volume import decorators, terminal
-from ceph_volume.devices.lvm.common import rollback_osd
+from ceph_volume import terminal, objectstore
from .common import create_parser
logger = logging.getLogger(__name__)
-def prepare_dmcrypt(key, device, device_type, fsid):
- """
- Helper for devices that are encrypted. The operations needed for
- block, db, wal, devices are all the same
- """
- if not device:
- return ''
- kname = disk.lsblk(device)['KNAME']
- mapping = 'ceph-{}-{}-{}-dmcrypt'.format(fsid, kname, device_type)
- return encryption_utils.prepare_dmcrypt(key, device, mapping)
-
-def prepare_bluestore(block, wal, db, secrets, osd_id, fsid, tmpfs):
- """
- :param block: The name of the logical volume for the bluestore data
- :param wal: a regular/plain disk or logical volume, to be used for block.wal
- :param db: a regular/plain disk or logical volume, to be used for block.db
- :param secrets: A dict with the secrets needed to create the osd (e.g. cephx)
- :param id_: The OSD id
- :param fsid: The OSD fsid, also known as the OSD UUID
- """
- cephx_secret = secrets.get('cephx_secret', prepare_utils.create_key())
-
- if secrets.get('dmcrypt_key'):
- key = secrets['dmcrypt_key']
- block = prepare_dmcrypt(key, block, 'block', fsid)
- wal = prepare_dmcrypt(key, wal, 'wal', fsid)
- db = prepare_dmcrypt(key, db, 'db', fsid)
-
- # create the directory
- prepare_utils.create_osd_path(osd_id, tmpfs=tmpfs)
- # symlink the block
- prepare_utils.link_block(block, osd_id)
- # get the latest monmap
- prepare_utils.get_monmap(osd_id)
- # write the OSD keyring if it doesn't exist already
- prepare_utils.write_keyring(osd_id, cephx_secret)
- # prepare the osd filesystem
- prepare_utils.osd_mkfs_bluestore(
- osd_id, fsid,
- keyring=cephx_secret,
- wal=wal,
- db=db
- )
-
class Prepare(object):
@@ -65,65 +15,7 @@ class Prepare(object):
def __init__(self, argv):
self.argv = argv
self.osd_id = None
-
- def safe_prepare(self, args=None):
- """
- An intermediate step between `main()` and `prepare()` so that we can
- capture the `self.osd_id` in case we need to rollback
-
- :param args: Injected args, usually from `raw create` which compounds
- both `prepare` and `create`
- """
- if args is not None:
- self.args = args
- try:
- self.prepare()
- except Exception:
- logger.exception('raw prepare was unable to complete')
- logger.info('will rollback OSD ID creation')
- rollback_osd(self.args, self.osd_id)
- raise
- dmcrypt_log = 'dmcrypt' if args.dmcrypt else 'clear'
- terminal.success("ceph-volume raw {} prepare successful for: {}".format(dmcrypt_log, self.args.data))
-
-
- @decorators.needs_root
- def prepare(self):
- secrets = {'cephx_secret': prepare_utils.create_key()}
- encrypted = 1 if self.args.dmcrypt else 0
- cephx_lockbox_secret = '' if not encrypted else prepare_utils.create_key()
-
- if encrypted:
- secrets['dmcrypt_key'] = os.getenv('CEPH_VOLUME_DMCRYPT_SECRET')
- secrets['cephx_lockbox_secret'] = cephx_lockbox_secret # dummy value to make `ceph osd new` not complaining
-
- osd_fsid = system.generate_uuid()
- crush_device_class = self.args.crush_device_class
- if crush_device_class:
- secrets['crush_device_class'] = crush_device_class
- tmpfs = not self.args.no_tmpfs
- wal = ""
- db = ""
- if self.args.block_wal:
- wal = self.args.block_wal
- if self.args.block_db:
- db = self.args.block_db
-
- # reuse a given ID if it exists, otherwise create a new ID
- self.osd_id = prepare_utils.create_id(
- osd_fsid,
- json.dumps(secrets),
- osd_id=self.args.osd_id)
-
- prepare_bluestore(
- self.args.data,
- wal,
- db,
- secrets,
- self.osd_id,
- osd_fsid,
- tmpfs,
- )
+ self.objectstore = None
def main(self):
sub_command_help = dedent("""
@@ -148,13 +40,13 @@ class Prepare(object):
print(sub_command_help)
return
self.args = parser.parse_args(self.argv)
- if not self.args.bluestore:
- terminal.error('must specify --bluestore (currently the only supported backend)')
- raise SystemExit(1)
+ if self.args.bluestore:
+ self.args.objectstore = 'bluestore'
if self.args.dmcrypt and not os.getenv('CEPH_VOLUME_DMCRYPT_SECRET'):
terminal.error('encryption was requested (--dmcrypt) but environment variable ' \
'CEPH_VOLUME_DMCRYPT_SECRET is not set, you must set ' \
'this variable to provide a dmcrypt secret.')
raise SystemExit(1)
- self.safe_prepare(self.args)
+ self.objectstore = objectstore.mapping['RAW'][self.args.objectstore](args=self.args)
+ self.objectstore.safe_prepare(self.args)
diff --git a/src/ceph-volume/ceph_volume/objectstore/__init__.py b/src/ceph-volume/ceph_volume/objectstore/__init__.py
new file mode 100644
index 00000000000..f8bc2c50793
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/objectstore/__init__.py
@@ -0,0 +1,11 @@
+from . import lvmbluestore
+from . import rawbluestore
+
+mapping = {
+ 'LVM': {
+ 'bluestore': lvmbluestore.LvmBlueStore
+ },
+ 'RAW': {
+ 'bluestore': rawbluestore.RawBlueStore
+ }
+}
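
Note that the parsers accept `seastore` but this mapping only registers `bluestore`, so requesting seastore would presumably fail with a KeyError at dispatch time. A toy illustration (the value is a stand-in for a real class):

    mapping = {'RAW': {'bluestore': object}}   # as registered above

    try:
        mapping['RAW']['seastore']
    except KeyError:
        print('accepted by the parser, but no class is registered for it')
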
diff --git a/src/ceph-volume/ceph_volume/objectstore/baseobjectstore.py b/src/ceph-volume/ceph_volume/objectstore/baseobjectstore.py
new file mode 100644
index 00000000000..822f293f3ad
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/objectstore/baseobjectstore.py
@@ -0,0 +1,154 @@
+import logging
+import os
+import errno
+import time
+from ceph_volume import conf, terminal, process
+from ceph_volume.util import prepare as prepare_utils
+from ceph_volume.util import system, disk
+from typing import Dict, Any, List, Optional, TYPE_CHECKING
+
+if TYPE_CHECKING:
+ import argparse
+ from ceph_volume.api.lvm import Volume
+
+
+logger = logging.getLogger(__name__)
+
+
+class BaseObjectStore:
+ def __init__(self, args: "argparse.Namespace") -> None:
+ self.args: "argparse.Namespace" = args
+ # FIXME we don't allow re-using a keyring, we always generate one
+ # for the OSD, this needs to be fixed. This could either be a file (!)
+ # or a string (!!) or some flags that we would need to compound
+ # into a dict so that we can convert to JSON (!!!)
+ self.secrets = {'cephx_secret': prepare_utils.create_key()}
+ self.cephx_secret = self.secrets.get('cephx_secret',
+ prepare_utils.create_key())
+ self.encrypted = 0
+ self.tags: Dict[str, Any] = {}
+ self.osd_id: str = ''
+ self.osd_fsid = ''
+ self.block_lv: Optional["Volume"] = None
+ self.cephx_lockbox_secret = ''
+ self.objectstore: str = ''
+ self.osd_mkfs_cmd: List[str] = []
+ self.block_device_path = ''
+ if hasattr(self.args, 'dmcrypt'):
+ if self.args.dmcrypt:
+ self.encrypted = 1
+ self.cephx_lockbox_secret = prepare_utils.create_key()
+ self.secrets['cephx_lockbox_secret'] = \
+ self.cephx_lockbox_secret
+
+ def get_ptuuid(self, argument: str) -> str:
+ uuid = disk.get_partuuid(argument)
+ if not uuid:
+ terminal.error('blkid could not detect a PARTUUID for device: %s' %
+ argument)
+ raise RuntimeError('unable to use device')
+ return uuid
+
+ def get_osdspec_affinity(self) -> str:
+ return os.environ.get('CEPH_VOLUME_OSDSPEC_AFFINITY', '')
+
+ def pre_prepare(self) -> None:
+ raise NotImplementedError()
+
+ def prepare_data_device(self,
+ device_type: str,
+ osd_uuid: str) -> Optional["Volume"]:
+ raise NotImplementedError()
+
+ def safe_prepare(self, args: "argparse.Namespace") -> None:
+ raise NotImplementedError()
+
+ def add_objectstore_opts(self) -> None:
+ raise NotImplementedError()
+
+ def prepare_osd_req(self, tmpfs: bool = True) -> None:
+ # create the directory
+ prepare_utils.create_osd_path(self.osd_id, tmpfs=tmpfs)
+ # symlink the block
+ prepare_utils.link_block(self.block_device_path, self.osd_id)
+ # get the latest monmap
+ prepare_utils.get_monmap(self.osd_id)
+ # write the OSD keyring if it doesn't exist already
+ prepare_utils.write_keyring(self.osd_id, self.cephx_secret)
+
+ def prepare(self) -> None:
+ raise NotImplementedError()
+
+ def prepare_dmcrypt(self) -> None:
+ raise NotImplementedError()
+
+ def get_cluster_fsid(self) -> str:
+ """
+ Allows using --cluster-fsid as an argument, but can fall back to
+ reading from ceph.conf if that is unset (the default behavior).
+ """
+ if self.args.cluster_fsid:
+ return self.args.cluster_fsid
+ else:
+ return conf.ceph.get('global', 'fsid')
+
+ def get_osd_path(self) -> str:
+ return '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, self.osd_id)
+
+ def build_osd_mkfs_cmd(self) -> List[str]:
+ self.supplementary_command = [
+ '--osd-data', self.osd_path,
+ '--osd-uuid', self.osd_fsid,
+ '--setuser', 'ceph',
+ '--setgroup', 'ceph'
+ ]
+ self.osd_mkfs_cmd = [
+ 'ceph-osd',
+ '--cluster', conf.cluster,
+ '--osd-objectstore', self.objectstore,
+ '--mkfs',
+ '-i', self.osd_id,
+ '--monmap', self.monmap,
+ ]
+ if self.cephx_secret is not None:
+ self.osd_mkfs_cmd.extend(['--keyfile', '-'])
+ try:
+ self.add_objectstore_opts()
+ except NotImplementedError:
+ logger.info("No specific objectstore options to add.")
+
+ self.osd_mkfs_cmd.extend(self.supplementary_command)
+ return self.osd_mkfs_cmd
+
+ def osd_mkfs(self) -> None:
+ self.osd_path = self.get_osd_path()
+ self.monmap = os.path.join(self.osd_path, 'activate.monmap')
+ cmd = self.build_osd_mkfs_cmd()
+
+ system.chown(self.osd_path)
+ """
+ When running in containers the --mkfs on raw device sometimes fails
+ to acquire a lock through flock() on the device because systemd-udevd holds one temporarily.
+ See KernelDevice.cc and _lock() to understand how ceph-osd acquires the lock.
+ Because this is really transient, we retry up to 5 times and wait for 1 sec in-between
+ """
+ for retry in range(5):
+ _, _, returncode = process.call(cmd,
+ stdin=self.cephx_secret,
+ terminal_verbose=True,
+ show_command=True)
+ if returncode == 0:
+ break
+ else:
+ if returncode == errno.EWOULDBLOCK:
+ time.sleep(1)
+ logger.info('disk is held by another process, '
+ 'trying to mkfs again... (attempt %s/5)' %
+ (retry + 1))
+ continue
+ else:
+ raise RuntimeError('Command failed with exit code %s: %s' %
+ (returncode, ' '.join(cmd)))
+
+ def activate(self) -> None:
+ raise NotImplementedError()
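
Two things are worth flagging in `osd_mkfs()`: the loop compares a process exit code against `errno.EWOULDBLOCK` (11), and it falls out silently if all five attempts stay locked. A self-contained sketch of the same retry shape, with an explicit failure after exhaustion (that final raise is my assumption, not what the code above does):

    import errno
    import time

    def run_with_retry(call, attempts=5, delay=1):
        # retry while the device is transiently locked (e.g. by systemd-udevd)
        for attempt in range(attempts):
            rc = call()
            if rc == 0:
                return
            if rc == errno.EWOULDBLOCK:
                time.sleep(delay)
                continue
            raise RuntimeError('command failed with exit code %s' % rc)
        raise RuntimeError('device still locked after %s attempts' % attempts)

    codes = iter([errno.EWOULDBLOCK, 0])       # locked once, then succeeds
    run_with_retry(lambda: next(codes), delay=0)
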
diff --git a/src/ceph-volume/ceph_volume/objectstore/bluestore.py b/src/ceph-volume/ceph_volume/objectstore/bluestore.py
new file mode 100644
index 00000000000..e9b0e9516cb
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/objectstore/bluestore.py
@@ -0,0 +1,61 @@
+import logging
+import os
+from .baseobjectstore import BaseObjectStore
+from ceph_volume.util import system
+from typing import Optional, TYPE_CHECKING
+
+if TYPE_CHECKING:
+ import argparse
+
+logger = logging.getLogger(__name__)
+
+
+class BlueStore(BaseObjectStore):
+ def __init__(self, args: "argparse.Namespace") -> None:
+ super().__init__(args)
+ self.args: "argparse.Namespace" = args
+ self.objectstore = 'bluestore'
+ self.osd_id: str = ''
+ self.osd_fsid: str = ''
+ self.osd_path: str = ''
+ self.key: Optional[str] = None
+ self.block_device_path: str = ''
+ self.wal_device_path: str = ''
+ self.db_device_path: str = ''
+
+ def add_objectstore_opts(self) -> None:
+ """
+ Create the files for the OSD to function. A normal call will look like:
+
+ ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
+ --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
+ --osd-data /var/lib/ceph/osd/ceph-0 \
+ --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
+ --keyring /var/lib/ceph/osd/ceph-0/keyring \
+ --setuser ceph --setgroup ceph
+
+ In some cases it is required to use the keyring; when it is passed
+ in as a keyword argument, it is used as part of the ceph-osd command.
+ """
+
+ if self.wal_device_path:
+ self.osd_mkfs_cmd.extend(
+ ['--bluestore-block-wal-path', self.wal_device_path]
+ )
+ system.chown(self.wal_device_path)
+
+ if self.db_device_path:
+ self.osd_mkfs_cmd.extend(
+ ['--bluestore-block-db-path', self.db_device_path]
+ )
+ system.chown(self.db_device_path)
+
+ if self.get_osdspec_affinity():
+ self.osd_mkfs_cmd.extend(['--osdspec-affinity',
+ self.get_osdspec_affinity()])
+
+ def unlink_bs_symlinks(self) -> None:
+ for link_name in ['block', 'block.db', 'block.wal']:
+ link_path = os.path.join(self.osd_path, link_name)
+ if os.path.exists(link_path):
+ os.unlink(os.path.join(self.osd_path, link_name))
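
`add_objectstore_opts()` is the template-method hook this hierarchy turns on: the base class builds the common `ceph-osd --mkfs` command and catches `NotImplementedError` when a backend has nothing to add. A minimal stand-alone sketch of the pattern (class names are stand-ins):

    from typing import List

    class Base:
        def __init__(self) -> None:
            self.cmd: List[str] = []

        def build_cmd(self) -> List[str]:
            self.cmd = ['ceph-osd', '--mkfs']
            try:
                self.add_objectstore_opts()
            except NotImplementedError:
                pass                      # backend has nothing to add
            return self.cmd

        def add_objectstore_opts(self) -> None:
            raise NotImplementedError()

    class BlueStoreish(Base):
        wal_device_path = '/dev/fake-wal'

        def add_objectstore_opts(self) -> None:
            self.cmd.extend(['--bluestore-block-wal-path',
                             self.wal_device_path])

    assert '--bluestore-block-wal-path' in BlueStoreish().build_cmd()
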
diff --git a/src/ceph-volume/ceph_volume/objectstore/lvmbluestore.py b/src/ceph-volume/ceph_volume/objectstore/lvmbluestore.py
new file mode 100644
index 00000000000..5dc46361e92
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/objectstore/lvmbluestore.py
@@ -0,0 +1,491 @@
+import json
+import logging
+import os
+from ceph_volume import conf, terminal, decorators, configuration, process
+from ceph_volume.api import lvm as api
+from ceph_volume.util import prepare as prepare_utils
+from ceph_volume.util import encryption as encryption_utils
+from ceph_volume.util import system, disk
+from ceph_volume.systemd import systemctl
+from ceph_volume.devices.lvm.common import rollback_osd
+from ceph_volume.devices.lvm.listing import direct_report
+from .bluestore import BlueStore
+from typing import Dict, Any, Optional, List, Tuple, TYPE_CHECKING
+
+if TYPE_CHECKING:
+ import argparse
+ from ceph_volume.api.lvm import Volume
+
+logger = logging.getLogger(__name__)
+
+
+class LvmBlueStore(BlueStore):
+ def __init__(self, args: "argparse.Namespace") -> None:
+ super().__init__(args)
+ self.tags: Dict[str, Any] = {}
+ self.block_lv: Optional["Volume"] = None
+
+ def pre_prepare(self) -> None:
+ if self.encrypted:
+ self.secrets['dmcrypt_key'] = encryption_utils.create_dmcrypt_key()
+
+ cluster_fsid = self.get_cluster_fsid()
+
+ self.osd_fsid = self.args.osd_fsid or system.generate_uuid()
+ crush_device_class = self.args.crush_device_class
+ if crush_device_class:
+ self.secrets['crush_device_class'] = crush_device_class
+ # reuse a given ID if it exists, otherwise create a new ID
+ self.osd_id = prepare_utils.create_id(self.osd_fsid,
+ json.dumps(self.secrets),
+ osd_id=self.args.osd_id)
+ self.tags = {
+ 'ceph.osd_fsid': self.osd_fsid,
+ 'ceph.osd_id': self.osd_id,
+ 'ceph.cluster_fsid': cluster_fsid,
+ 'ceph.cluster_name': conf.cluster,
+ 'ceph.crush_device_class': crush_device_class,
+ 'ceph.osdspec_affinity': self.get_osdspec_affinity()
+ }
+
+ try:
+ vg_name, lv_name = self.args.data.split('/')
+ self.block_lv = api.get_single_lv(filters={'lv_name': lv_name,
+ 'vg_name': vg_name})
+ except ValueError:
+ self.block_lv = None
+
+ if not self.block_lv:
+ self.block_lv = self.prepare_data_device('block', self.osd_fsid)
+ self.block_device_path = self.block_lv.__dict__['lv_path']
+
+ self.tags['ceph.block_device'] = self.block_lv.__dict__['lv_path']
+ self.tags['ceph.block_uuid'] = self.block_lv.__dict__['lv_uuid']
+ self.tags['ceph.cephx_lockbox_secret'] = self.cephx_lockbox_secret
+ self.tags['ceph.encrypted'] = self.encrypted
+ self.tags['ceph.vdo'] = api.is_vdo(self.block_lv.__dict__['lv_path'])
+
+ def prepare_data_device(self,
+ device_type: str,
+ osd_uuid: str) -> Optional["Volume"]:
+ """
+ Check if ``self.args.data`` is a device or partition and, if so,
+ create an LV out of it with a distinct volume group name, assign LV
+ tags to it and, ultimately, return the logical volume object. Failing
+ to detect a device or partition will result in an error.
+
+ :param device_type: Usually ``block``
+ :param osd_uuid: The OSD uuid
+ """
+
+ device = self.args.data
+ if disk.is_partition(device) or disk.is_device(device):
+ # we must create a vg, and then a single lv
+ lv_name_prefix = "osd-{}".format(device_type)
+ kwargs = {
+ 'device': device,
+ 'tags': {'ceph.type': device_type},
+ 'slots': self.args.data_slots,
+ }
+ logger.debug('data device size: {}'.format(self.args.data_size))
+ if self.args.data_size != 0:
+ kwargs['size'] = self.args.data_size
+ return api.create_lv(
+ lv_name_prefix,
+ osd_uuid,
+ **kwargs)
+ else:
+ error = [
+ 'Cannot use device ({}).'.format(device),
+ 'A vg/lv path or an existing device is needed']
+ raise RuntimeError(' '.join(error))
+
+ def safe_prepare(self,
+ args: Optional["argparse.Namespace"] = None) -> None:
+ """
+ An intermediate step between `main()` and `prepare()` so that we can
+ capture the `self.osd_id` in case we need to rollback
+
+ :param args: Injected args, usually from `lvm create` which compounds
+ both `prepare` and `create`
+ """
+ if args is not None:
+ self.args = args
+
+ try:
+ vgname, lvname = self.args.data.split('/')
+ lv = api.get_single_lv(filters={'lv_name': lvname,
+ 'vg_name': vgname})
+ except ValueError:
+ lv = None
+
+ if api.is_ceph_device(lv):
+ logger.info("device {} is already used".format(self.args.data))
+ raise RuntimeError("skipping {}, it is already prepared".format(
+ self.args.data))
+ try:
+ self.prepare()
+ except Exception:
+ logger.exception('lvm prepare was unable to complete')
+ logger.info('will rollback OSD ID creation')
+ rollback_osd(self.args, self.osd_id)
+ raise
+ terminal.success("ceph-volume lvm prepare successful for: %s" %
+ self.args.data)
+
+ @decorators.needs_root
+ def prepare(self) -> None:
+ # 1/
+ # Needs to be reworked (move it to the parent class and call super()?)
+ self.pre_prepare()
+
+ # 2/
+ self.wal_device_path, wal_uuid, tags = self.setup_device(
+ 'wal',
+ self.args.block_wal,
+ self.tags,
+ self.args.block_wal_size,
+ self.args.block_wal_slots)
+ self.db_device_path, db_uuid, tags = self.setup_device(
+ 'db',
+ self.args.block_db,
+ self.tags,
+ self.args.block_db_size,
+ self.args.block_db_slots)
+
+ self.tags['ceph.type'] = 'block'
+ self.block_lv.set_tags(self.tags) # type: ignore
+
+ # 3/ encryption-only operations
+ if self.secrets.get('dmcrypt_key'):
+ self.prepare_dmcrypt()
+
+ # 4/ osd_prepare req
+ self.prepare_osd_req()
+
+ # 5/ bluestore mkfs
+ # prepare the osd filesystem
+ self.osd_mkfs()
+
+ def prepare_dmcrypt(self) -> None:
+ # If encrypted, there is no need to create the lockbox keyring file
+ # because bluestore re-creates the files and does not have support
+ # for other files like the custom lockbox one. This will need to be
+ # done on activation. Format and open ('decrypt' devices) and
+ # re-assign the device and journal variables so that the rest of the
+ # process can use the mapper paths
+ key = self.secrets['dmcrypt_key']
+
+ self.block_device_path = \
+ self.luks_format_and_open(key,
+ self.block_device_path,
+ 'block',
+ self.tags)
+ self.wal_device_path = self.luks_format_and_open(key,
+ self.wal_device_path,
+ 'wal',
+ self.tags)
+ self.db_device_path = self.luks_format_and_open(key,
+ self.db_device_path,
+ 'db',
+ self.tags)
+
+ def luks_format_and_open(self,
+ key: Optional[str],
+ device: str,
+ device_type: str,
+ tags: Dict[str, Any]) -> str:
+ """
+ Helper for devices that are encrypted. The operations needed for
+ block, db, wal devices are all the same
+ """
+ if not device:
+ return ''
+ tag_name = 'ceph.%s_uuid' % device_type
+ uuid = tags[tag_name]
+ # format data device
+ encryption_utils.luks_format(
+ key,
+ device
+ )
+ encryption_utils.luks_open(
+ key,
+ device,
+ uuid
+ )
+
+ return '/dev/mapper/%s' % uuid
+
+ def setup_device(self,
+ device_type: str,
+ device_name: str,
+ tags: Dict[str, Any],
+ size: int,
+ slots: int) -> Tuple[str, str, Dict[str, Any]]:
+ """
+ Check if ``device`` is an lv, if so, set the tags, making sure to
+ update the tags with the lv_uuid and lv_path which the incoming tags
+ will not have.
+
+ If the device is not a logical volume, then retrieve the partition UUID
+ by querying ``blkid``
+ """
+ if device_name is None:
+ return '', '', tags
+ tags['ceph.type'] = device_type
+ tags['ceph.vdo'] = api.is_vdo(device_name)
+
+ try:
+ vg_name, lv_name = device_name.split('/')
+ lv = api.get_single_lv(filters={'lv_name': lv_name,
+ 'vg_name': vg_name})
+ except ValueError:
+ lv = None
+
+ if lv:
+ lv_uuid = lv.lv_uuid
+ path = lv.lv_path
+ tags['ceph.%s_uuid' % device_type] = lv_uuid
+ tags['ceph.%s_device' % device_type] = path
+ lv.set_tags(tags)
+ elif disk.is_device(device_name):
+ # We got a disk, create an lv
+ lv_type = "osd-{}".format(device_type)
+ name_uuid = system.generate_uuid()
+ kwargs = {
+ 'device': device_name,
+ 'tags': tags,
+ 'slots': slots
+ }
+ # TODO use get_block_db_size and co here to get configured size in
+ # conf file
+ if size != 0:
+ kwargs['size'] = size
+ lv = api.create_lv(
+ lv_type,
+ name_uuid,
+ **kwargs)
+ path = lv.lv_path
+ tags['ceph.{}_device'.format(device_type)] = path
+ tags['ceph.{}_uuid'.format(device_type)] = lv.lv_uuid
+ lv_uuid = lv.lv_uuid
+ lv.set_tags(tags)
+ else:
+ # otherwise assume this is a regular disk partition
+ name_uuid = self.get_ptuuid(device_name)
+ path = device_name
+ tags['ceph.%s_uuid' % device_type] = name_uuid
+ tags['ceph.%s_device' % device_type] = path
+ lv_uuid = name_uuid
+ return path, lv_uuid, tags
+
+ def get_osd_device_path(self,
+ osd_lvs: List["Volume"],
+ device_type: str,
+ dmcrypt_secret: Optional[str] = None) -> Optional[str]:
+ """
+ ``device_type`` can be one of ``db``, ``wal`` or ``block`` so that we
+ can query LVs on system and fallback to querying the uuid if that is
+ not present.
+
+ Return a path if possible, failing to do that a ``None``, since some of
+ these devices are optional.
+ """
+ # TODO(guits): this should be moved into a new function get_device_uuid_from_lv()
+ osd_block_lv = None
+ for lv in osd_lvs:
+ if lv.tags.get('ceph.type') == 'block':
+ osd_block_lv = lv
+ break
+ if osd_block_lv:
+ is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
+ logger.debug('Found block device (%s) with encryption: %s',
+ osd_block_lv.name, is_encrypted)
+ uuid_tag = 'ceph.%s_uuid' % device_type
+ device_uuid = osd_block_lv.tags.get(uuid_tag)
+ if not device_uuid:
+ return None
+
+ device_lv: Optional["Volume"] = None
+ for lv in osd_lvs:
+ if lv.tags.get('ceph.type') == device_type:
+ device_lv = lv
+ break
+ if device_lv:
+ if is_encrypted:
+ encryption_utils.luks_open(dmcrypt_secret,
+ device_lv.__dict__['lv_path'],
+ device_uuid)
+ return '/dev/mapper/%s' % device_uuid
+ return device_lv.__dict__['lv_path']
+
+ # this could be a regular device, so query it with blkid
+ physical_device = disk.get_device_from_partuuid(device_uuid)
+ if physical_device:
+ if is_encrypted:
+ encryption_utils.luks_open(dmcrypt_secret,
+ physical_device,
+ device_uuid)
+ return '/dev/mapper/%s' % device_uuid
+ return physical_device
+
+ raise RuntimeError('could not find %s with uuid %s' % (device_type,
+ device_uuid))
+
+ def _activate(self,
+ osd_lvs: List["Volume"],
+ no_systemd: bool = False,
+ no_tmpfs: bool = False) -> None:
+ for lv in osd_lvs:
+ if lv.tags.get('ceph.type') == 'block':
+ osd_block_lv = lv
+ break
+ else:
+ raise RuntimeError('could not find a bluestore OSD to activate')
+
+ is_encrypted = osd_block_lv.tags.get('ceph.encrypted', '0') == '1'
+ dmcrypt_secret = None
+ osd_id = osd_block_lv.tags['ceph.osd_id']
+ conf.cluster = osd_block_lv.tags['ceph.cluster_name']
+ osd_fsid = osd_block_lv.tags['ceph.osd_fsid']
+ configuration.load_ceph_conf_path(
+ osd_block_lv.tags['ceph.cluster_name'])
+ configuration.load()
+
+ # mount on tmpfs the osd directory
+ self.osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
+ if not system.path_is_mounted(self.osd_path):
+ # mkdir -p and mount as tmpfs
+ prepare_utils.create_osd_path(osd_id, tmpfs=not no_tmpfs)
+
+ # XXX This needs to be removed once ceph-bluestore-tool can deal with
+ # symlinks that exist in the osd dir
+ self.unlink_bs_symlinks()
+
+ # encryption is handled here, before priming the OSD dir
+ if is_encrypted:
+ osd_lv_path = '/dev/mapper/%s' % osd_block_lv.__dict__['lv_uuid']
+ lockbox_secret = osd_block_lv.tags['ceph.cephx_lockbox_secret']
+ encryption_utils.write_lockbox_keyring(osd_id,
+ osd_fsid,
+ lockbox_secret)
+ dmcrypt_secret = encryption_utils.get_dmcrypt_key(osd_id, osd_fsid)
+ encryption_utils.luks_open(dmcrypt_secret,
+ osd_block_lv.__dict__['lv_path'],
+ osd_block_lv.__dict__['lv_uuid'])
+ else:
+ osd_lv_path = osd_block_lv.__dict__['lv_path']
+
+ db_device_path = \
+ self.get_osd_device_path(osd_lvs, 'db',
+ dmcrypt_secret=dmcrypt_secret)
+ wal_device_path = \
+ self.get_osd_device_path(osd_lvs,
+ 'wal',
+ dmcrypt_secret=dmcrypt_secret)
+
+ # Once symlinks are removed, the osd dir can be 'primed' again.
+ # chown first, regardless of what currently exists so that
+ # ``prime-osd-dir`` can succeed even if permissions are
+ # somehow messed up.
+ system.chown(self.osd_path)
+ prime_command = [
+ 'ceph-bluestore-tool', '--cluster=%s' % conf.cluster,
+ 'prime-osd-dir', '--dev', osd_lv_path,
+ '--path', self.osd_path, '--no-mon-config']
+
+ process.run(prime_command)
+ # always re-do the symlink regardless if it exists, so that the block,
+ # block.wal, and block.db devices that may have changed can be mapped
+ # correctly every time
+ process.run(['ln',
+ '-snf',
+ osd_lv_path,
+ os.path.join(self.osd_path, 'block')])
+ system.chown(os.path.join(self.osd_path, 'block'))
+ system.chown(self.osd_path)
+ if db_device_path:
+ destination = os.path.join(self.osd_path, 'block.db')
+ process.run(['ln', '-snf', db_device_path, destination])
+ system.chown(db_device_path)
+ system.chown(destination)
+ if wal_device_path:
+ destination = os.path.join(self.osd_path, 'block.wal')
+ process.run(['ln', '-snf', wal_device_path, destination])
+ system.chown(wal_device_path)
+ system.chown(destination)
+
+ if no_systemd is False:
+ # enable the ceph-volume unit for this OSD
+ systemctl.enable_volume(osd_id, osd_fsid, 'lvm')
+
+ # enable the OSD
+ systemctl.enable_osd(osd_id)
+
+ # start the OSD
+ systemctl.start_osd(osd_id)
+ terminal.success("ceph-volume lvm activate successful for osd ID: %s" %
+ osd_id)
+
+ @decorators.needs_root
+ def activate_all(self) -> None:
+ listed_osds = direct_report()
+ osds = {}
+ for osd_id, devices in listed_osds.items():
+ # the metadata for all devices in each OSD will contain
+ # the FSID which is required for activation
+ for device in devices:
+ fsid = device.get('tags', {}).get('ceph.osd_fsid')
+ if fsid:
+ osds[fsid] = osd_id
+ break
+ if not osds:
+ terminal.warning('Was unable to find any OSDs to activate')
+ terminal.warning('Verify OSDs are present with '
+ '"ceph-volume lvm list"')
+ return
+ for osd_fsid, osd_id in osds.items():
+ if not self.args.no_systemd and systemctl.osd_is_active(osd_id):
+ terminal.warning(
+ 'OSD ID %s FSID %s process is active. '
+ 'Skipping activation' % (osd_id, osd_fsid)
+ )
+ else:
+ terminal.info('Activating OSD ID %s FSID %s' % (osd_id,
+ osd_fsid))
+ self.activate(self.args, osd_id=osd_id, osd_fsid=osd_fsid)
+
+ @decorators.needs_root
+ def activate(self,
+ args: Optional["argparse.Namespace"] = None,
+ osd_id: Optional[str] = None,
+ osd_fsid: Optional[str] = None) -> None:
+ """
+ :param args: The parsed arguments coming from the CLI
+ :param osd_id: When activating all, this gets populated with an
+ existing OSD ID
+ :param osd_fsid: When activating all, this gets populated with an
+ existing OSD FSID
+ """
+ osd_id = osd_id if osd_id else self.args.osd_id
+ osd_fsid = osd_fsid if osd_fsid else self.args.osd_fsid
+
+ if osd_id and osd_fsid:
+ tags = {'ceph.osd_id': osd_id, 'ceph.osd_fsid': osd_fsid}
+ elif not osd_id and osd_fsid:
+ tags = {'ceph.osd_fsid': osd_fsid}
+ elif osd_id and not osd_fsid:
+ raise RuntimeError('could not activate osd.{}, please provide the '
+ 'osd_fsid too'.format(osd_id))
+ else:
+ raise RuntimeError('Please provide both osd_id and osd_fsid')
+ lvs = api.get_lvs(tags=tags)
+ if not lvs:
+ raise RuntimeError('could not find osd.%s with osd_fsid %s' %
+ (osd_id, osd_fsid))
+
+ self._activate(lvs, self.args.no_systemd, getattr(self.args,
+ 'no_tmpfs',
+ False))
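
The osd_id/osd_fsid validation at the end of `activate()` is easy to get wrong when calling it programmatically; this stand-alone sketch isolates the rule (fsid alone is acceptable, id alone is not; ids travel as strings such as '0', which matters because a bare integer 0 would be treated as missing):

    def activation_tags(osd_id=None, osd_fsid=None):
        if osd_id and osd_fsid:
            return {'ceph.osd_id': osd_id, 'ceph.osd_fsid': osd_fsid}
        if osd_fsid:
            return {'ceph.osd_fsid': osd_fsid}
        if osd_id:
            raise RuntimeError('could not activate osd.{}, please provide '
                               'the osd_fsid too'.format(osd_id))
        raise RuntimeError('Please provide both osd_id and osd_fsid')

    assert activation_tags(osd_fsid='1234') == {'ceph.osd_fsid': '1234'}
    assert activation_tags('0', '1234') == {'ceph.osd_id': '0',
                                            'ceph.osd_fsid': '1234'}
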
diff --git a/src/ceph-volume/ceph_volume/objectstore/rawbluestore.py b/src/ceph-volume/ceph_volume/objectstore/rawbluestore.py
new file mode 100644
index 00000000000..5ac16617e50
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/objectstore/rawbluestore.py
@@ -0,0 +1,181 @@
+import logging
+import json
+import os
+from .bluestore import BlueStore
+from ceph_volume import terminal, decorators, conf, process
+from ceph_volume.util import system, disk
+from ceph_volume.util import prepare as prepare_utils
+from ceph_volume.util import encryption as encryption_utils
+from ceph_volume.devices.lvm.common import rollback_osd
+from ceph_volume.devices.raw.list import direct_report
+from typing import Any, Dict, List, Optional, TYPE_CHECKING
+
+if TYPE_CHECKING:
+ import argparse
+
+logger = logging.getLogger(__name__)
+
+
+class RawBlueStore(BlueStore):
+ def __init__(self, args: "argparse.Namespace") -> None:
+ super().__init__(args)
+ if hasattr(self.args, 'data'):
+ self.block_device_path = self.args.data
+ if hasattr(self.args, 'block_db'):
+ self.db_device_path = self.args.block_db
+ if hasattr(self.args, 'block_wal'):
+ self.wal_device_path = self.args.block_wal
+
+ def prepare_dmcrypt(self) -> None:
+ """
+ Helper for devices that are encrypted. The operations needed for
+ block, db and wal devices are all the same
+ """
+ key = self.secrets['dmcrypt_key']
+
+ for device, device_type in [(self.block_device_path, 'block'),
+ (self.db_device_path, 'db'),
+ (self.wal_device_path, 'wal')]:
+
+ if device:
+ kname = disk.lsblk(device)['KNAME']
+ mapping = 'ceph-{}-{}-{}-dmcrypt'.format(self.osd_fsid,
+ kname,
+ device_type)
+ # format data device
+ encryption_utils.luks_format(
+ key,
+ device
+ )
+ encryption_utils.luks_open(
+ key,
+ device,
+ mapping
+ )
+ self.__dict__[f'{device_type}_device_path'] = \
+ '/dev/mapper/{}'.format(mapping)
+
+ def safe_prepare(self,
+ args: Optional["argparse.Namespace"] = None) -> None:
+ """
+ An intermediate step between `main()` and `prepare()` so that we can
+ capture the `self.osd_id` in case we need to rollback
+
+ :param args: Injected args, usually from `raw create` which compounds
+ both `prepare` and `create`
+ """
+ if args is not None:
+ self.args = args # This should be moved (to __init__ ?)
+ try:
+ self.prepare()
+ except Exception:
+ logger.exception('raw prepare was unable to complete')
+ logger.info('will rollback OSD ID creation')
+ rollback_osd(self.args, self.osd_id)
+ raise
+ dmcrypt_log = 'dmcrypt' if getattr(self.args, 'dmcrypt', False) else 'clear'
+ terminal.success("ceph-volume raw {} prepare "
+ "successful for: {}".format(dmcrypt_log,
+ self.args.data))
+
+ @decorators.needs_root
+ def prepare(self) -> None:
+ if self.encrypted:
+ self.secrets['dmcrypt_key'] = \
+ os.getenv('CEPH_VOLUME_DMCRYPT_SECRET')
+ self.osd_fsid = system.generate_uuid()
+ crush_device_class = self.args.crush_device_class
+ if crush_device_class:
+ self.secrets['crush_device_class'] = crush_device_class
+
+ tmpfs = not self.args.no_tmpfs
+ if self.args.block_wal:
+ self.wal = self.args.block_wal
+ if self.args.block_db:
+ self.db = self.args.block_db
+
+ # reuse a given ID if it exists, otherwise create a new ID
+ self.osd_id = prepare_utils.create_id(
+ self.osd_fsid, json.dumps(self.secrets))
+
+ if self.secrets.get('dmcrypt_key'):
+ self.prepare_dmcrypt()
+
+ self.prepare_osd_req(tmpfs=tmpfs)
+
+ # prepare the osd filesystem
+ self.osd_mkfs()
+
+ def _activate(self,
+ meta: Dict[str, Any],
+ tmpfs: bool) -> None:
+ # find the osd
+ osd_id = meta['osd_id']
+ osd_uuid = meta['osd_uuid']
+
+ # mount on tmpfs the osd directory
+ self.osd_path = '/var/lib/ceph/osd/%s-%s' % (conf.cluster, osd_id)
+ if not system.path_is_mounted(self.osd_path):
+ # mkdir -p and mount as tmpfs
+ prepare_utils.create_osd_path(osd_id, tmpfs=tmpfs)
+
+ # XXX This needs to be removed once ceph-bluestore-tool can deal with
+ # symlinks that exist in the osd dir
+
+ self.unlink_bs_symlinks()
+
+ # Once symlinks are removed, the osd dir can be 'primed' again. chown
+ # first, regardless of what currently exists so that ``prime-osd-dir``
+ # can succeed even if permissions are somehow messed up
+ system.chown(self.osd_path)
+ prime_command = [
+ 'ceph-bluestore-tool',
+ 'prime-osd-dir',
+ '--path', self.osd_path,
+ '--no-mon-config',
+ '--dev', meta['device'],
+ ]
+ process.run(prime_command)
+
+ # always re-do the symlink regardless if it exists, so that the block,
+ # block.wal, and block.db devices that may have changed can be mapped
+ # correctly every time
+ prepare_utils.link_block(meta['device'], osd_id)
+
+ if 'device_db' in meta:
+ prepare_utils.link_db(meta['device_db'], osd_id, osd_uuid)
+
+ if 'device_wal' in meta:
+ prepare_utils.link_wal(meta['device_wal'], osd_id, osd_uuid)
+
+ system.chown(self.osd_path)
+ terminal.success("ceph-volume raw activate "
+ "successful for osd ID: %s" % osd_id)
+
+ @decorators.needs_root
+ def activate(self,
+ devs: List[str],
+ start_osd_id: str,
+ start_osd_uuid: str,
+ tmpfs: bool) -> None:
+ """
+ :param devs: list of devices to pass to ``direct_report()``
+ :param start_osd_id: optional OSD id; activate only the matching OSD
+ :param start_osd_uuid: optional OSD uuid; activate only the matching OSD
+ :param tmpfs: whether to mount the OSD directory on tmpfs
+ """
+ assert devs or start_osd_id or start_osd_uuid
+ found = direct_report(devs)
+
+ activated_any = False
+ for osd_uuid, meta in found.items():
+ osd_id = meta['osd_id']
+ if start_osd_id is not None and str(osd_id) != str(start_osd_id):
+ continue
+ if start_osd_uuid is not None and osd_uuid != start_osd_uuid:
+ continue
+ logger.info('Activating osd.%s uuid %s cluster %s' % (
+ osd_id, osd_uuid, meta['ceph_fsid']))
+ self._activate(meta,
+ tmpfs=tmpfs)
+ activated_any = True
+
+ if not activated_any:
+ raise RuntimeError('did not find any matching OSD to activate')
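
The dm-crypt mapper names produced by `prepare_dmcrypt()` follow a fixed convention; a small sketch of the naming rule (the fsid and kname values are illustrative):

    def dmcrypt_mapping(osd_fsid, kname, device_type):
        # same convention as prepare_dmcrypt() above; the opened device
        # then appears at /dev/mapper/<mapping>
        return 'ceph-{}-{}-{}-dmcrypt'.format(osd_fsid, kname, device_type)

    assert (dmcrypt_mapping('824f7edf', 'vdb', 'block')
            == 'ceph-824f7edf-vdb-block-dmcrypt')
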
diff --git a/src/ceph-volume/ceph_volume/tests/api/test_lvm.py b/src/ceph-volume/ceph_volume/tests/api/test_lvm.py
index 139328b4a0d..9ad2f701f12 100644
--- a/src/ceph-volume/ceph_volume/tests/api/test_lvm.py
+++ b/src/ceph-volume/ceph_volume/tests/api/test_lvm.py
@@ -782,7 +782,7 @@ class TestGetLVs(object):
class TestGetSinglePV(object):
- @patch('ceph_volume.devices.lvm.prepare.api.get_pvs')
+ @patch('ceph_volume.api.lvm.get_pvs')
def test_get_single_pv_multiple_matches_raises_runtimeerror(self, m_get_pvs):
fake_pvs = []
fake_pvs.append(api.PVolume(pv_name='/dev/sda', pv_tags={}))
@@ -794,14 +794,14 @@ class TestGetSinglePV(object):
api.get_single_pv()
assert "matched more than 1 PV present on this host." in str(e.value)
- @patch('ceph_volume.devices.lvm.prepare.api.get_pvs')
+ @patch('ceph_volume.api.lvm.get_pvs')
def test_get_single_pv_no_match_returns_none(self, m_get_pvs):
m_get_pvs.return_value = []
pv = api.get_single_pv()
assert pv == None
- @patch('ceph_volume.devices.lvm.prepare.api.get_pvs')
+ @patch('ceph_volume.api.lvm.get_pvs')
def test_get_single_pv_one_match(self, m_get_pvs):
fake_pvs = []
fake_pvs.append(api.PVolume(pv_name='/dev/sda', pv_tags={}))
@@ -815,7 +815,7 @@ class TestGetSinglePV(object):
class TestGetSingleVG(object):
- @patch('ceph_volume.devices.lvm.prepare.api.get_vgs')
+ @patch('ceph_volume.api.lvm.get_vgs')
def test_get_single_vg_multiple_matches_raises_runtimeerror(self, m_get_vgs):
fake_vgs = []
fake_vgs.append(api.VolumeGroup(vg_name='vg1'))
@@ -827,14 +827,14 @@ class TestGetSingleVG(object):
api.get_single_vg()
assert "matched more than 1 VG present on this host." in str(e.value)
- @patch('ceph_volume.devices.lvm.prepare.api.get_vgs')
+ @patch('ceph_volume.api.lvm.get_vgs')
def test_get_single_vg_no_match_returns_none(self, m_get_vgs):
m_get_vgs.return_value = []
vg = api.get_single_vg()
assert vg == None
- @patch('ceph_volume.devices.lvm.prepare.api.get_vgs')
+ @patch('ceph_volume.api.lvm.get_vgs')
def test_get_single_vg_one_match(self, m_get_vgs):
fake_vgs = []
fake_vgs.append(api.VolumeGroup(vg_name='vg1'))
@@ -847,7 +847,7 @@ class TestGetSingleVG(object):
class TestGetSingleLV(object):
- @patch('ceph_volume.devices.lvm.prepare.api.get_lvs')
+ @patch('ceph_volume.api.lvm.get_lvs')
def test_get_single_lv_multiple_matches_raises_runtimeerror(self, m_get_lvs):
fake_lvs = []
fake_lvs.append(api.Volume(lv_name='lv1',
@@ -866,14 +866,14 @@ class TestGetSingleLV(object):
api.get_single_lv()
assert "matched more than 1 LV present on this host" in str(e.value)
- @patch('ceph_volume.devices.lvm.prepare.api.get_lvs')
+ @patch('ceph_volume.api.lvm.get_lvs')
def test_get_single_lv_no_match_returns_none(self, m_get_lvs):
m_get_lvs.return_value = []
lv = api.get_single_lv()
assert lv == None
- @patch('ceph_volume.devices.lvm.prepare.api.get_lvs')
+ @patch('ceph_volume.api.lvm.get_lvs')
def test_get_single_lv_one_match(self, m_get_lvs):
fake_lvs = []
fake_lvs.append(api.Volume(lv_name='lv1', lv_path='/dev/vg1/lv1', vg_name='vg1', lv_tags='', lv_uuid='fake-uuid'))
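
The patch-target changes above are not cosmetic: `mock.patch` must replace the name in the namespace where it is looked up at call time. Since `get_single_pv()` now resolves `get_pvs` inside `ceph_volume.api.lvm` itself, patching a re-export in `devices.lvm.prepare` no longer intercepts the call. A self-contained illustration of the rule, using a toy module and function:

    from unittest.mock import patch

    class fake_api_module:            # stands in for ceph_volume.api.lvm
        @staticmethod
        def get_pvs():
            return ['real pv']

    def get_single_pv():
        # looks get_pvs up on the module at call time, so that is
        # the name a test has to patch
        return fake_api_module.get_pvs()

    with patch.object(fake_api_module, 'get_pvs', return_value=[]):
        assert get_single_pv() == []
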
diff --git a/src/ceph-volume/ceph_volume/tests/conftest.py b/src/ceph-volume/ceph_volume/tests/conftest.py
index 7a7c57d9721..fb43da7ab22 100644
--- a/src/ceph-volume/ceph_volume/tests/conftest.py
+++ b/src/ceph-volume/ceph_volume/tests/conftest.py
@@ -5,7 +5,7 @@ from ceph_volume.api import lvm
from ceph_volume.util import disk
from ceph_volume.util import device
from ceph_volume.util.constants import ceph_disk_guids
-from ceph_volume import conf, configuration
+from ceph_volume import conf, configuration, objectstore
class Capture(object):
@@ -36,6 +36,16 @@ class Factory(object):
def factory():
return Factory
+def objectstore_bluestore_factory(**kw):
+ o = objectstore.bluestore.BlueStore([])
+ for k, v in kw.items():
+ setattr(o, k, v)
+ return o
+
+@pytest.fixture
+def objectstore_bluestore():
+ return objectstore_bluestore_factory
+
@pytest.fixture
def capture():
@@ -58,30 +68,78 @@ def mock_lv_device_generator():
return dev
return mock_lv
-def mock_device():
+def mock_device(name='foo',
+ vg_name='vg_foo',
+ vg_size=None,
+ lv_name='lv_foo',
+ lv_size=None,
+ path='foo',
+ lv_path='',
+ number_lvs=0):
dev = create_autospec(device.Device)
- dev.path = '/dev/foo'
- dev.vg_name = 'vg_foo'
- dev.lv_name = 'lv_foo'
+ if vg_size is None:
+ dev.vg_size = [21474836480]
+ if lv_size is None:
+ lv_size = dev.vg_size
+ dev.lv_size = lv_size
+ dev.path = f'/dev/{path}'
+ dev.vg_name = f'{vg_name}'
+ dev.lv_name = f'{lv_name}'
+ dev.lv_path = lv_path if lv_path else f'/dev/{dev.vg_name}/{dev.lv_name}'
dev.symlink = None
dev.vgs = [lvm.VolumeGroup(vg_name=dev.vg_name, lv_name=dev.lv_name)]
dev.available_lvm = True
- dev.vg_size = [21474836480]
dev.vg_free = dev.vg_size
dev.lvs = []
+ for n in range(0, number_lvs):
+ dev.lvs.append(lvm.Volume(vg_name=f'{dev.vg_name}{n}',
+ lv_name=f'{dev.lv_name}-{n}',
+ lv_path=f'{dev.lv_path}-{n}',
+ lv_size=dev.lv_size,
+ lv_tags=''))
+ dev.is_device = True
return dev
@pytest.fixture(params=range(1,4))
def mock_devices_available(request):
ret = []
- for n in range(request.param):
- dev = mock_device()
- # after v15.2.8, a single VG is created for each PV
- dev.vg_name = f'vg_foo_{n}'
+ for n in range(1, request.param+1):
+ dev = mock_device(vg_name=f'vg_foo_{n}', lv_name='')
dev.vgs = [lvm.VolumeGroup(vg_name=dev.vg_name, lv_name=dev.lv_name)]
ret.append(dev)
return ret
+@pytest.fixture(params=range(2,5))
+def mock_devices_available_multi_pvs_per_vg(request):
+ ret = []
+ number_lvs = 1
+ for n in range(0, request.param):
+ if n == request.param - 1:
+ number_lvs = 2
+ dev = mock_device(path=f'foo{str(n)}',
+ vg_name='vg_foo',
+ lv_name=f'lv_foo{str(n)}',
+ lv_size=[21474836480],
+ number_lvs=number_lvs)
+ # unlike the single-PV fixture above, all PVs here share one VG
+ dev.vgs = [lvm.VolumeGroup(vg_name=dev.vg_name,
+ pv_name=dev.path,
+ pv_count=request.param)]
+ ret.append(dev)
+ return ret
+
@pytest.fixture
def mock_device_generator():
return mock_device
@@ -198,6 +256,13 @@ def is_root(monkeypatch):
"""
monkeypatch.setattr('os.getuid', lambda: 0)
+@pytest.fixture
+def is_non_root(monkeypatch):
+ """
+ Patch ``os.getuid()`` so that the process appears to run as a
+ non-root user, exercising ceph-volume's decorators that require root.
+ """
+ monkeypatch.setattr('os.getuid', lambda: 100)
@pytest.fixture
def tmpfile(tmpdir):
@@ -322,4 +387,120 @@ def fake_filesystem(fs):
fs.create_dir('/sys/block/sda/slaves')
fs.create_dir('/sys/block/sda/queue')
fs.create_dir('/sys/block/rbd0')
+ fs.create_dir('/var/log/ceph')
+ fs.create_dir('/tmp/osdpath')
yield fs
+
+@pytest.fixture
+def key_size(monkeypatch):
+ monkeypatch.setattr("ceph_volume.util.encryption.get_key_size_from_conf", lambda: 512)
+
+lvm_direct_report_data = {
+ '1': [{
+ 'lv_tags': 'ceph.block_device=/dev/ceph-40bc7bd7-4aee-483e-ba95-89a64bc8a4fd/osd-block-824f7edf-371f-4b75-9231-4ab62a32d5c0,ceph.block_uuid=kS7zXI-bpmu-3ciB-0rVY-d08b-gWDf-Y9oums,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=7dccab18-14cf-11ee-837b-5254008f8ca5,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.db_device=/dev/ceph-73d6d4db-6528-48f2-a4e2-1c82bc87a9ac/osd-db-b82d920d-be3c-4e4d-ba64-18f7e8445892,ceph.db_uuid=Kuvi0U-05vW-sETB-QiNW-lpaK-XBfD-82eQWw,ceph.encrypted=0,ceph.osd_fsid=824f7edf-371f-4b75-9231-4ab62a32d5c0,ceph.osd_id=1,ceph.osdspec_affinity=,ceph.type=block,ceph.vdo=0',
+ 'lv_path': '/dev/ceph-40bc7bd7-4aee-483e-ba95-89a64bc8a4fd/osd-block-824f7edf-371f-4b75-9231-4ab62a32d5c0',
+ 'lv_name': 'osd-block-824f7edf-371f-4b75-9231-4ab62a32d5c0',
+ 'vg_name': 'ceph-40bc7bd7-4aee-483e-ba95-89a64bc8a4fd',
+ 'lv_uuid': 'kS7zXI-bpmu-3ciB-0rVY-d08b-gWDf-Y9oums',
+ 'lv_size': '214744170496',
+ 'tags': {
+ 'ceph.block_device': '/dev/ceph-40bc7bd7-4aee-483e-ba95-89a64bc8a4fd/osd-block-824f7edf-371f-4b75-9231-4ab62a32d5c0',
+ 'ceph.block_uuid': 'kS7zXI-bpmu-3ciB-0rVY-d08b-gWDf-Y9oums',
+ 'ceph.cephx_lockbox_secret': '',
+ 'ceph.cluster_fsid': '7dccab18-14cf-11ee-837b-5254008f8ca5',
+ 'ceph.cluster_name': 'ceph',
+ 'ceph.crush_device_class': '',
+ 'ceph.db_device': '/dev/ceph-73d6d4db-6528-48f2-a4e2-1c82bc87a9ac/osd-db-b82d920d-be3c-4e4d-ba64-18f7e8445892',
+ 'ceph.db_uuid': 'Kuvi0U-05vW-sETB-QiNW-lpaK-XBfD-82eQWw',
+ 'ceph.encrypted': '0',
+ 'ceph.osd_fsid': '824f7edf-371f-4b75-9231-4ab62a32d5c0',
+ 'ceph.osd_id': '1',
+ 'ceph.osdspec_affinity': '',
+ 'ceph.type': 'block',
+ 'ceph.vdo': '0'
+ },
+ 'name': 'osd-block-824f7edf-371f-4b75-9231-4ab62a32d5c0',
+ 'type': 'block',
+ 'path': '/dev/ceph-40bc7bd7-4aee-483e-ba95-89a64bc8a4fd/osd-block-824f7edf-371f-4b75-9231-4ab62a32d5c0',
+ 'devices': ['/dev/vdc']
+ }, {
+ 'lv_tags': 'ceph.block_device=/dev/ceph-40bc7bd7-4aee-483e-ba95-89a64bc8a4fd/osd-block-824f7edf-371f-4b75-9231-4ab62a32d5c0,ceph.block_uuid=kS7zXI-bpmu-3ciB-0rVY-d08b-gWDf-Y9oums,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=7dccab18-14cf-11ee-837b-5254008f8ca5,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.db_device=/dev/ceph-73d6d4db-6528-48f2-a4e2-1c82bc87a9ac/osd-db-b82d920d-be3c-4e4d-ba64-18f7e8445892,ceph.db_uuid=Kuvi0U-05vW-sETB-QiNW-lpaK-XBfD-82eQWw,ceph.encrypted=0,ceph.osd_fsid=824f7edf-371f-4b75-9231-4ab62a32d5c0,ceph.osd_id=1,ceph.osdspec_affinity=,ceph.type=db,ceph.vdo=0',
+ 'lv_path': '/dev/ceph-73d6d4db-6528-48f2-a4e2-1c82bc87a9ac/osd-db-b82d920d-be3c-4e4d-ba64-18f7e8445892',
+ 'lv_name': 'osd-db-b82d920d-be3c-4e4d-ba64-18f7e8445892',
+ 'vg_name': 'ceph-73d6d4db-6528-48f2-a4e2-1c82bc87a9ac',
+ 'lv_uuid': 'Kuvi0U-05vW-sETB-QiNW-lpaK-XBfD-82eQWw',
+ 'lv_size': '214744170496',
+ 'tags': {
+ 'ceph.block_device': '/dev/ceph-40bc7bd7-4aee-483e-ba95-89a64bc8a4fd/osd-block-824f7edf-371f-4b75-9231-4ab62a32d5c0',
+ 'ceph.block_uuid': 'kS7zXI-bpmu-3ciB-0rVY-d08b-gWDf-Y9oums',
+ 'ceph.cephx_lockbox_secret': '',
+ 'ceph.cluster_fsid': '7dccab18-14cf-11ee-837b-5254008f8ca5',
+ 'ceph.cluster_name': 'ceph',
+ 'ceph.crush_device_class': '',
+ 'ceph.db_device': '/dev/ceph-73d6d4db-6528-48f2-a4e2-1c82bc87a9ac/osd-db-b82d920d-be3c-4e4d-ba64-18f7e8445892',
+ 'ceph.db_uuid': 'Kuvi0U-05vW-sETB-QiNW-lpaK-XBfD-82eQWw',
+ 'ceph.encrypted': '0',
+ 'ceph.osd_fsid': '824f7edf-371f-4b75-9231-4ab62a32d5c0',
+ 'ceph.osd_id': '1',
+ 'ceph.osdspec_affinity': '',
+ 'ceph.type': 'db',
+ 'ceph.vdo': '0'
+ },
+ 'name': 'osd-db-b82d920d-be3c-4e4d-ba64-18f7e8445892',
+ 'type': 'db',
+ 'path': '/dev/ceph-73d6d4db-6528-48f2-a4e2-1c82bc87a9ac/osd-db-b82d920d-be3c-4e4d-ba64-18f7e8445892',
+ 'devices': ['/dev/vdd']
+ }],
+ '0': [{
+ 'lv_tags': 'ceph.block_device=/dev/ceph-e34cc3f5-a70d-49df-82b3-46bcbd63d4b0/osd-block-a0e07c5b-bee1-4ea2-ae07-cb89deda9b27,ceph.block_uuid=cYBGv9-s2cn-FfEy-dGQh-VHci-5jj9-9l5kvH,ceph.cephx_lockbox_secret=,ceph.cluster_fsid=7dccab18-14cf-11ee-837b-5254008f8ca5,ceph.cluster_name=ceph,ceph.crush_device_class=,ceph.encrypted=0,ceph.osd_fsid=a0e07c5b-bee1-4ea2-ae07-cb89deda9b27,ceph.osd_id=0,ceph.osdspec_affinity=,ceph.type=block,ceph.vdo=0',
+ 'lv_path': '/dev/ceph-e34cc3f5-a70d-49df-82b3-46bcbd63d4b0/osd-block-a0e07c5b-bee1-4ea2-ae07-cb89deda9b27',
+ 'lv_name': 'osd-block-a0e07c5b-bee1-4ea2-ae07-cb89deda9b27',
+ 'vg_name': 'ceph-e34cc3f5-a70d-49df-82b3-46bcbd63d4b0',
+ 'lv_uuid': 'cYBGv9-s2cn-FfEy-dGQh-VHci-5jj9-9l5kvH',
+ 'lv_size': '214744170496',
+ 'tags': {
+ 'ceph.block_device': '/dev/ceph-e34cc3f5-a70d-49df-82b3-46bcbd63d4b0/osd-block-a0e07c5b-bee1-4ea2-ae07-cb89deda9b27',
+ 'ceph.block_uuid': 'cYBGv9-s2cn-FfEy-dGQh-VHci-5jj9-9l5kvH',
+ 'ceph.cephx_lockbox_secret': '',
+ 'ceph.cluster_fsid': '7dccab18-14cf-11ee-837b-5254008f8ca5',
+ 'ceph.cluster_name': 'ceph',
+ 'ceph.crush_device_class': '',
+ 'ceph.encrypted': '0',
+ 'ceph.osd_fsid': 'a0e07c5b-bee1-4ea2-ae07-cb89deda9b27',
+ 'ceph.osd_id': '0',
+ 'ceph.osdspec_affinity': '',
+ 'ceph.type': 'block',
+ 'ceph.vdo': '0'
+ },
+ 'name': 'osd-block-a0e07c5b-bee1-4ea2-ae07-cb89deda9b27',
+ 'type': 'block',
+ 'path': '/dev/ceph-e34cc3f5-a70d-49df-82b3-46bcbd63d4b0/osd-block-a0e07c5b-bee1-4ea2-ae07-cb89deda9b27',
+ 'devices': ['/dev/vdb1']
+ }]
+ }
+
+raw_direct_report_data = {
+ "824f7edf-371f-4b75-9231-4ab62a32d5c0": {
+ "ceph_fsid": "7dccab18-14cf-11ee-837b-5254008f8ca5",
+ "device": "/dev/mapper/ceph--40bc7bd7--4aee--483e--ba95--89a64bc8a4fd-osd--block--824f7edf--371f--4b75--9231--4ab62a32d5c0",
+ "device_db": "/dev/mapper/ceph--73d6d4db--6528--48f2--a4e2--1c82bc87a9ac-osd--db--b82d920d--be3c--4e4d--ba64--18f7e8445892",
+ "osd_id": 8,
+ "osd_uuid": "824f7edf-371f-4b75-9231-4ab62a32d5c0",
+ "type": "bluestore"
+ },
+ "a0e07c5b-bee1-4ea2-ae07-cb89deda9b27": {
+ "ceph_fsid": "7dccab18-14cf-11ee-837b-5254008f8ca5",
+ "device": "/dev/mapper/ceph--e34cc3f5--a70d--49df--82b3--46bcbd63d4b0-osd--block--a0e07c5b--bee1--4ea2--ae07--cb89deda9b27",
+ "osd_id": 9,
+ "osd_uuid": "a0e07c5b-bee1-4ea2-ae07-cb89deda9b27",
+ "type": "bluestore"
+ }
+}
+
+@pytest.fixture
+def mock_lvm_direct_report(monkeypatch):
+ monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.direct_report', lambda: lvm_direct_report_data)
+
+@pytest.fixture
+def mock_raw_direct_report(monkeypatch):
+ monkeypatch.setattr('ceph_volume.objectstore.rawbluestore.direct_report', lambda x: raw_direct_report_data) \ No newline at end of file
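
A hedged usage sketch for the `objectstore_bluestore` factory defined above; as in the activate tests below, `create_key` has to be patched because instantiating an objectstore otherwise shells out to generate a cephx key (the test name and device path are illustrative):

    from unittest.mock import patch

    @patch('ceph_volume.util.prepare.create_key', return_value='fake-secret')
    def test_wal_path_can_be_overridden(m_create_key, objectstore_bluestore):
        bs = objectstore_bluestore(wal_device_path='/dev/foo')
        assert bs.wal_device_path == '/dev/foo'
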
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py
index 5d48a0ef404..b44071026ad 100644
--- a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_activate.py
@@ -3,7 +3,10 @@ from copy import deepcopy
from ceph_volume.devices.lvm import activate
from ceph_volume.api import lvm as api
from ceph_volume.tests.conftest import Capture
-
+from ceph_volume import objectstore
+from mock import patch, call
+from argparse import Namespace
class Args(object):
@@ -16,44 +19,59 @@ class Args(object):
setattr(self, k, v)
+@patch('ceph_volume.util.prepare.create_key', return_value='fake-secret')
class TestActivate(object):
# these tests are very functional, hence the heavy patching, it is hard to
# test the negative side effect with an actual functional run, so we must
# setup a perfect scenario for this test to check it can really work
# with/without osd_id
- def test_no_osd_id_matches_fsid_bluestore(self, is_root, monkeypatch, capture):
- FooVolume = api.Volume(lv_name='foo', lv_path='/dev/vg/foo',
- lv_tags="ceph.osd_fsid=1234")
+ def test_no_osd_id_matches_fsid_bluestore(self,
+ m_create_key,
+ is_root,
+ monkeypatch,
+ capture):
+ FooVolume = api.Volume(lv_name='foo',
+ lv_path='/dev/vg/foo',
+ lv_tags="ceph.osd_fsid=1234")
volumes = []
volumes.append(FooVolume)
monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: volumes)
- monkeypatch.setattr(activate, 'activate_bluestore', capture)
+ monkeypatch.setattr(objectstore.lvmbluestore.LvmBlueStore,
+ '_activate',
+ capture)
+
args = Args(osd_id=None, osd_fsid='1234', bluestore=True)
- activate.Activate([]).activate(args)
+ a = activate.Activate([])
+ a.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=args)
+ a.objectstore.activate()
assert capture.calls[0]['args'][0] == [FooVolume]
- def test_osd_id_no_osd_fsid(self, is_root):
+ def test_osd_id_no_osd_fsid(self, m_create_key, is_root):
args = Args(osd_id=42, osd_fsid=None)
+ a = activate.Activate([])
+ a.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=args)
with pytest.raises(RuntimeError) as result:
- activate.Activate([]).activate(args)
+ a.objectstore.activate()
assert result.value.args[0] == 'could not activate osd.42, please provide the osd_fsid too'
- def test_no_osd_id_no_osd_fsid(self, is_root):
+ def test_no_osd_id_no_osd_fsid(self, m_create_key, is_root):
args = Args(osd_id=None, osd_fsid=None)
+ a = activate.Activate([])
+ a.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=args)
with pytest.raises(RuntimeError) as result:
- activate.Activate([]).activate(args)
+ a.objectstore.activate()
assert result.value.args[0] == 'Please provide both osd_id and osd_fsid'
- def test_bluestore_no_systemd(self, is_root, monkeypatch, capture):
+ def test_bluestore_no_systemd(self, m_create_key, is_root, monkeypatch, capture):
monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
fake_enable = Capture()
fake_start_osd = Capture()
monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
- monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
- monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'start_osd', fake_start_osd)
DataVolume = api.Volume(
lv_name='data',
lv_path='/dev/vg/data',
@@ -64,19 +82,21 @@ class TestActivate(object):
monkeypatch.setattr(api, 'get_lvs', lambda **kwargs: deepcopy(volumes))
args = Args(osd_id=None, osd_fsid='1234', no_systemd=True, bluestore=True)
- activate.Activate([]).activate(args)
+ a = activate.Activate([])
+ a.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=args)
+ a.objectstore.activate()
assert fake_enable.calls == []
assert fake_start_osd.calls == []
- def test_bluestore_systemd(self, is_root, monkeypatch, capture):
+ def test_bluestore_systemd(self, m_create_key, is_root, monkeypatch, capture):
monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
fake_enable = Capture()
fake_start_osd = Capture()
monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
- monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
- monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'start_osd', fake_start_osd)
DataVolume = api.Volume(
lv_name='data',
lv_path='/dev/vg/data',
@@ -88,19 +108,21 @@ class TestActivate(object):
args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
bluestore=True)
- activate.Activate([]).activate(args)
+ a = activate.Activate([])
+ a.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=args)
+ a.objectstore.activate()
assert fake_enable.calls != []
assert fake_start_osd.calls != []
- def test_bluestore_no_systemd_autodetect(self, is_root, monkeypatch, capture):
+ def test_bluestore_no_systemd_autodetect(self, m_create_key, is_root, monkeypatch, capture):
monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
fake_enable = Capture()
fake_start_osd = Capture()
monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw: True)
monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
- monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
- monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'start_osd', fake_start_osd)
DataVolume = api.Volume(
lv_name='data',
lv_path='/dev/vg/data',
@@ -112,11 +134,13 @@ class TestActivate(object):
args = Args(osd_id=None, osd_fsid='1234', no_systemd=True,
bluestore=True, auto_detect_objectstore=True)
- activate.Activate([]).activate(args)
+ a = activate.Activate([])
+ a.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=args)
+ a.objectstore.activate()
assert fake_enable.calls == []
assert fake_start_osd.calls == []
- def test_bluestore_systemd_autodetect(self, is_root, monkeypatch, capture):
+ def test_bluestore_systemd_autodetect(self, m_create_key, is_root, monkeypatch, capture):
monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
fake_enable = Capture()
fake_start_osd = Capture()
@@ -125,8 +149,8 @@ class TestActivate(object):
monkeypatch.setattr('ceph_volume.util.system.chown', lambda *a, **kw:
True)
monkeypatch.setattr('ceph_volume.process.run', lambda *a, **kw: True)
- monkeypatch.setattr(activate.systemctl, 'enable_volume', fake_enable)
- monkeypatch.setattr(activate.systemctl, 'start_osd', fake_start_osd)
+ monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'enable_volume', fake_enable)
+ monkeypatch.setattr(objectstore.lvmbluestore.systemctl, 'start_osd', fake_start_osd)
DataVolume = api.Volume(
lv_name='data',
lv_path='/dev/vg/data',
@@ -138,33 +162,37 @@ class TestActivate(object):
args = Args(osd_id=None, osd_fsid='1234', no_systemd=False,
bluestore=True, auto_detect_objectstore=False)
- activate.Activate([]).activate(args)
+ a = activate.Activate([])
+ a.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=args)
+ a.objectstore.activate()
assert fake_enable.calls != []
assert fake_start_osd.calls != []
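
Editor's note: the monkeypatch targets above move from activate.systemctl to objectstore.lvmbluestore.systemctl because a patch must land on the namespace the callee actually reads, not on the module the test happens to import. A minimal, self-contained pytest sketch of that pattern (plain Python, no ceph_volume imports; all names here are hypothetical stand-ins):

import types

# stand-in for the systemctl helper module the objectstore consults at call time
systemctl = types.SimpleNamespace(enable_volume=lambda *a: None,
                                  start_osd=lambda *a: None)

class FakeLvmBlueStore:
    def activate(self):
        systemctl.enable_volume('vg/lv')   # name resolved on the namespace at call time
        systemctl.start_osd('0')

def test_activate_triggers_systemd_units(monkeypatch):
    calls = []
    # patch the attribute where it is looked up, not where the caller imported it
    monkeypatch.setattr(systemctl, 'enable_volume', lambda *a: calls.append(('enable',) + a))
    monkeypatch.setattr(systemctl, 'start_osd', lambda *a: calls.append(('start',) + a))
    FakeLvmBlueStore().activate()
    assert calls == [('enable', 'vg/lv'), ('start', '0')]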
+
+@patch('ceph_volume.util.prepare.create_key', return_value='fake-secret')
+@patch('ceph_volume.objectstore.lvmbluestore.LvmBlueStore.activate_all')
+@patch('ceph_volume.objectstore.lvmbluestore.LvmBlueStore.activate')
class TestActivateFlags(object):
- def test_default_objectstore(self, capture):
+ def test_default_objectstore(self, m_activate, m_activate_all, m_create_key, capture):
args = ['0', 'asdf-ljh-asdf']
- activation = activate.Activate(args)
- activation.activate = capture
- activation.main()
- parsed_args = capture.calls[0]['args'][0]
- assert parsed_args.bluestore is False
- def test_uses_bluestore(self, capture):
+ a = activate.Activate(args)
+ a.main()
+ assert a.args.objectstore == 'bluestore'
+
+ def test_bluestore_backward_compatibility(self, m_activate, m_activate_all, m_create_key, capture):
args = ['--bluestore', '0', 'asdf-ljh-asdf']
- activation = activate.Activate(args)
- activation.activate = capture
- activation.main()
- parsed_args = capture.calls[0]['args'][0]
- assert parsed_args.bluestore is True
+ a = activate.Activate(args)
+ a.main()
+ assert a.args.objectstore == 'bluestore'
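
Both flag tests collapse to the same assertion because the newer --objectstore option defaults to 'bluestore' while the legacy --bluestore flag is kept only for backward compatibility. A minimal argparse sketch of that compatibility shim (hypothetical, reduced from the real parser):

import argparse

def parse(argv):
    p = argparse.ArgumentParser()
    p.add_argument('--bluestore', action='store_true')    # legacy flag
    p.add_argument('--objectstore', default='bluestore')  # new-style option
    p.add_argument('rest', nargs='*')
    args = p.parse_args(argv)
    if args.bluestore:            # legacy flag folds into the new option
        args.objectstore = 'bluestore'
    return args

assert parse(['0', 'asdf-ljh-asdf']).objectstore == 'bluestore'
assert parse(['--bluestore', '0', 'asdf-ljh-asdf']).objectstore == 'bluestore'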
+@patch('ceph_volume.util.prepare.create_key', return_value='fake-secret')
class TestActivateAll(object):
- def test_does_not_detect_osds(self, capsys, is_root, capture, monkeypatch):
- monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: {})
+ def test_does_not_detect_osds(self, m_create_key, capsys, is_root, monkeypatch):
+ monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.direct_report', lambda: {})
args = ['--all']
activation = activate.Activate(args)
activation.main()
@@ -172,9 +200,9 @@ class TestActivateAll(object):
assert 'Was unable to find any OSDs to activate' in err
assert 'Verify OSDs are present with ' in err
- def test_detects_running_osds(self, capsys, is_root, capture, monkeypatch):
- monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
- monkeypatch.setattr('ceph_volume.devices.lvm.activate.systemctl.osd_is_active', lambda x: True)
+ def test_detects_running_osds(self, m_create_key, capsys, is_root, capture, monkeypatch):
+ monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.direct_report', lambda: direct_report)
+ monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.systemctl.osd_is_active', lambda x: True)
args = ['--all']
activation = activate.Activate(args)
activation.main()
@@ -182,30 +210,66 @@ class TestActivateAll(object):
assert 'a8789a96ce8b process is active. Skipping activation' in err
assert 'b8218eaa1634 process is active. Skipping activation' in err
- def test_detects_osds_to_activate_systemd(self, is_root, capture, monkeypatch):
- monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
- monkeypatch.setattr('ceph_volume.devices.lvm.activate.systemctl.osd_is_active', lambda x: False)
- args = ['--all']
- activation = activate.Activate(args)
- activation.activate = capture
- activation.main()
- calls = sorted(capture.calls, key=lambda x: x['kwargs']['osd_id'])
- assert calls[0]['kwargs']['osd_id'] == '0'
- assert calls[0]['kwargs']['osd_fsid'] == '957d22b7-24ce-466a-9883-b8218eaa1634'
- assert calls[1]['kwargs']['osd_id'] == '1'
- assert calls[1]['kwargs']['osd_fsid'] == 'd0f3e4ad-e52a-4520-afc0-a8789a96ce8b'
+ @patch('ceph_volume.objectstore.lvmbluestore.LvmBlueStore.activate')
+ def test_detects_osds_to_activate_systemd(self, m_activate, m_create_key, is_root, monkeypatch):
+ monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.direct_report', lambda: direct_report)
+ monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.systemctl.osd_is_active', lambda x: False)
+ args = ['--all', '--bluestore']
+ a = activate.Activate(args)
+ a.main()
+ calls = [
+ call(Namespace(activate_all=True,
+ auto_detect_objectstore=False,
+ bluestore=True,
+ no_systemd=False,
+ no_tmpfs=False,
+ objectstore='bluestore',
+ osd_fsid=None,
+ osd_id=None),
+ osd_id='0',
+ osd_fsid='957d22b7-24ce-466a-9883-b8218eaa1634'),
+ call(Namespace(activate_all=True,
+ auto_detect_objectstore=False,
+ bluestore=True,
+ no_systemd=False,
+ no_tmpfs=False,
+ objectstore='bluestore',
+ osd_fsid=None,
+ osd_id=None),
+ osd_id='1',
+ osd_fsid='d0f3e4ad-e52a-4520-afc0-a8789a96ce8b')
+ ]
+ m_activate.assert_has_calls(calls)
- def test_detects_osds_to_activate_no_systemd(self, is_root, capture, monkeypatch):
- monkeypatch.setattr('ceph_volume.devices.lvm.activate.direct_report', lambda: direct_report)
- args = ['--all', '--no-systemd']
- activation = activate.Activate(args)
- activation.activate = capture
- activation.main()
- calls = sorted(capture.calls, key=lambda x: x['kwargs']['osd_id'])
- assert calls[0]['kwargs']['osd_id'] == '0'
- assert calls[0]['kwargs']['osd_fsid'] == '957d22b7-24ce-466a-9883-b8218eaa1634'
- assert calls[1]['kwargs']['osd_id'] == '1'
- assert calls[1]['kwargs']['osd_fsid'] == 'd0f3e4ad-e52a-4520-afc0-a8789a96ce8b'
+ @patch('ceph_volume.objectstore.lvmbluestore.LvmBlueStore.activate')
+ def test_detects_osds_to_activate_no_systemd(self, m_activate, m_create_key, is_root, monkeypatch):
+ monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.direct_report', lambda: direct_report)
+ args = ['--all', '--no-systemd', '--bluestore']
+ a = activate.Activate(args)
+ a.main()
+ calls = [
+ call(Namespace(activate_all=True,
+ auto_detect_objectstore=False,
+ bluestore=True,
+ no_systemd=True,
+ no_tmpfs=False,
+ objectstore='bluestore',
+ osd_fsid=None,
+ osd_id=None),
+ osd_id='0',
+ osd_fsid='957d22b7-24ce-466a-9883-b8218eaa1634'),
+ call(Namespace(activate_all=True,
+ auto_detect_objectstore=False,
+ bluestore=True,
+ no_systemd=True,
+ no_tmpfs=False,
+ objectstore='bluestore',
+ osd_fsid=None,
+ osd_id=None),
+ osd_id='1',
+ osd_fsid='d0f3e4ad-e52a-4520-afc0-a8789a96ce8b')
+ ]
+ m_activate.assert_has_calls(calls)
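
The assert_has_calls checks above work because argparse.Namespace compares equal by attribute values, so an expected mock.call can embed a freshly constructed Namespace. A reduced, runnable illustration (the Namespace is trimmed to a stub):

from argparse import Namespace
from unittest.mock import MagicMock, call

m_activate = MagicMock()
ns = Namespace(activate_all=True, osd_id=None, osd_fsid=None)
m_activate(ns, osd_id='0', osd_fsid='957d22b7-24ce-466a-9883-b8218eaa1634')
m_activate(ns, osd_id='1', osd_fsid='d0f3e4ad-e52a-4520-afc0-a8789a96ce8b')

# Namespace.__eq__ compares vars(), so an equal-valued Namespace matches
m_activate.assert_has_calls([
    call(ns, osd_id='0', osd_fsid='957d22b7-24ce-466a-9883-b8218eaa1634'),
    call(ns, osd_id='1', osd_fsid='d0f3e4ad-e52a-4520-afc0-a8789a96ce8b'),
])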
#
# Activate All fixture
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
index 75073c51aca..e26a733b09c 100644
--- a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_batch.py
@@ -5,7 +5,6 @@ import random
from argparse import ArgumentError
from mock import MagicMock, patch
-from ceph_volume.api import lvm
from ceph_volume.devices.lvm import batch
from ceph_volume.util import arg_validators
@@ -54,14 +53,14 @@ class TestBatch(object):
devices=devs,
db_devices=[],
wal_devices=[],
- bluestore=True,
+ objectstore='bluestore',
block_db_size="1G",
dmcrypt=True,
data_allocate_fraction=1.0,
)
b = batch.Batch([])
- plan = b.get_plan(args)
b.args = args
+ plan = b.get_deployment_layout()
b.report(plan)
@pytest.mark.parametrize('format_', ['json', 'json-pretty'])
@@ -77,14 +76,14 @@ class TestBatch(object):
devices=devs,
db_devices=[],
wal_devices=[],
- bluestore=True,
+ objectstore='bluestore',
block_db_size="1G",
dmcrypt=True,
data_allocate_fraction=1.0,
)
b = batch.Batch([])
- plan = b.get_plan(args)
b.args = args
+ plan = b.get_deployment_layout()
report = b._create_report(plan)
json.loads(report)
@@ -103,14 +102,15 @@ class TestBatch(object):
devices=devs,
db_devices=fast_devs,
wal_devices=[],
- bluestore=True,
+ objectstore='bluestore',
block_db_size="1G",
+ block_db_slots=1.0,
dmcrypt=True,
data_allocate_fraction=1.0,
)
b = batch.Batch([])
- plan = b.get_plan(args)
b.args = args
+ plan = b.get_deployment_layout()
report = b._create_report(plan)
json.loads(report)
@@ -121,6 +121,7 @@ class TestBatch(object):
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
devs = [mock_device_generator() for _ in range(5)]
fast_devs = [mock_device_generator()]
+ fast_devs[0].available_lvm = False
very_fast_devs = [mock_device_generator()]
very_fast_devs[0].available_lvm = False
args = factory(data_slots=1,
@@ -131,14 +132,15 @@ class TestBatch(object):
devices=devs,
db_devices=fast_devs,
wal_devices=very_fast_devs,
- bluestore=True,
+ objectstore='bluestore',
block_db_size="1G",
+ block_db_slots=5,
dmcrypt=True,
data_allocate_fraction=1.0,
)
b = batch.Batch([])
- plan = b.get_plan(args)
b.args = args
+ plan = b.get_deployment_layout()
report = b._create_report(plan)
json.loads(report)
@@ -250,35 +252,50 @@ class TestBatch(object):
for (_, _, slot_size, _) in fasts:
assert slot_size == expected_slot_size
- def test_get_physical_fast_allocs_abs_size_multi_pvs_per_vg(self, factory,
- conf_ceph_stub,
- mock_devices_available):
+ def test_get_physical_fast_allocs_abs_size_multi_pvs_per_vg(self,
+ factory,
+ conf_ceph_stub,
+ mock_device_generator,
+ mock_devices_available_multi_pvs_per_vg):
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
- args = factory(block_db_slots=None, get_block_db_size=None)
- dev_size = 21474836480
- num_devices = len(mock_devices_available)
+ data_devices = []
+ for i in range(len(mock_devices_available_multi_pvs_per_vg)+2):
+ data_devices.append(mock_device_generator(name='data',
+ vg_name=f'vg_foo_data{str(i)}',
+ lv_name=f'lv_foo_data{str(i)}'))
+ args = factory(block_db_slots=None,
+ block_db_size=None,
+ devices=[dev.lv_path for dev in data_devices])
+ dev_size = 53687091200
+ num_devices = len(mock_devices_available_multi_pvs_per_vg)
vg_size = dev_size * num_devices
- vg_name = 'vg_foo'
- for dev in mock_devices_available:
- dev.vg_name = vg_name
- dev.vg_size = [vg_size]
- dev.vg_free = dev.vg_size
- dev.vgs = [lvm.VolumeGroup(vg_name=dev.vg_name, lv_name=dev.lv_name)]
- slots_per_device = 2
- slots_per_vg = slots_per_device * num_devices
- fasts = batch.get_physical_fast_allocs(mock_devices_available,
- 'block_db', slots_per_device, 2, args)
- expected_slot_size = int(vg_size / slots_per_vg)
+ vg_free = vg_size
+ for dev in mock_devices_available_multi_pvs_per_vg:
+ for lv in dev.lvs:
+ vg_free -= lv.lv_size[0]
+ dev.vg_size = [vg_size] # override the `vg_size` set in mock_device() since it is one VG backed by multiple PVs
+ for dev in mock_devices_available_multi_pvs_per_vg:
+ dev.vg_free = [vg_free] # override the `vg_free` set in mock_device() since it is one VG backed by multiple PVs
+ b = batch.Batch([])
+ b.args = args
+ new_osds = len(data_devices) - len(mock_devices_available_multi_pvs_per_vg)
+ fasts = b.fast_allocations(mock_devices_available_multi_pvs_per_vg,
+ len(data_devices),
+ new_osds,
+ 'block_db')
+ expected_slot_size = int(vg_size / len(data_devices))
for (_, _, slot_size, _) in fasts:
assert slot_size == expected_slot_size
- def test_batch_fast_allocations_one_block_db_length(self, factory, conf_ceph_stub,
- mock_lv_device_generator):
+ def test_batch_fast_allocations_one_block_db_length(self,
+ factory, conf_ceph_stub,
+ mock_device_generator):
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
b = batch.Batch([])
- db_lv_devices = [mock_lv_device_generator()]
- fast = b.fast_allocations(db_lv_devices, 1, 0, 'block_db')
+ db_device = [mock_device_generator()]
+ fast = b.fast_allocations(db_device, 1, 1, 'block_db')
assert len(fast) == 1
@pytest.mark.parametrize('occupied_prior', range(7))
@@ -293,22 +310,24 @@ class TestBatch(object):
mock_device_generator):
conf_ceph_stub('[global]\nfsid=asdf-lkjh')
occupied_prior = min(occupied_prior, slots)
- devs = [mock_device_generator() for _ in range(num_devs)]
+ devs = [mock_device_generator(lv_name=f'foo{n}') for n in range(slots)]
+ dev_paths = [dev.path for dev in devs]
+ fast_devs = [mock_device_generator(lv_name=f'ssd{n}') for n in range(num_devs)]
already_assigned = 0
while already_assigned < occupied_prior:
dev_i = random.randint(0, num_devs - 1)
- dev = devs[dev_i]
+ dev = fast_devs[dev_i]
if len(dev.lvs) < occupied_prior:
dev.lvs.append('foo')
dev.path = '/dev/bar'
- already_assigned = sum([len(d.lvs) for d in devs])
- args = factory(block_db_slots=None, get_block_db_size=None)
- expected_num_osds = max(len(devs) * slots - occupied_prior, 0)
- fast = batch.get_physical_fast_allocs(devs,
+ already_assigned = sum([len(dev.lvs) for dev in fast_devs])
+ args = factory(block_db_slots=None, get_block_db_size=None, devices=dev_paths)
+ expected_num_osds = max(len(fast_devs) * slots - occupied_prior, 0)
+ fast = batch.get_physical_fast_allocs(fast_devs,
'block_db', slots,
expected_num_osds, args)
assert len(fast) == expected_num_osds
- expected_assignment_on_used_devices = sum([slots - len(d.lvs) for d in devs if len(d.lvs) > 0])
+ expected_assignment_on_used_devices = sum([slots - len(d.lvs) for d in fast_devs if len(d.lvs) > 0])
assert len([f for f in fast if f[0] == '/dev/bar']) == expected_assignment_on_used_devices
assert len([f for f in fast if f[0] != '/dev/bar']) == expected_num_osds - expected_assignment_on_used_devices
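
Rough numbers behind the expected_slot_size assertion in the multi-PV test above: the single VG's aggregate size is divided evenly across all data LVs, while vg_free is only reduced by LVs already carved out. Illustrative values (the real fixture's counts and sizes may differ):

dev_size = 53687091200                    # per-PV size used in the fixture
fast_pvs = 2                              # assumed number of PVs in the one VG
existing_lv_sizes = [1073741824]          # LVs already present on the VG (assumed)
vg_size = dev_size * fast_pvs
vg_free = vg_size - sum(existing_lv_sizes)
data_lvs = fast_pvs + 2                   # the test creates len(devs) + 2 data LVs
expected_slot_size = int(vg_size / data_lvs)
print(vg_free, expected_slot_size)        # 106300440576 26843545600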
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py
index 91e6155f38c..072d4f1ef35 100644
--- a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_migrate.py
@@ -524,7 +524,7 @@ class TestNew(object):
def mock_prepare_dmcrypt(self, *args, **kwargs):
return '/dev/mapper/' + kwargs['mapping']
- def test_newdb_non_root(self):
+ def test_newdb_non_root(self, is_non_root):
with pytest.raises(Exception) as error:
migrate.NewDB(argv=[
'--osd-id', '1',
@@ -533,9 +533,7 @@ class TestNew(object):
expected = 'This command needs to be executed with sudo or as root'
assert expected in str(error.value)
- @patch('os.getuid')
- def test_newdb_not_target_lvm(self, m_getuid, capsys):
- m_getuid.return_value = 0
+ def test_newdb_not_target_lvm(self, is_root, capsys):
with pytest.raises(SystemExit) as error:
migrate.NewDB(argv=[
'--osd-id', '1',
@@ -548,10 +546,7 @@ class TestNew(object):
assert expected in stderr
- @patch('os.getuid')
- def test_newdb_already_in_use(self, m_getuid, monkeypatch, capsys):
- m_getuid.return_value = 0
-
+ def test_newdb_already_in_use(self, is_root, monkeypatch, capsys):
self.mock_volume = api.Volume(lv_name='volume1',
lv_uuid='y',
vg_name='vg',
@@ -570,10 +565,7 @@ class TestNew(object):
expected = 'Target Logical Volume is already used by ceph: vgname/new_db'
assert expected in stderr
- @patch('os.getuid')
- def test_newdb(self, m_getuid, monkeypatch, capsys):
- m_getuid.return_value = 0
-
+ def test_newdb(self, is_root, monkeypatch, capsys):
source_tags = \
'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234,'\
'ceph.wal_uuid=wal_uuid,ceph.db_device=/dbdevice'
@@ -818,10 +810,7 @@ class TestNew(object):
'--dev-target', '/dev/VolGroup/target_volume',
'--command', 'bluefs-bdev-new-db']
- @patch('os.getuid')
- def test_newwal(self, m_getuid, monkeypatch, capsys):
- m_getuid.return_value = 0
-
+ def test_newwal(self, is_root, monkeypatch, capsys):
source_tags = \
'ceph.osd_id=0,ceph.type=data,ceph.osd_fsid=1234'
@@ -1225,9 +1214,7 @@ Example calls for supported scenarios:
assert not stderr
- @patch('os.getuid')
- def test_migrate_data_db_to_new_db(self, m_getuid, monkeypatch):
- m_getuid.return_value = 0
+ def test_migrate_data_db_to_new_db(self, is_root, monkeypatch):
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
@@ -1599,10 +1586,7 @@ Example calls for supported scenarios:
'--devs-source', '/var/lib/ceph/osd/ceph-2/block',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block.db']
- @patch('os.getuid')
- def test_migrate_data_db_to_new_db_skip_wal(self, m_getuid, monkeypatch):
- m_getuid.return_value = 0
-
+ def test_migrate_data_db_to_new_db_skip_wal(self, is_root, monkeypatch):
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
@@ -1721,10 +1705,7 @@ Example calls for supported scenarios:
'--devs-source', '/var/lib/ceph/osd/ceph-2/block',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block.db']
- @patch('os.getuid')
- def test_migrate_data_db_wal_to_new_db(self, m_getuid, monkeypatch):
- m_getuid.return_value = 0
-
+ def test_migrate_data_db_wal_to_new_db(self, is_root, monkeypatch):
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
@@ -1995,7 +1976,6 @@ Example calls for supported scenarios:
monkeypatch,
capsys):
m_getuid.return_value = 0
-
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev'
source_db_tags = 'ceph.osd_id=2,ceph.type=db,ceph.osd_fsid=1234,' \
@@ -2057,13 +2037,10 @@ Example calls for supported scenarios:
' please use new-db or new-wal command before.'
assert expected in stderr
- @patch('os.getuid')
def test_dont_migrate_db_to_wal(self,
- m_getuid,
+ is_root,
monkeypatch,
capsys):
- m_getuid.return_value = 0
-
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
@@ -2133,13 +2110,10 @@ Example calls for supported scenarios:
expected = 'Migrate to WAL is not supported'
assert expected in stderr
- @patch('os.getuid')
def test_migrate_data_db_to_db(self,
- m_getuid,
+ is_root,
monkeypatch,
capsys):
- m_getuid.return_value = 0
-
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
@@ -2360,13 +2334,10 @@ Example calls for supported scenarios:
'--command', 'bluefs-bdev-migrate',
'--devs-source', '/var/lib/ceph/osd/ceph-2/block']
- @patch('os.getuid')
def test_migrate_data_wal_to_db(self,
- m_getuid,
+ is_root,
monkeypatch,
capsys):
- m_getuid.return_value = 0
-
source_tags = 'ceph.osd_id=2,ceph.type=data,ceph.osd_fsid=1234,' \
'ceph.cluster_name=ceph,ceph.db_uuid=dbuuid,ceph.db_device=db_dev,' \
'ceph.wal_uuid=waluuid,ceph.wal_device=wal_dev'
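
The is_root/is_non_root fixtures that replace the repeated @patch('os.getuid') boilerplate throughout this file live in the suite's conftest; a plausible minimal sketch of them (an assumption, not the project's actual fixture code):

import os
import pytest

@pytest.fixture
def is_root(monkeypatch):
    # make the privilege check in the code under test see uid 0
    monkeypatch.setattr(os, 'getuid', lambda: 0)

@pytest.fixture
def is_non_root(monkeypatch):
    monkeypatch.setattr(os, 'getuid', lambda: 1000)

def test_runs_as_root(is_root):
    assert os.getuid() == 0

Requesting the fixture by name in the test signature replaces three lines of decorator plus return_value setup per test, which is the whole point of the churn above.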
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py
index 0a356988eeb..ec301d6eb93 100644
--- a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_prepare.py
@@ -2,6 +2,7 @@ import pytest
from ceph_volume.devices import lvm
from ceph_volume.api import lvm as api
from mock.mock import patch, Mock
+from ceph_volume import objectstore
class TestLVM(object):
@@ -24,102 +25,116 @@ class TestLVM(object):
assert 'Format an LVM device' in stdout
+@patch('ceph_volume.util.prepare.create_key', return_value='fake-secret')
class TestPrepareDevice(object):
- def test_cannot_use_device(self, factory):
+ def test_cannot_use_device(self, m_create_key, factory):
args = factory(data='/dev/var/foo')
with pytest.raises(RuntimeError) as error:
p = lvm.prepare.Prepare([])
- p.args = args
- p.prepare_data_device( 'data', '0')
+ p.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=args)
+ p.objectstore.prepare_data_device('data', '0')
assert 'Cannot use device (/dev/var/foo)' in str(error.value)
assert 'A vg/lv path or an existing device is needed' in str(error.value)
-
+@patch('ceph_volume.util.prepare.create_key', return_value='fake-secret')
class TestGetClusterFsid(object):
+ def setup_method(self):
+ self.p = lvm.prepare.Prepare([])
- def test_fsid_is_passed_in(self, factory):
+ def test_fsid_is_passed_in(self, m_create_key, factory):
args = factory(cluster_fsid='aaaa-1111')
- prepare_obj = lvm.prepare.Prepare([])
- prepare_obj.args = args
- assert prepare_obj.get_cluster_fsid() == 'aaaa-1111'
+ self.p.objectstore = objectstore.lvmbluestore.LvmBlueStore(args)
+ assert self.p.objectstore.get_cluster_fsid() == 'aaaa-1111'
- def test_fsid_is_read_from_ceph_conf(self, factory, conf_ceph_stub):
+ def test_fsid_is_read_from_ceph_conf(self, m_create_key, factory, conf_ceph_stub):
conf_ceph_stub('[global]\nfsid = bbbb-2222')
- prepare_obj = lvm.prepare.Prepare([])
- prepare_obj.args = factory(cluster_fsid=None)
- assert prepare_obj.get_cluster_fsid() == 'bbbb-2222'
+ args = factory(cluster_fsid='')
+ self.p.objectstore = objectstore.lvmbluestore.LvmBlueStore(args)
+ assert self.p.objectstore.get_cluster_fsid() == 'bbbb-2222'
+@patch('ceph_volume.util.prepare.create_key', return_value='fake-secret')
class TestPrepare(object):
- def test_main_spits_help_with_no_arguments(self, capsys):
+ def setup_method(self):
+ self.p = lvm.prepare.Prepare([])
+
+ def test_main_spits_help_with_no_arguments(self, m_create_key, capsys):
lvm.prepare.Prepare([]).main()
stdout, stderr = capsys.readouterr()
assert 'Prepare an OSD by assigning an ID and FSID' in stdout
- def test_main_shows_full_help(self, capsys):
+ def test_main_shows_full_help(self, m_create_key, capsys):
with pytest.raises(SystemExit):
lvm.prepare.Prepare(argv=['--help']).main()
stdout, stderr = capsys.readouterr()
assert 'Use the bluestore objectstore' in stdout
assert 'A physical device or logical' in stdout
- @patch('ceph_volume.devices.lvm.prepare.api.is_ceph_device')
- def test_safe_prepare_osd_already_created(self, m_is_ceph_device):
+ @patch('ceph_volume.api.lvm.is_ceph_device')
+ def test_safe_prepare_osd_already_created(self, m_create_key, m_is_ceph_device):
m_is_ceph_device.return_value = True
with pytest.raises(RuntimeError) as error:
- prepare = lvm.prepare.Prepare(argv=[])
- prepare.args = Mock()
- prepare.args.data = '/dev/sdfoo'
- prepare.get_lv = Mock()
- prepare.safe_prepare()
+ self.p.args = Mock()
+ self.p.args.data = '/dev/sdfoo'
+ self.p.get_lv = Mock()
+ self.p.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=self.p.args)
+ self.p.objectstore.safe_prepare()
expected = 'skipping {}, it is already prepared'.format('/dev/sdfoo')
assert expected in str(error.value)
- def test_setup_device_device_name_is_none(self):
- result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name=None, tags={'ceph.type': 'data'}, size=0, slots=None)
+ def test_setup_device_device_name_is_none(self, m_create_key):
+ self.p.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=[])
+ result = self.p.objectstore.setup_device(device_type='data',
+ device_name=None,
+ tags={'ceph.type': 'data'},
+ size=0,
+ slots=None)
assert result == ('', '', {'ceph.type': 'data'})
@patch('ceph_volume.api.lvm.Volume.set_tags')
- @patch('ceph_volume.devices.lvm.prepare.api.get_single_lv')
- def test_setup_device_lv_passed(self, m_get_single_lv, m_set_tags):
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_setup_device_lv_passed(self, m_get_single_lv, m_set_tags, m_create_key):
fake_volume = api.Volume(lv_name='lv_foo', lv_path='/fake-path', vg_name='vg_foo', lv_tags='', lv_uuid='fake-uuid')
m_get_single_lv.return_value = fake_volume
- result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='vg_foo/lv_foo', tags={'ceph.type': 'data'}, size=0, slots=None)
+ self.p.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=[])
+ result = self.p.objectstore.setup_device(device_type='data', device_name='vg_foo/lv_foo', tags={'ceph.type': 'data'}, size=0, slots=None)
assert result == ('/fake-path', 'fake-uuid', {'ceph.type': 'data',
'ceph.vdo': '0',
'ceph.data_uuid': 'fake-uuid',
'ceph.data_device': '/fake-path'})
- @patch('ceph_volume.devices.lvm.prepare.api.create_lv')
+ @patch('ceph_volume.api.lvm.create_lv')
@patch('ceph_volume.api.lvm.Volume.set_tags')
@patch('ceph_volume.util.disk.is_device')
- def test_setup_device_device_passed(self, m_is_device, m_set_tags, m_create_lv):
+ def test_setup_device_device_passed(self, m_is_device, m_set_tags, m_create_lv, m_create_key):
fake_volume = api.Volume(lv_name='lv_foo', lv_path='/fake-path', vg_name='vg_foo', lv_tags='', lv_uuid='fake-uuid')
m_is_device.return_value = True
m_create_lv.return_value = fake_volume
- result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None)
+ self.p.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=[])
+ result = self.p.objectstore.setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None)
assert result == ('/fake-path', 'fake-uuid', {'ceph.type': 'data',
'ceph.vdo': '0',
'ceph.data_uuid': 'fake-uuid',
'ceph.data_device': '/fake-path'})
- @patch('ceph_volume.devices.lvm.prepare.Prepare.get_ptuuid')
- @patch('ceph_volume.devices.lvm.prepare.api.get_single_lv')
- def test_setup_device_partition_passed(self, m_get_single_lv, m_get_ptuuid):
+ @patch('ceph_volume.objectstore.baseobjectstore.BaseObjectStore.get_ptuuid')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_setup_device_partition_passed(self, m_get_single_lv, m_get_ptuuid, m_create_key):
m_get_single_lv.side_effect = ValueError()
m_get_ptuuid.return_value = 'fake-uuid'
- result = lvm.prepare.Prepare([]).setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None)
+ self.p.objectstore = objectstore.lvmbluestore.LvmBlueStore(args=[])
+ result = self.p.objectstore.setup_device(device_type='data', device_name='/dev/sdx', tags={'ceph.type': 'data'}, size=0, slots=None)
assert result == ('/dev/sdx', 'fake-uuid', {'ceph.type': 'data',
'ceph.vdo': '0',
'ceph.data_uuid': 'fake-uuid',
'ceph.data_device': '/dev/sdx'})
- def test_invalid_osd_id_passed(self):
+ def test_invalid_osd_id_passed(self, m_create_key):
with pytest.raises(SystemExit):
lvm.prepare.Prepare(argv=['--osd-id', 'foo']).main()
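
The three setup_device cases above pin down one contract: a (path, uuid, tags) tuple comes back with the tags dict enriched per device type, and a None device name short-circuits to empty strings. A reduced stand-in that satisfies exactly those assertions (hypothetical, not the ceph_volume implementation):

def setup_device(device_type, device_name, tags, path, uuid):
    # device_name=None means nothing to set up for this device type
    if device_name is None:
        return '', '', tags
    tags = dict(tags)
    tags['ceph.vdo'] = '0'
    tags[f'ceph.{device_type}_uuid'] = uuid
    tags[f'ceph.{device_type}_device'] = path
    return path, uuid, tags

assert setup_device('data', None, {'ceph.type': 'data'}, '', '') == \
    ('', '', {'ceph.type': 'data'})
path, uuid, tags = setup_device('data', 'vg_foo/lv_foo',
                                {'ceph.type': 'data'}, '/fake-path', 'fake-uuid')
assert tags['ceph.data_device'] == '/fake-path'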
diff --git a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
index 2446c5ed665..51f66abfc78 100644
--- a/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
+++ b/src/ceph-volume/ceph_volume/tests/devices/lvm/test_zap.py
@@ -139,17 +139,6 @@ class TestEnsureAssociatedLVs(object):
out, err = capsys.readouterr()
assert "Zapping successful for OSD: 1" in err
- def test_block_and_partition_are_found(self, monkeypatch):
- monkeypatch.setattr(zap.disk, 'get_device_from_partuuid', lambda x: '/dev/sdb1')
- tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=block'
- osd = api.Volume(
- lv_name='volume1', lv_uuid='y', vg_name='', lv_path='/dev/VolGroup/block', lv_tags=tags)
- volumes = []
- volumes.append(osd)
- result = zap.ensure_associated_lvs(volumes)
- assert '/dev/sdb1' in result
- assert '/dev/VolGroup/block' in result
-
def test_journal_is_found(self, fake_call):
tags = 'ceph.osd_id=0,ceph.osd_fsid=asdf-lkjh,ceph.journal_uuid=x,ceph.type=journal'
osd = api.Volume(
@@ -211,7 +200,6 @@ class TestEnsureAssociatedLVs(object):
def test_ensure_associated_lvs(self, m_get_lvs):
zap.ensure_associated_lvs([], lv_tags={'ceph.osd_id': '1'})
calls = [
- call(tags={'ceph.type': 'journal', 'ceph.osd_id': '1'}),
call(tags={'ceph.type': 'db', 'ceph.osd_id': '1'}),
call(tags={'ceph.type': 'wal', 'ceph.osd_id': '1'})
]
diff --git a/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py b/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py
index 5ad501bab94..604fb4faa3e 100644
--- a/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py
+++ b/src/ceph-volume/ceph_volume/tests/devices/raw/test_list.py
@@ -44,23 +44,27 @@ def _devices_side_effect():
"/dev/sdb3": {},
"/dev/sdc": {},
"/dev/sdd": {},
+ "/dev/sde": {},
+ "/dev/sde1": {},
"/dev/mapper/ceph--osd--block--1": {},
"/dev/mapper/ceph--osd--block--2": {},
}
def _lsblk_all_devices(abspath=True):
return [
- {"NAME": "/dev/sda", "KNAME": "/dev/sda", "PKNAME": ""},
- {"NAME": "/dev/sda1", "KNAME": "/dev/sda1", "PKNAME": "/dev/sda"},
- {"NAME": "/dev/sda2", "KNAME": "/dev/sda2", "PKNAME": "/dev/sda"},
- {"NAME": "/dev/sda3", "KNAME": "/dev/sda3", "PKNAME": "/dev/sda"},
- {"NAME": "/dev/sdb", "KNAME": "/dev/sdb", "PKNAME": ""},
- {"NAME": "/dev/sdb2", "KNAME": "/dev/sdb2", "PKNAME": "/dev/sdb"},
- {"NAME": "/dev/sdb3", "KNAME": "/dev/sdb3", "PKNAME": "/dev/sdb"},
- {"NAME": "/dev/sdc", "KNAME": "/dev/sdc", "PKNAME": ""},
- {"NAME": "/dev/sdd", "KNAME": "/dev/sdd", "PKNAME": ""},
- {"NAME": "/dev/mapper/ceph--osd--block--1", "KNAME": "/dev/mapper/ceph--osd--block--1", "PKNAME": "/dev/sdd"},
- {"NAME": "/dev/mapper/ceph--osd--block--2", "KNAME": "/dev/mapper/ceph--osd--block--2", "PKNAME": "/dev/sdd"},
+ {"NAME": "/dev/sda", "KNAME": "/dev/sda", "PKNAME": "", "TYPE": "disk"},
+ {"NAME": "/dev/sda1", "KNAME": "/dev/sda1", "PKNAME": "/dev/sda", "TYPE": "part"},
+ {"NAME": "/dev/sda2", "KNAME": "/dev/sda2", "PKNAME": "/dev/sda", "TYPE": "part"},
+ {"NAME": "/dev/sda3", "KNAME": "/dev/sda3", "PKNAME": "/dev/sda", "TYPE": "part"},
+ {"NAME": "/dev/sdb", "KNAME": "/dev/sdb", "PKNAME": "", "TYPE": "disk"},
+ {"NAME": "/dev/sdb2", "KNAME": "/dev/sdb2", "PKNAME": "/dev/sdb", "TYPE": "part"},
+ {"NAME": "/dev/sdb3", "KNAME": "/dev/sdb3", "PKNAME": "/dev/sdb", "TYPE": "part"},
+ {"NAME": "/dev/sdc", "KNAME": "/dev/sdc", "PKNAME": "", "TYPE": "disk"},
+ {"NAME": "/dev/sdd", "KNAME": "/dev/sdd", "PKNAME": "", "TYPE": "disk"},
+ {"NAME": "/dev/sde", "KNAME": "/dev/sde", "PKNAME": "", "TYPE": "disk"},
+ {"NAME": "/dev/sde1", "KNAME": "/dev/sde1", "PKNAME": "/dev/sde", "TYPE": "part"},
+ {"NAME": "/dev/mapper/ceph--osd--block--1", "KNAME": "/dev/mapper/ceph--osd--block--1", "PKNAME": "/dev/sdd", "TYPE": "lvm"},
+ {"NAME": "/dev/mapper/ceph--osd--block--2", "KNAME": "/dev/mapper/ceph--osd--block--2", "PKNAME": "/dev/sdd", "TYPE": "lvm"},
]
# dummy lsblk output for device with optional parent output
@@ -116,6 +120,29 @@ def _bluestore_tool_label_output_sdb2():
}
}'''
+def _bluestore_tool_label_output_sde1():
+ return '''{
+ "/dev/sde1": {
+ "osd_uuid": "sde1-uuid",
+ "size": 214747316224,
+ "btime": "2023-07-26T13:20:19.509457+0000",
+ "description": "main",
+ "bfm_blocks": "268435456",
+ "bfm_blocks_per_key": "128",
+ "bfm_bytes_per_block": "4096",
+ "bfm_size": "214747316224",
+ "bluefs": "1",
+ "ceph_fsid": "sde1-fsid",
+ "kv_backend": "rocksdb",
+ "magic": "ceph osd volume v026",
+ "mkfs_done": "yes",
+ "osd_key": "AQCSHcFkUeLIMBAAjKqANkXafjvVISkXt6FGCA==",
+ "ready": "ready",
+ "require_osd_release": "16",
+ "whoami": "1"
+ }
+}'''
+
def _bluestore_tool_label_output_dm_okay():
return '''{
"/dev/mapper/ceph--osd--block--1": {
@@ -149,6 +176,8 @@ def _process_call_side_effect(command, **kw):
return _lsblk_output(dev, parent="/dev/sdb"), '', 0
if dev == "/dev/sda" or dev == "/dev/sdb" or dev == "/dev/sdc" or dev == "/dev/sdd":
return _lsblk_output(dev), '', 0
+ if dev == "/dev/sde1":
+ return _lsblk_output(dev, parent="/dev/sde"), '', 0
if "mapper" in dev:
return _lsblk_output(dev, parent="/dev/sdd"), '', 0
pytest.fail('dev {} needs behavior specified for it'.format(dev))
@@ -163,6 +192,8 @@ def _process_call_side_effect(command, **kw):
if "/dev/sdb2" in command:
# sdb2 is a phantom atari partition that appears to have some valid bluestore info
return _bluestore_tool_label_output_sdb2(), '', 0
+ if "/dev/sde1" in command:
+ return _bluestore_tool_label_output_sde1(), '', 0
if "/dev/mapper/ceph--osd--block--1" in command:
# dm device 1 is a valid bluestore OSD (the other is corrupted/invalid)
return _bluestore_tool_label_output_dm_okay(), '', 0
@@ -181,6 +212,10 @@ def _has_bluestore_label_side_effect(disk_path):
return False # empty disk
if disk_path == "/dev/sdd":
return False # has LVM subdevices
+ if disk_path == "/dev/sde":
+ return False # has partitions, so the disk itself should not be treated as an OSD
+ if disk_path == "/dev/sde1":
+ return True # is a valid OSD
if disk_path == "/dev/mapper/ceph--osd--block--1":
return True # good OSD
if disk_path == "/dev/mapper/ceph--osd--block--2":
@@ -209,13 +244,18 @@ class TestList(object):
assert sdb['device'] == '/dev/sdb'
assert sdb['ceph_fsid'] == 'sdb-fsid'
assert sdb['type'] == 'bluestore'
-
lvm1 = result['lvm-1-uuid']
assert lvm1['osd_uuid'] == 'lvm-1-uuid'
assert lvm1['osd_id'] == 2
assert lvm1['device'] == '/dev/mapper/ceph--osd--block--1'
assert lvm1['ceph_fsid'] == 'lvm-1-fsid'
assert lvm1['type'] == 'bluestore'
+ sde1 = result['sde1-uuid']
+ assert sde1['osd_uuid'] == 'sde1-uuid'
+ assert sde1['osd_id'] == 1
+ assert sde1['device'] == '/dev/sde1'
+ assert sde1['ceph_fsid'] == 'sde1-fsid'
+ assert sde1['type'] == 'bluestore'
@patch('ceph_volume.util.device.disk.get_devices')
@patch('ceph_volume.util.disk.has_bluestore_label')
@@ -234,5 +274,5 @@ class TestList(object):
patched_get_devices.side_effect = _devices_side_effect
result = raw.list.List([]).generate()
- assert len(result) == 3
+ assert len(result) == 2
assert 'sdb-uuid' in result
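
The new TYPE fields and the /dev/sde, /dev/sde1 rows exercise the rule the listing code now applies: a disk that carries partitions is never reported as an OSD itself, but each partition is probed on its own. A self-contained sketch of that filter (field names mirror the lsblk rows above; the logic is illustrative, not the raw.list implementation):

def osd_candidates(lsblk_rows):
    # disks that have at least one partition are skipped as whole devices
    parents_with_parts = {r['PKNAME'] for r in lsblk_rows if r['TYPE'] == 'part'}
    for row in lsblk_rows:
        if row['TYPE'] == 'disk' and row['NAME'] in parents_with_parts:
            continue  # e.g. /dev/sde: partitioned, so never an OSD itself
        yield row['NAME']

rows = [
    {'NAME': '/dev/sde', 'PKNAME': '', 'TYPE': 'disk'},
    {'NAME': '/dev/sde1', 'PKNAME': '/dev/sde', 'TYPE': 'part'},
    {'NAME': '/dev/sdc', 'PKNAME': '', 'TYPE': 'disk'},
]
assert list(osd_candidates(rows)) == ['/dev/sde1', '/dev/sdc']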
diff --git a/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py b/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py
index f814bbf136b..285bc8b5cdf 100644
--- a/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py
+++ b/src/ceph-volume/ceph_volume/tests/devices/raw/test_prepare.py
@@ -1,7 +1,7 @@
import pytest
from ceph_volume.devices import raw
-from mock.mock import patch
-
+from mock.mock import patch, MagicMock
+from ceph_volume import objectstore
class TestRaw(object):
@@ -22,15 +22,21 @@ class TestRaw(object):
assert 'prepare ' in stdout
assert 'Format a raw device' in stdout
-
+@patch('ceph_volume.util.prepare.create_key', return_value='fake-secret')
class TestPrepare(object):
+ def _setup(self, **kw):
+ args = kw.get('args', [])
+ self.p = raw.prepare.Prepare([])
+ self.p.objectstore = objectstore.rawbluestore.RawBlueStore(args=args)
+ for k, v in kw.items():
+ setattr(self.p.objectstore, k, v)
- def test_main_spits_help_with_no_arguments(self, capsys):
+ def test_main_spits_help_with_no_arguments(self, m_create_key, capsys):
raw.prepare.Prepare([]).main()
stdout, stderr = capsys.readouterr()
assert 'Prepare an OSD by assigning an ID and FSID' in stdout
- def test_main_shows_full_help(self, capsys):
+ def test_main_shows_full_help(self, m_create_key, capsys):
with pytest.raises(SystemExit):
raw.prepare.Prepare(argv=['--help']).main()
stdout, stderr = capsys.readouterr()
@@ -41,8 +47,13 @@ class TestPrepare(object):
assert 'Path to bluestore block.wal block device' in stdout
assert 'Enable device encryption via dm-crypt' in stdout
+ @patch('ceph_volume.util.arg_validators.set_dmcrypt_no_workqueue', return_value=MagicMock())
@patch('ceph_volume.util.arg_validators.ValidRawDevice.__call__')
- def test_prepare_dmcrypt_no_secret_passed(self, m_valid_device, capsys):
+ def test_prepare_dmcrypt_no_secret_passed(self,
+ m_valid_device,
+ m_set_dmcrypt_no_workqueue,
+ m_create_key,
+ capsys):
m_valid_device.return_value = '/dev/foo'
with pytest.raises(SystemExit):
raw.prepare.Prepare(argv=['--bluestore', '--data', '/dev/foo', '--dmcrypt']).main()
@@ -52,43 +63,52 @@ class TestPrepare(object):
@patch('ceph_volume.util.encryption.luks_open')
@patch('ceph_volume.util.encryption.luks_format')
@patch('ceph_volume.util.disk.lsblk')
- def test_prepare_dmcrypt_block(self, m_lsblk, m_luks_format, m_luks_open):
+ def test_prepare_dmcrypt_block(self, m_lsblk, m_luks_format, m_luks_open, m_create_key, factory):
m_lsblk.return_value = {'KNAME': 'foo'}
m_luks_format.return_value = True
m_luks_open.return_value = True
- result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'block', '123')
+ self._setup(block_device_path='/dev/foo',
+ osd_fsid='123',
+ secrets=dict(dmcrypt_key='foo'))
+ self.p.objectstore.prepare_dmcrypt()
m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-block-dmcrypt')
m_luks_format.assert_called_with('foo', '/dev/foo')
- assert result == '/dev/mapper/ceph-123-foo-block-dmcrypt'
+ assert self.p.objectstore.__dict__['block_device_path'] == '/dev/mapper/ceph-123-foo-block-dmcrypt'
@patch('ceph_volume.util.encryption.luks_open')
@patch('ceph_volume.util.encryption.luks_format')
@patch('ceph_volume.util.disk.lsblk')
- def test_prepare_dmcrypt_db(self, m_lsblk, m_luks_format, m_luks_open):
+ def test_prepare_dmcrypt_db(self, m_lsblk, m_luks_format, m_luks_open, m_create_key):
m_lsblk.return_value = {'KNAME': 'foo'}
m_luks_format.return_value = True
m_luks_open.return_value = True
- result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'db', '123')
- m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-db-dmcrypt')
- m_luks_format.assert_called_with('foo', '/dev/foo')
- assert result == '/dev/mapper/ceph-123-foo-db-dmcrypt'
+ self._setup(db_device_path='/dev/db-foo',
+ osd_fsid='456',
+ secrets=dict(dmcrypt_key='foo'))
+ self.p.objectstore.prepare_dmcrypt()
+ m_luks_open.assert_called_with('foo', '/dev/db-foo', 'ceph-456-foo-db-dmcrypt')
+ m_luks_format.assert_called_with('foo', '/dev/db-foo')
+ assert self.p.objectstore.__dict__['db_device_path'] == '/dev/mapper/ceph-456-foo-db-dmcrypt'
@patch('ceph_volume.util.encryption.luks_open')
@patch('ceph_volume.util.encryption.luks_format')
@patch('ceph_volume.util.disk.lsblk')
- def test_prepare_dmcrypt_wal(self, m_lsblk, m_luks_format, m_luks_open):
+ def test_prepare_dmcrypt_wal(self, m_lsblk, m_luks_format, m_luks_open, m_create_key):
m_lsblk.return_value = {'KNAME': 'foo'}
m_luks_format.return_value = True
m_luks_open.return_value = True
- result = raw.prepare.prepare_dmcrypt('foo', '/dev/foo', 'wal', '123')
- m_luks_open.assert_called_with('foo', '/dev/foo', 'ceph-123-foo-wal-dmcrypt')
- m_luks_format.assert_called_with('foo', '/dev/foo')
- assert result == '/dev/mapper/ceph-123-foo-wal-dmcrypt'
+ self._setup(wal_device_path='/dev/wal-foo',
+ osd_fsid='789',
+ secrets=dict(dmcrypt_key='foo'))
+ self.p.objectstore.prepare_dmcrypt()
+ m_luks_open.assert_called_with('foo', '/dev/wal-foo', 'ceph-789-foo-wal-dmcrypt')
+ m_luks_format.assert_called_with('foo', '/dev/wal-foo')
+ assert self.p.objectstore.__dict__['wal_device_path'] == '/dev/mapper/ceph-789-foo-wal-dmcrypt'
- @patch('ceph_volume.devices.raw.prepare.rollback_osd')
- @patch('ceph_volume.devices.raw.prepare.Prepare.prepare')
+ @patch('ceph_volume.objectstore.rawbluestore.rollback_osd')
+ @patch('ceph_volume.objectstore.rawbluestore.RawBlueStore.prepare')
@patch('ceph_volume.util.arg_validators.ValidRawDevice.__call__')
- def test_safe_prepare_exception_raised(self, m_valid_device, m_prepare, m_rollback_osd):
+ def test_safe_prepare_exception_raised(self, m_valid_device, m_prepare, m_rollback_osd, m_create_key):
m_valid_device.return_value = '/dev/foo'
m_prepare.side_effect=Exception('foo')
m_rollback_osd.return_value = 'foobar'
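
The dmcrypt tests above encode a behavioural shift: instead of a free function returning the mapper path, the objectstore now rewrites its own *_device_path attributes in place. A minimal model of that contract (class and attribute names are stand-ins; in the real tests the 'foo' segment comes from the mocked lsblk KNAME, hardcoded here):

class FakeRawBlueStore:
    def __init__(self, osd_fsid, **paths):
        self.osd_fsid = osd_fsid
        self.__dict__.update(paths)

    def prepare_dmcrypt(self):
        for kind in ('block', 'db', 'wal'):
            attr = f'{kind}_device_path'
            if getattr(self, attr, None):
                # luks_format/luks_open would run here against the raw device
                setattr(self, attr,
                        f'/dev/mapper/ceph-{self.osd_fsid}-foo-{kind}-dmcrypt')

o = FakeRawBlueStore('123', block_device_path='/dev/foo')
o.prepare_dmcrypt()
assert o.block_device_path == '/dev/mapper/ceph-123-foo-block-dmcrypt'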
diff --git a/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py b/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py
index 152ac9b09e2..ae7e52e518b 100644
--- a/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py
+++ b/src/ceph-volume/ceph_volume/tests/devices/simple/test_activate.py
@@ -1,11 +1,13 @@
import os
import pytest
from ceph_volume.devices.simple import activate
+from mock.mock import patch
class TestActivate(object):
- def test_no_data_uuid(self, factory, is_root, monkeypatch, capture, fake_filesystem):
+ @patch('ceph_volume.decorators.os.getuid', return_value=0)
+ def test_no_data_uuid(self, m_getuid, factory, capture, fake_filesystem):
fake_filesystem.create_file('/tmp/json-config', contents='{}')
args = factory(osd_id='0', osd_fsid='1234', json_config='/tmp/json-config')
with pytest.raises(RuntimeError):
@@ -22,7 +24,7 @@ class TestActivate(object):
stdout, stderr = capsys.readouterr()
assert 'Activate OSDs by mounting devices previously configured' in stdout
- def test_activate_all(self, is_root, monkeypatch):
+ def test_activate_all(self, monkeypatch):
'''
make sure Activate calls activate for each file returned by glob
'''
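
One last note on the mock-parameter threading seen throughout these tests (m_getuid, m_create_key, and friends): stacked @patch decorators inject their mocks bottom-up, closest decorator first, ahead of any pytest fixtures in the signature. A standalone demonstration using only stdlib targets:

from unittest.mock import patch

@patch('os.path.exists', return_value=True)   # outer decorator -> injected second
@patch('os.getcwd', return_value='/fake')     # inner decorator -> injected first
def check(m_getcwd, m_exists):
    assert m_getcwd() == '/fake'
    assert m_exists('x') is True

check()

This is why a class-level @patch such as the create_key one prepends the same mock argument to every test method it wraps.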
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile
index 16076e42452..16076e42452 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/Vagrantfile
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/group_vars/all
index c3808c1d7c9..c3808c1d7c9 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/group_vars/all
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/group_vars/all
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/hosts
index e1c1de6f821..e1c1de6f821 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/hosts
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/hosts
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/setup.yml
index 8cf11d4ef8e..8cf11d4ef8e 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/setup.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/setup.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/test.yml
index 66d44c728e1..66d44c728e1 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/test.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml
index cb969fa1de6..cb969fa1de6 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/test_zap.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml
index d21531f6cec..d21531f6cec 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt-explicit/vagrant_variables.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/Vagrantfile
index 16076e42452..16076e42452 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/Vagrantfile
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/Vagrantfile
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/group_vars/all
index c3808c1d7c9..c3808c1d7c9 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/group_vars/all
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/group_vars/all
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/hosts
index e1c1de6f821..e1c1de6f821 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/hosts
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/hosts
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/setup.yml
index 8cf11d4ef8e..8cf11d4ef8e 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/setup.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/setup.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/test.yml
index aa867bcde1a..aa867bcde1a 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/test.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/test_zap.yml
index cb969fa1de6..cb969fa1de6 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/test_zap.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/test_zap.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/vagrant_variables.yml
index d21531f6cec..d21531f6cec 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-dmcrypt/vagrant_variables.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-dmcrypt/vagrant_variables.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/Vagrantfile
index 16076e42452..16076e42452 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/Vagrantfile
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/Vagrantfile
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/group_vars/all
index c3808c1d7c9..c3808c1d7c9 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/group_vars/all
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/group_vars/all
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/hosts
index e1c1de6f821..e1c1de6f821 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/hosts
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/hosts
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/setup.yml
index 8cf11d4ef8e..8cf11d4ef8e 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/setup.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/setup.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/test.yml
index 66d44c728e1..66d44c728e1 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/test.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/test_zap.yml
index cb969fa1de6..cb969fa1de6 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/test_zap.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/test_zap.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/vagrant_variables.yml
index d21531f6cec..d21531f6cec 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type-explicit/vagrant_variables.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type-explicit/vagrant_variables.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/Vagrantfile
index 16076e42452..16076e42452 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/Vagrantfile
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/Vagrantfile
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/group_vars/all
index c3808c1d7c9..c3808c1d7c9 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/group_vars/all
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/group_vars/all
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/hosts
index e1c1de6f821..e1c1de6f821 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/hosts
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/hosts
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/setup.yml
index 8cf11d4ef8e..8cf11d4ef8e 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/setup.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/setup.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/test.yml
index aa867bcde1a..aa867bcde1a 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/test.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/test_zap.yml
index cb969fa1de6..cb969fa1de6 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/test_zap.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/test_zap.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/vagrant_variables.yml
index d21531f6cec..d21531f6cec 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/mixed-type/vagrant_variables.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/mixed-type/vagrant_variables.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/Vagrantfile
index 16076e42452..16076e42452 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/Vagrantfile
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/Vagrantfile
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/group_vars/all
index 1e6ea00804d..1e6ea00804d 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/group_vars/all
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/group_vars/all
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/hosts
index e1c1de6f821..e1c1de6f821 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/hosts
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/hosts
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/setup.yml
index 30874dfbb95..30874dfbb95 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/setup.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/setup.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/test.yml
index aa867bcde1a..aa867bcde1a 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/test.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/test_zap.yml
index cb969fa1de6..cb969fa1de6 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/test_zap.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/test_zap.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/vagrant_variables.yml
index d21531f6cec..d21531f6cec 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type-dmcrypt/vagrant_variables.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type-dmcrypt/vagrant_variables.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/Vagrantfile
index 16076e42452..16076e42452 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/Vagrantfile
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/Vagrantfile
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/group_vars/all
index 1e6ea00804d..1e6ea00804d 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/group_vars/all
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/group_vars/all
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/hosts b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/hosts
index e1c1de6f821..e1c1de6f821 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/hosts
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/hosts
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/setup.yml
index 30874dfbb95..30874dfbb95 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/setup.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/setup.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/test.yml
index aa867bcde1a..aa867bcde1a 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/test.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/test_zap.yml
index cb969fa1de6..cb969fa1de6 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/test_zap.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/test_zap.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/vagrant_variables.yml
index d21531f6cec..d21531f6cec 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/centos8/bluestore/single-type/vagrant_variables.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/centos/bluestore/single-type/vagrant_variables.yml
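Note: the long run of hunks above carries no content changes. The index lines show identical blob hashes on both sides, so these are pure renames of the test fixtures from centos8/ to centos/; mode 120000 marks the symlinked files, 100644 the regular ones. A small Python decoder of git's mode encoding, for reference while reading these headers:

GIT_MODES = {
    "100644": "regular file",
    "100755": "executable file",
    "120000": "symlink",
    "040000": "tree",
    "160000": "gitlink (submodule)",
}

def describe_mode(mode: str) -> str:
    # Map the six-digit mode from a diff index line to its meaning.
    return GIT_MODES.get(mode, "unknown")

assert describe_mode("120000") == "symlink"
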
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml
index 5d5bc59f291..17f200c9dd3 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test.yml
@@ -14,10 +14,10 @@
tasks:
- name: mark osds down
- command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ command: "ceph osd down osd.{{ item }}"
with_items: "{{ osd_ids }}"
- name: purge osds
- command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it"
+ command: "ceph osd purge osd.{{ item }} --yes-i-really-mean-it"
with_items: "{{ osd_ids }}"
- hosts: osds
@@ -25,18 +25,18 @@
tasks:
- name: zap devices used for OSDs
- command: "ceph-volume --cluster {{ cluster }} lvm zap {{ item }} --destroy"
+ command: "ceph-volume lvm zap {{ item }} --destroy"
with_items: "{{ devices }}"
environment:
CEPH_VOLUME_DEBUG: 1
- name: batch create devices again
- command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
+ command: "ceph-volume lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
environment:
CEPH_VOLUME_DEBUG: 1
- name: ensure batch create is idempotent
- command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
+ command: "ceph-volume lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
register: batch_cmd
failed_when: false
environment:
@@ -50,7 +50,7 @@
- "'strategy changed' not in batch_cmd.stderr"
- name: run batch --report to see if devices get filtered
- command: "ceph-volume --cluster {{ cluster }} lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
+ command: "ceph-volume lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices | join(' ') }}"
register: report_cmd
failed_when: false
environment:
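Note: the recurring change in this and the following playbooks is the removal of `--cluster {{ cluster }}` from every ceph and ceph-volume invocation; the tests now assume the default cluster name "ceph", matching the removal of `cluster: test` from the group_vars files further down. A minimal sketch of the simplified command construction (zap_cmd is a hypothetical helper, not part of ceph-volume):

import shlex

def zap_cmd(device: str, destroy: bool = True) -> list:
    # Build the ceph-volume zap invocation the updated playbooks run,
    # with no cluster-name argument.
    cmd = ["ceph-volume", "lvm", "zap", device]
    if destroy:
        cmd.append("--destroy")
    return cmd

assert zap_cmd("/dev/vdd1") == shlex.split("ceph-volume lvm zap /dev/vdd1 --destroy")
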
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml
index 1ff0acc9dec..2581f5c4615 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_explicit.yml
@@ -14,10 +14,10 @@
tasks:
- name: mark osds down
- command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ command: "ceph osd down osd.{{ item }}"
with_items: "{{ osd_ids }}"
- name: purge osds
- command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it"
+ command: "ceph osd purge osd.{{ item }} --yes-i-really-mean-it"
with_items: "{{ osd_ids }}"
- hosts: osds
@@ -27,18 +27,18 @@
tasks:
- name: zap devices used for OSDs
- command: "ceph-volume --cluster {{ cluster }} lvm zap {{ item }} --destroy"
+ command: "ceph-volume lvm zap {{ item }} --destroy"
with_items: "{{ devices }}"
environment:
CEPH_VOLUME_DEBUG: 1
- name: batch create devices again
- command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
+ command: "ceph-volume lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
environment:
CEPH_VOLUME_DEBUG: 1
- name: ensure batch create is idempotent when all data devices are filtered
- command: "ceph-volume --cluster {{ cluster }} lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
+ command: "ceph-volume lvm batch --yes --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
register: batch_cmd
failed_when: false
environment:
@@ -51,7 +51,7 @@
- batch_cmd.rc != 0
- name: run batch --report to see if devices get filtered
- command: "ceph-volume --cluster {{ cluster }} lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
+ command: "ceph-volume lvm batch --report --format=json --{{ osd_objectstore|default('bluestore') }} {{ '--dmcrypt' if dmcrypt|default(false) else '' }} {{ devices[:2] | join(' ') }} {{ external_devices }} {{ devices[2:] | join(' ') }}"
register: report_cmd
failed_when: false
environment:
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml
index 9d63df9e0fc..4408288c8d1 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/playbooks/test_zap.yml
@@ -15,10 +15,10 @@
tasks:
- name: mark osds down
- command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ command: "ceph osd down osd.{{ item }}"
with_items: "{{ osd_ids }}"
- name: purge osds
- command: "ceph --cluster {{ cluster }} osd purge osd.{{ item }} --yes-i-really-mean-it"
+ command: "ceph osd purge osd.{{ item }} --yes-i-really-mean-it"
with_items: "{{ osd_ids }}"
@@ -27,7 +27,7 @@
tasks:
- name: zap devices used for OSDs
- command: "ceph-volume --cluster {{ cluster }} lvm zap --osd-id {{ item }} --destroy"
+ command: "ceph-volume lvm zap --osd-id {{ item }} --destroy"
with_items: "{{ osd_ids }}"
environment:
CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
index bc50be8101b..ede3868b9fe 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
+++ b/src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = centos8-bluestore-{single_type,single_type_dmcrypt,mixed_type,mixed_type_dmcrypt,mixed_type_explicit,mixed_type_dmcrypt_explicit}
+envlist = centos-bluestore-{single_type,single_type_dmcrypt,mixed_type,mixed_type_dmcrypt,mixed_type_explicit,mixed_type_dmcrypt_explicit}
skipsdist = True
[testenv]
@@ -18,17 +18,18 @@ setenv=
VAGRANT_CWD = {changedir}
CEPH_VOLUME_DEBUG = 1
DEBIAN_FRONTEND=noninteractive
+ ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections
changedir=
- centos8-bluestore-single_type: {toxinidir}/centos8/bluestore/single-type
- centos8-bluestore-single_type_dmcrypt: {toxinidir}/centos8/bluestore/single-type-dmcrypt
- centos8-bluestore-mixed_type: {toxinidir}/centos8/bluestore/mixed-type
- centos8-bluestore-mixed_type_dmcrypt: {toxinidir}/centos8/bluestore/mixed-type-dmcrypt
- centos8-bluestore-mixed_type_explicit: {toxinidir}/centos8/bluestore/mixed-type-explicit
- centos8-bluestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos8/bluestore/mixed-type-dmcrypt-explicit
+ centos-bluestore-single_type: {toxinidir}/centos/bluestore/single-type
+ centos-bluestore-single_type_dmcrypt: {toxinidir}/centos/bluestore/single-type-dmcrypt
+ centos-bluestore-mixed_type: {toxinidir}/centos/bluestore/mixed-type
+ centos-bluestore-mixed_type_dmcrypt: {toxinidir}/centos/bluestore/mixed-type-dmcrypt
+ centos-bluestore-mixed_type_explicit: {toxinidir}/centos/bluestore/mixed-type-explicit
+ centos-bluestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos/bluestore/mixed-type-dmcrypt-explicit
commands=
git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch {env:CEPH_ANSIBLE_CLONE:"https://github.com/ceph/ceph-ansible.git"} {envdir}/tmp/ceph-ansible
python -m pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
- ansible-galaxy install -r {envdir}/tmp/ceph-ansible/requirements.yml -v
+ ansible-galaxy collection install -r {envdir}/tmp/ceph-ansible/requirements.yml -v -p {envdir}/ansible_collections
# bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:""} {posargs:--provider=virtualbox}
bash {toxinidir}/../scripts/vagrant_up.sh {posargs:--provider=virtualbox}
@@ -42,9 +43,6 @@ commands=
# use ceph-ansible to deploy a ceph cluster on the vms
ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
- # prepare nodes for testing with testinfra
- ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
-
# test cluster state using testinfra
py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
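Note: the tox.ini change above switches from the legacy role-style `ansible-galaxy install` to `ansible-galaxy collection install`, pinning the collections into a per-environment directory via `-p` and pointing Ansible at it through ANSIBLE_COLLECTIONS_PATH, so runs neither depend on nor pollute ~/.ansible/collections; the separate setup.yml preparation step is also dropped. A rough Python equivalent of the bootstrap the env now performs (TOX_ENV_DIR is an assumption standing in for tox's {envdir}):

import os
import subprocess

envdir = os.environ.get("TOX_ENV_DIR", "/tmp/toxenv")  # stand-in for {envdir}
collections = os.path.join(envdir, "ansible_collections")

# Install the required collections into the env-local path and make
# Ansible resolve them from there.
subprocess.run(
    ["ansible-galaxy", "collection", "install",
     "-r", os.path.join(envdir, "tmp/ceph-ansible/requirements.yml"),
     "-p", collections, "-v"],
    env={**os.environ, "ANSIBLE_COLLECTIONS_PATH": collections},
    check=True,
)
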
diff --git a/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore
index ca0146b19fe..1a4fadc1067 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore
+++ b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore
@@ -1,10 +1,8 @@
---
ceph_dev: True
-cluster: test
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
osd_objectstore: "bluestore"
osd_scenario: lvm
num_osds: 2
diff --git a/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm
index c333af3e522..40abe4c8c6a 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm
+++ b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm
@@ -1,10 +1,8 @@
---
ceph_dev: True
-cluster: test
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
diff --git a/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm_dmcrypt b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm_dmcrypt
index 3cd68aaf1db..5f8eb38274a 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm_dmcrypt
+++ b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_lvm_dmcrypt
@@ -2,10 +2,8 @@
dmcrypt: True
ceph_dev: True
-cluster: test
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
journal_size: 100
osd_objectstore: "bluestore"
osd_scenario: lvm
diff --git a/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_single b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_single
index e43b14a75a4..688d65352d8 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_single
+++ b/src/ceph-volume/ceph_volume/tests/functional/group_vars/bluestore_single
@@ -1,10 +1,8 @@
---
ceph_dev: True
-cluster: test
public_network: "192.168.3.0/24"
cluster_network: "192.168.4.0/24"
-monitor_interface: eth1
osd_objectstore: "bluestore"
osd_scenario: lvm
ceph_origin: 'repository'
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/Vagrantfile
index 16076e42452..16076e42452 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/Vagrantfile
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/Vagrantfile
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/group_vars/all
index 5a7af3be06b..5a7af3be06b 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/group_vars/all
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/group_vars/all
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/hosts
index e1c1de6f821..e1c1de6f821 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/hosts
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/hosts
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/setup.yml
index 1c1a3ce8dfe..1c1a3ce8dfe 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/setup.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/setup.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/test.yml
index 165d9da291e..165d9da291e 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/test.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/test.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/vagrant_variables.yml
index d21531f6cec..d21531f6cec 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/create/vagrant_variables.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/create/vagrant_variables.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/Vagrantfile
index 16076e42452..16076e42452 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/Vagrantfile
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/Vagrantfile
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/group_vars/all
index 6ef6a98441e..6ef6a98441e 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/group_vars/all
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/group_vars/all
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/hosts
index e1c1de6f821..e1c1de6f821 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/hosts
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/hosts
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/setup.yml
index 1c1a3ce8dfe..1c1a3ce8dfe 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/setup.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/setup.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/test.yml
index 0a47b5eb851..c35591ca033 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/test.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/test.yml
@@ -16,20 +16,20 @@
become: yes
tasks:
- name: mark osds down
- command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ command: "ceph osd down osd.{{ item }}"
with_items:
- 0
- 2
- name: destroy osd.2
- command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+ command: "ceph osd destroy osd.2 --yes-i-really-mean-it"
register: result
retries: 30
delay: 1
until: result is succeeded
- name: destroy osd.0
- command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+ command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
register: result
retries: 30
delay: 1
@@ -41,7 +41,7 @@
# osd.2 device
- name: zap /dev/vdd1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
+ command: "ceph-volume lvm zap /dev/vdd1 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1
@@ -57,18 +57,18 @@
state: present
- name: redeploy osd.2 using /dev/vdd1
- command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2"
+ command: "ceph-volume lvm create --bluestore --data /dev/vdd1 --osd-id 2"
environment:
CEPH_VOLUME_DEBUG: 1
# osd.0 lv
- name: zap test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ command: "ceph-volume lvm zap test_group/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1
- name: redeploy osd.0 using test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
+ command: "ceph-volume lvm create --bluestore --data test_group/data-lv1 --osd-id 0"
environment:
CEPH_VOLUME_DEBUG: 1
@@ -82,10 +82,10 @@
become: yes
tasks:
- name: mark osds down
- command: "ceph --cluster {{ cluster }} osd down osd.0"
+ command: "ceph osd down osd.0"
- name: destroy osd.0
- command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+ command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
register: result
retries: 30
delay: 1
@@ -98,12 +98,12 @@
- name: zap test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ command: "ceph-volume lvm zap test_group/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1
- name: prepare osd.0 using test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
+ command: "ceph-volume lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
environment:
CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/vagrant_variables.yml
index d21531f6cec..d21531f6cec 120000
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos8/bluestore/dmcrypt/vagrant_variables.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos/bluestore/dmcrypt/vagrant_variables.yml
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
index 97d77a7f460..b6b038c90be 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml
@@ -18,20 +18,20 @@
become: yes
tasks:
- name: mark osds down
- command: "ceph --cluster {{ cluster }} osd down osd.{{ item }}"
+ command: "ceph osd down osd.{{ item }}"
with_items:
- 0
- 2
- name: destroy osd.2
- command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+ command: "ceph osd destroy osd.2 --yes-i-really-mean-it"
register: result
retries: 30
delay: 1
until: result is succeeded
- name: destroy osd.0
- command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+ command: "ceph osd destroy osd.0 --yes-i-really-mean-it"
register: result
retries: 30
delay: 1
@@ -44,7 +44,7 @@
# osd.2 device
- name: zap /dev/vdd1
- command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/vdd1 --destroy"
+ command: "ceph-volume lvm zap /dev/vdd1 --destroy"
environment:
CEPH_VOLUME_DEBUG: 1
@@ -60,18 +60,18 @@
state: present
- name: redeploy osd.2 using /dev/vdd1
- command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/vdd1 --osd-id 2"
+ command: "ceph-volume lvm create --bluestore --data /dev/vdd1 --osd-id 2"
environment:
CEPH_VOLUME_DEBUG: 1
  # osd.0 lv (zap without --destroy, which would remove the LV)
- name: zap test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+ command: "ceph-volume lvm zap test_group/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1
- name: prepare osd.0 again using test_group/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
+ command: "ceph-volume lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0"
environment:
CEPH_VOLUME_DEBUG: 1
@@ -151,11 +151,11 @@
# zapping the first lv shouldn't remove the vg, allowing the second zap to succeed
- name: zap test_zap/data-lv1
- command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1"
+ command: "ceph-volume lvm zap --destroy test_zap/data-lv1"
environment:
CEPH_VOLUME_DEBUG: 1
- name: zap test_zap/data-lv2
- command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2"
+ command: "ceph-volume lvm zap --destroy test_zap/data-lv2"
environment:
CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
index fe60c7db228..4c76c3ef914 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini
@@ -1,5 +1,5 @@
[tox]
-envlist = centos8-bluestore-{create,prepare_activate,dmcrypt}
+envlist = centos-bluestore-{create,prepare_activate,dmcrypt}
skipsdist = True
[testenv]
@@ -18,18 +18,20 @@ setenv=
VAGRANT_CWD = {changedir}
CEPH_VOLUME_DEBUG = 1
DEBIAN_FRONTEND=noninteractive
+ ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections
+ CEPH_ANSIBLE_VAGRANT_BOX = centos/stream9
changedir=
# plain/unencrypted
- centos8-bluestore-create: {toxinidir}/centos8/bluestore/create
+ centos-bluestore-create: {toxinidir}/centos/bluestore/create
# dmcrypt
- centos8-bluestore-dmcrypt: {toxinidir}/centos8/bluestore/dmcrypt
+ centos-bluestore-dmcrypt: {toxinidir}/centos/bluestore/dmcrypt
# TODO: these are placeholders for now, eventually we want to
# test the prepare/activate workflow of ceph-volume as well
- centos8-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
+ centos-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate
commands=
- git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch {env:CEPH_ANSIBLE_CLONE:"https://github.com/ceph/ceph-ansible.git"} {envdir}/tmp/ceph-ansible
+ git clone -b {env:CEPH_ANSIBLE_BRANCH:main} --single-branch {env:CEPH_ANSIBLE_CLONE:"https://github.com/ceph/ceph-ansible.git"} {envdir}/tmp/ceph-ansible
pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
- ansible-galaxy install -r {envdir}/tmp/ceph-ansible/requirements.yml -v
+ ansible-galaxy collection install -r {envdir}/tmp/ceph-ansible/requirements.yml -v -p {envdir}/ansible_collections
bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
@@ -43,10 +45,7 @@ commands=
cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible
# use ceph-ansible to deploy a ceph cluster on the vms
- ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
-
- # prepare nodes for testing with testinfra
- ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
+ ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:main} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
# test cluster state using testinfra
py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
diff --git a/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml b/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml
index 0ac200c6bc0..036c4daf504 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml
+++ b/src/ceph-volume/ceph_volume/tests/functional/playbooks/deploy.yml
@@ -21,20 +21,6 @@
DEBIAN_FRONTEND: noninteractive
pre_tasks:
- # If we can't get python2 installed before any module is used we will fail
- # so just try what we can to get it installed
- - name: check for python2
- stat:
- path: /usr/bin/python
- ignore_errors: yes
- register: systempython2
-
- - name: install python2 for debian based systems
- raw: sudo apt-get -y install python-simplejson
- ignore_errors: yes
- when:
- - systempython2.stat is undefined or systempython2.stat.exists == false
-
# Ansible will try to auto-install python-apt, in some systems this might be
# python3-apt, or python-apt, and it has caused whole runs to fail because
# it is trying to do an interactive prompt
@@ -46,18 +32,6 @@
- python-apt
- aptitude
- - name: install python2 for fedora
- raw: sudo dnf -y install python creates=/usr/bin/python
- ignore_errors: yes
- when:
- - systempython2.stat is undefined or systempython2.stat.exists == false
-
- - name: install python2 for opensuse
- raw: sudo zypper -n install python-base creates=/usr/bin/python2.7
- ignore_errors: yes
- when:
- - systempython2.stat is undefined or systempython2.stat.exists == false
-
- name: gather facts
setup:
when:
@@ -93,6 +67,12 @@
state: latest
when: not is_atomic | bool
+ - name: install net-tools
+ package:
+ name: net-tools
+ state: present
+ when: not is_atomic | bool
+
- name: update the system
command: dnf update -y
changed_when: false
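Note: deploy.yml drops the Python 2 bootstrap tasks for Debian, Fedora, and openSUSE; the CentOS Stream boxes targeted now ship Python 3, which current Ansible requires anyway, and net-tools is installed for the testinfra checks. A hypothetical pre-flight assertion capturing the new assumption:

import shutil

def has_python3() -> bool:
    # The playbook no longer installs an interpreter; it expects one.
    return shutil.which("python3") is not None

assert has_python3(), "deploy targets are expected to ship python3"
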
diff --git a/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh b/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh
index 8f4cd3bca9b..104ab118c98 100644
--- a/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh
+++ b/src/ceph-volume/ceph_volume/tests/functional/scripts/vagrant_up.sh
@@ -2,6 +2,15 @@
set -e
+CEPH_ANSIBLE_VAGRANT_BOX="${CEPH_ANSIBLE_VAGRANT_BOX:-centos/stream9}"
+
+if [[ "${CEPH_ANSIBLE_VAGRANT_BOX}" =~ "centos/stream" ]]; then
+ EL_VERSION="${CEPH_ANSIBLE_VAGRANT_BOX: -1}"
+ LATEST_IMAGE="$(curl -s https://cloud.centos.org/centos/${EL_VERSION}-stream/x86_64/images/CHECKSUM | sed -nE 's/^SHA256.*\((.*-([0-9]+).*vagrant-libvirt.box)\).*$/\1/p' | sort -u | tail -n1)"
+ vagrant box remove "${CEPH_ANSIBLE_VAGRANT_BOX}" --all --force || true
+  vagrant box add --force --provider libvirt --name "${CEPH_ANSIBLE_VAGRANT_BOX}" "https://cloud.centos.org/centos/${EL_VERSION}-stream/x86_64/images/${LATEST_IMAGE}"
+fi
+
retries=0
until [ $retries -ge 5 ]
do
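Note: the vagrant_up.sh addition above scrapes the published CHECKSUM file to find the newest vagrant-libvirt box for the requested CentOS Stream release, then force-replaces any cached copy so every run tests against the latest image. A rough Python equivalent of the curl/sed pipeline, for illustration only (makes a network call; raises if nothing matches):

import re
import urllib.request

def latest_stream_box(el_version: str) -> str:
    url = (f"https://cloud.centos.org/centos/{el_version}-stream"
           "/x86_64/images/CHECKSUM")
    text = urllib.request.urlopen(url).read().decode()
    # CHECKSUM lines look like:
    # SHA256 (CentOS-Stream-Vagrant-9-<date>.x86_64.vagrant-libvirt.box) = <hash>
    boxes = re.findall(r"^SHA256 \((\S+vagrant-libvirt\.box)\)", text, flags=re.M)
    if not boxes:
        raise RuntimeError("no vagrant-libvirt box found in CHECKSUM")
    return sorted(set(boxes))[-1]  # dated names sort chronologically
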
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/Vagrantfile
deleted file mode 120000
index 16076e42452..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/Vagrantfile
+++ /dev/null
@@ -1 +0,0 @@
-../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/group_vars/all
deleted file mode 100644
index c265e783b07..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/group_vars/all
+++ /dev/null
@@ -1,19 +0,0 @@
----
-
-ceph_dev: True
-cluster: test
-public_network: "192.168.1.0/24"
-cluster_network: "192.168.2.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "bluestore"
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml
deleted file mode 100644
index 2e1c7ee9e89..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd0.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-
-devices:
- - '/dev/sdb'
-dedicated_devices:
- - '/dev/sdc'
-osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml
deleted file mode 100644
index 7e90071c9b1..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/host_vars/osd1.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-devices:
- - '/dev/sdb'
- - '/dev/sdc'
-osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/hosts
deleted file mode 100644
index e0c08b94659..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/hosts
+++ /dev/null
@@ -1,9 +0,0 @@
-[mons]
-mon0 monitor_interface=eth1
-
-[osds]
-osd0
-osd1
-
-[mgrs]
-mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml
deleted file mode 100644
index 24e2c0353c9..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/test.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: list all OSD directories
- find:
- paths: /var/lib/ceph/osd
- file_type: directory
- register: osd_paths
-
- - name: scan all OSD directories
- command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_paths.files }}"
-
- - name: list all OSD JSON files
- find:
- paths: /etc/ceph/osd
- file_type: file
- register: osd_configs
-
- - name: activate all scanned OSDs
- command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_configs.files }}"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml
deleted file mode 100644
index 63700c3c902..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/activate/vagrant_variables.yml
+++ /dev/null
@@ -1,73 +0,0 @@
----
-
-# DEPLOY CONTAINERIZED DAEMONS
-docker: false
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 2
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-
-# INSTALL SOURCE OF CEPH
-# valid values are 'stable' and 'dev'
-ceph_install_source: stable
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.1
-cluster_subnet: 192.168.2
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-# Disks
-# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
-# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
-disks: "[ '/dev/sdb', '/dev/sdc' ]"
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: centos/7
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/Vagrantfile
deleted file mode 120000
index 16076e42452..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/Vagrantfile
+++ /dev/null
@@ -1 +0,0 @@
-../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/group_vars/all
deleted file mode 100644
index 885c2c82f4e..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/group_vars/all
+++ /dev/null
@@ -1,22 +0,0 @@
----
-
-dmcrypt: True
-ceph_dev: True
-cluster: test
-public_network: "192.168.1.0/24"
-cluster_network: "192.168.2.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "bluestore"
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
- osd:
- osd_dmcrypt_type: luks
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml
deleted file mode 100644
index 2e1c7ee9e89..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd0.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-
-devices:
- - '/dev/sdb'
-dedicated_devices:
- - '/dev/sdc'
-osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml
deleted file mode 100644
index 7e90071c9b1..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/host_vars/osd1.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-devices:
- - '/dev/sdb'
- - '/dev/sdc'
-osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/hosts
deleted file mode 100644
index e0c08b94659..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/hosts
+++ /dev/null
@@ -1,9 +0,0 @@
-[mons]
-mon0 monitor_interface=eth1
-
-[osds]
-osd0
-osd1
-
-[mgrs]
-mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml
deleted file mode 100644
index 55ae7cc8eb9..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/test.yml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: scan all running OSDs
- command: "ceph-volume --cluster={{ cluster }} simple scan"
- environment:
- CEPH_VOLUME_DEBUG: 1
-
- - name: activate all scanned OSDs
- command: "ceph-volume --cluster={{ cluster }} simple activate --all"
- environment:
- CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml
deleted file mode 100644
index 63700c3c902..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-luks/vagrant_variables.yml
+++ /dev/null
@@ -1,73 +0,0 @@
----
-
-# DEPLOY CONTAINERIZED DAEMONS
-docker: false
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 2
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-
-# INSTALL SOURCE OF CEPH
-# valid values are 'stable' and 'dev'
-ceph_install_source: stable
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.1
-cluster_subnet: 192.168.2
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-# Disks
-# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
-# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
-disks: "[ '/dev/sdb', '/dev/sdc' ]"
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: centos/7
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/Vagrantfile
deleted file mode 120000
index 16076e42452..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/Vagrantfile
+++ /dev/null
@@ -1 +0,0 @@
-../../../../Vagrantfile \ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/group_vars/all
deleted file mode 100644
index 30bcf5be7c6..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/group_vars/all
+++ /dev/null
@@ -1,22 +0,0 @@
----
-
-dmcrypt: True
-ceph_dev: True
-cluster: test
-public_network: "192.168.1.0/24"
-cluster_network: "192.168.2.0/24"
-monitor_interface: eth1
-journal_size: 100
-osd_objectstore: "bluestore"
-ceph_origin: 'repository'
-ceph_repository: 'dev'
-copy_admin_key: false
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
- global:
- osd_pool_default_pg_num: 8
- osd_pool_default_size: 1
- osd:
- osd_dmcrypt_type: plain
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml
deleted file mode 100644
index 2e1c7ee9e89..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd0.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-
-devices:
- - '/dev/sdb'
-dedicated_devices:
- - '/dev/sdc'
-osd_scenario: "non-collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml
deleted file mode 100644
index 7e90071c9b1..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/host_vars/osd1.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-
-devices:
- - '/dev/sdb'
- - '/dev/sdc'
-osd_scenario: "collocated"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/hosts b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/hosts
deleted file mode 100644
index e0c08b94659..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/hosts
+++ /dev/null
@@ -1,9 +0,0 @@
-[mons]
-mon0 monitor_interface=eth1
-
-[osds]
-osd0
-osd1
-
-[mgrs]
-mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml
deleted file mode 100644
index 24e2c0353c9..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/test.yml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-
-- hosts: osds
- become: yes
- tasks:
-
- - name: list all OSD directories
- find:
- paths: /var/lib/ceph/osd
- file_type: directory
- register: osd_paths
-
- - name: scan all OSD directories
- command: "ceph-volume --cluster={{ cluster }} simple scan {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_paths.files }}"
-
- - name: list all OSD JSON files
- find:
- paths: /etc/ceph/osd
- file_type: file
- register: osd_configs
-
- - name: activate all scanned OSDs
- command: "ceph-volume --cluster={{ cluster }} simple activate --file {{ item.path }}"
- environment:
- CEPH_VOLUME_DEBUG: 1
- with_items:
- - "{{ osd_configs.files }}"
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml
deleted file mode 100644
index 63700c3c902..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/centos7/bluestore/dmcrypt-plain/vagrant_variables.yml
+++ /dev/null
@@ -1,73 +0,0 @@
----
-
-# DEPLOY CONTAINERIZED DAEMONS
-docker: false
-
-# DEFINE THE NUMBER OF VMS TO RUN
-mon_vms: 1
-osd_vms: 2
-mds_vms: 0
-rgw_vms: 0
-nfs_vms: 0
-rbd_mirror_vms: 0
-client_vms: 0
-iscsi_gw_vms: 0
-mgr_vms: 0
-
-
-# INSTALL SOURCE OF CEPH
-# valid values are 'stable' and 'dev'
-ceph_install_source: stable
-
-# SUBNETS TO USE FOR THE VMS
-public_subnet: 192.168.1
-cluster_subnet: 192.168.2
-
-# MEMORY
-# set 1024 for CentOS
-memory: 512
-
-# Ethernet interface name
-# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
-eth: 'eth1'
-
-# Disks
-# For libvirt use disks: "[ '/dev/vdb', '/dev/vdc' ]"
-# For CentOS7 use disks: "[ '/dev/sda', '/dev/sdb' ]"
-disks: "[ '/dev/sdb', '/dev/sdc' ]"
-
-# VAGRANT BOX
-# Ceph boxes are *strongly* suggested. They are under better control and will
-# not get updated frequently unless required for build systems. These are (for
-# now):
-#
-# * ceph/ubuntu-xenial
-#
-# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
-# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
-# libvirt CentOS: centos/7
-# parallels Ubuntu: parallels/ubuntu-14.04
-# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
-# For more boxes have a look at:
-# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
-# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
-vagrant_box: centos/7
-#ssh_private_key_path: "~/.ssh/id_rsa"
-# The sync directory changes based on vagrant box
-# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
-#vagrant_sync_dir: /home/vagrant/sync
-#vagrant_sync_dir: /
-# Disables synced folder creation. Not needed for testing, will skip mounting
-# the vagrant directory on the remote box regardless of the provider.
-vagrant_disable_synced_folder: true
-# VAGRANT URL
-# This is a URL to download an image from an alternate location. vagrant_box
-# above should be set to the filename of the image.
-# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
-# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
-
-os_tuning_params:
- - { name: kernel.pid_max, value: 4194303 }
- - { name: fs.file-max, value: 26234859 }
-
diff --git a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
deleted file mode 100644
index c910754c337..00000000000
--- a/src/ceph-volume/ceph_volume/tests/functional/simple/tox.ini
+++ /dev/null
@@ -1,56 +0,0 @@
-[tox]
-envlist = centos7-bluestore-{activate,dmcrypt_plain,dmcrypt_luks}
-skipsdist = True
-
-[testenv]
-deps = mock
-allowlist_externals =
- vagrant
- bash
- git
- sleep
- cp
-passenv=*
-setenv=
- ANSIBLE_CONFIG = {envdir}/tmp/ceph-ansible/ansible.cfg
- ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
- ANSIBLE_STDOUT_CALLBACK = debug
- VAGRANT_CWD = {changedir}
- CEPH_VOLUME_DEBUG = 1
- DEBIAN_FRONTEND=noninteractive
-changedir=
- centos7-bluestore-activate: {toxinidir}/centos7/bluestore/activate
- centos7-bluestore-dmcrypt_plain: {toxinidir}/centos7/bluestore/dmcrypt-plain
- centos7-bluestore-dmcrypt_luks: {toxinidir}/centos7/bluestore/dmcrypt-luks
-commands=
- git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible
- pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
- ansible-galaxy install -r {envdir}/tmp/ceph-ansible/requirements.yml -v
-
- bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
- bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}
-
- cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible
-
- # use ceph-ansible to deploy a ceph cluster on the vms
- ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"
-
- # prepare nodes for testing with testinfra
- ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml
-
- # test cluster state testinfra
- py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
-
- # make ceph-volume simple take over all the OSDs that got deployed, disabling ceph-disk
- ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml
-
- # reboot all vms
- bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}
-
- # wait 2 minutes for services to be ready
- sleep 120
-
- # retest to ensure cluster came back up correctly after rebooting
- py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests
-
- vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}
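Note: the `simple` functional suite (centos7-only, covering scan/activate of pre-ceph-volume OSDs) is removed outright rather than migrated. In its place, the refactored objectstore classes gain unit tests, starting with the BaseObjectStore file below. A minimal sketch of the patching pattern that file relies on: a class-level mock.patch with a `new` argument replaces the key generator for every test_* method, so each instance is constructed with a deterministic cephx key (create_key here is a simplified stand-in for prepare_utils.create_key):

from unittest.mock import patch, Mock

def create_key() -> str:
    # Stand-in for the real helper, which shells out to the cluster.
    raise RuntimeError("not reachable in tests")

@patch(f"{__name__}.create_key", Mock(return_value="AQC...fake-key=="))
class TestKeyConsumers:
    # Every method whose name starts with "test" runs with create_key
    # replaced by the Mock above; no extra argument is injected because
    # the patch supplies an explicit replacement object.
    def test_key_is_stubbed(self) -> None:
        assert create_key() == "AQC...fake-key=="
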
diff --git a/src/ceph-volume/ceph_volume/tests/objectstore/test_baseobjectstore.py b/src/ceph-volume/ceph_volume/tests/objectstore/test_baseobjectstore.py
new file mode 100644
index 00000000000..248adf66e9e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/objectstore/test_baseobjectstore.py
@@ -0,0 +1,162 @@
+import pytest
+from mock.mock import patch, Mock, call
+from ceph_volume.objectstore.baseobjectstore import BaseObjectStore
+from ceph_volume.util import system
+
+
+@patch('ceph_volume.objectstore.baseobjectstore.prepare_utils.create_key', Mock(return_value=['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ==']))
+class TestBaseObjectStore:
+ def test_init_dmcrypt(self, factory):
+ args = factory(dmcrypt=True)
+ bo = BaseObjectStore(args)
+ assert bo.encrypted == 1
+ assert bo.cephx_lockbox_secret == ['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ==']
+ assert bo.secrets['cephx_lockbox_secret'] == ['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ==']
+
+ @patch('ceph_volume.process.call', Mock(return_value=(['c6798f59-01'], '', 0)))
+ def test_get_ptuuid_ok(self):
+ """
+ Test that the ptuuid is returned
+ """
+ assert BaseObjectStore([]).get_ptuuid('/dev/sda') == 'c6798f59-01'
+
+ @patch('ceph_volume.process.call', Mock(return_value=('', '', 0)))
+ def test_get_ptuuid_raises_runtime_error(self, capsys):
+ """
+        Test that a RuntimeError is raised when blkid fails to detect a PARTUUID
+ """
+ with pytest.raises(RuntimeError) as error:
+ bo = BaseObjectStore([])
+ bo.get_ptuuid('/dev/sda')
+ stdout, stderr = capsys.readouterr()
+ assert 'blkid could not detect a PARTUUID for device: /dev/sda' in stderr
+ assert str(error.value) == 'unable to use device'
+
+ @patch.dict('os.environ', {'CEPH_VOLUME_OSDSPEC_AFFINITY': 'foo'})
+ def test_get_osdspec_affinity(self):
+ assert BaseObjectStore([]).get_osdspec_affinity() == 'foo'
+
+ def test_pre_prepare(self):
+ with pytest.raises(NotImplementedError):
+ BaseObjectStore([]).pre_prepare()
+
+ def test_prepare_data_device(self):
+ with pytest.raises(NotImplementedError):
+ BaseObjectStore([]).prepare_data_device('foo', 'bar')
+
+ def test_safe_prepare(self):
+ with pytest.raises(NotImplementedError):
+ BaseObjectStore([]).safe_prepare(args=None)
+
+ def test_add_objectstore_opts(self):
+ with pytest.raises(NotImplementedError):
+ BaseObjectStore([]).add_objectstore_opts()
+
+ @patch('ceph_volume.util.prepare.create_osd_path')
+ @patch('ceph_volume.util.prepare.link_block')
+ @patch('ceph_volume.util.prepare.get_monmap')
+ @patch('ceph_volume.util.prepare.write_keyring')
+ def test_prepare_osd_req(self, m_write_keyring, m_get_monmap, m_link_block, m_create_osd_path):
+ bo = BaseObjectStore([])
+ bo.osd_id = '123'
+ bo.block_device_path = '/dev/foo'
+ bo.prepare_osd_req()
+ assert m_create_osd_path.mock_calls == [call('123', tmpfs=True)]
+ assert m_link_block.mock_calls == [call('/dev/foo', '123')]
+ assert m_get_monmap.mock_calls == [call('123')]
+ assert m_write_keyring.mock_calls == [call('123', ['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ=='])]
+
+ def test_prepare(self):
+ with pytest.raises(NotImplementedError):
+ BaseObjectStore([]).prepare()
+
+ def test_prepare_dmcrypt(self):
+ with pytest.raises(NotImplementedError):
+ BaseObjectStore([]).prepare_dmcrypt()
+
+ def test_cluster_fsid_from_args(self, factory):
+ args = factory(cluster_fsid='abcd')
+ bo = BaseObjectStore(args)
+ assert bo.get_cluster_fsid() == 'abcd'
+
+ def test_cluster_fsid_from_conf(self, conf_ceph_stub, factory):
+ args = factory(cluster_fsid=None)
+ conf_ceph_stub('[global]\nfsid = abcd-123')
+ bo = BaseObjectStore([])
+ bo.args = args
+ assert bo.get_cluster_fsid() == 'abcd-123'
+
+ @patch('ceph_volume.conf.cluster', 'ceph')
+ def test_get_osd_path(self):
+ bo = BaseObjectStore([])
+ bo.osd_id = '123'
+ assert bo.get_osd_path() == '/var/lib/ceph/osd/ceph-123/'
+
+ @patch('ceph_volume.conf.cluster', 'ceph')
+ def test_build_osd_mkfs_cmd_base(self):
+ bo = BaseObjectStore([])
+ bo.osd_path = '/var/lib/ceph/osd/ceph-123/'
+ bo.osd_fsid = 'abcd-1234'
+ bo.objectstore = 'my-fake-objectstore'
+ bo.osd_id = '123'
+ bo.monmap = '/etc/ceph/ceph.monmap'
+ result = bo.build_osd_mkfs_cmd()
+
+ assert result == ['ceph-osd',
+ '--cluster',
+ 'ceph',
+ '--osd-objectstore',
+ 'my-fake-objectstore',
+ '--mkfs', '-i', '123',
+ '--monmap',
+ '/etc/ceph/ceph.monmap',
+ '--keyfile', '-',
+ '--osd-data',
+ '/var/lib/ceph/osd/ceph-123/',
+ '--osd-uuid', 'abcd-1234',
+ '--setuser', 'ceph',
+ '--setgroup', 'ceph']
+
+ def test_osd_mkfs_ok(self, monkeypatch, fake_call):
+ bo = BaseObjectStore([])
+ bo.get_osd_path = lambda: '/var/lib/ceph/osd/ceph-123/'
+ bo.build_osd_mkfs_cmd = lambda: ['ceph-osd', '--mkfs', 'some', 'fake', 'args']
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ bo.osd_mkfs()
+ assert fake_call.calls == [
+ {
+ 'args': (['ceph-osd',
+ '--mkfs',
+ 'some',
+ 'fake',
+ 'args'],),
+ 'kwargs': {
+ 'stdin': ['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ=='],
+ 'terminal_verbose': True,
+ 'show_command': True}
+ }
+ ]
+
+ @patch('ceph_volume.process.call', Mock(return_value=([], [], 999)))
+ def test_osd_mkfs_fails(self, monkeypatch):
+ bo = BaseObjectStore([])
+ bo.get_osd_path = lambda: '/var/lib/ceph/osd/ceph-123/'
+ bo.build_osd_mkfs_cmd = lambda: ['ceph-osd', '--mkfs', 'some', 'fake', 'args']
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ with pytest.raises(RuntimeError) as error:
+ bo.osd_mkfs()
+ assert str(error.value) == 'Command failed with exit code 999: ceph-osd --mkfs some fake args'
+
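+    # errno 11 is EWOULDBLOCK/EAGAIN: osd_mkfs() retries up to 5 times because
+    # systemd-udevd can transiently hold a flock on the device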
+ @patch('time.sleep', Mock())
+ @patch('ceph_volume.process.call', return_value=([], [], 11))
+ def test_osd_mkfs_fails_EWOULDBLOCK(self, m_call, monkeypatch):
+ bo = BaseObjectStore([])
+ bo.get_osd_path = lambda: '/var/lib/ceph/osd/ceph-123/'
+ bo.build_osd_mkfs_cmd = lambda: ['ceph-osd', '--mkfs', 'some', 'fake', 'args']
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ bo.osd_mkfs()
+ assert m_call.call_count == 5
+
+ def test_activate(self):
+ with pytest.raises(NotImplementedError):
+ BaseObjectStore([]).activate()
diff --git a/src/ceph-volume/ceph_volume/tests/objectstore/test_bluestore.py b/src/ceph-volume/ceph_volume/tests/objectstore/test_bluestore.py
new file mode 100644
index 00000000000..77bb383284e
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/objectstore/test_bluestore.py
@@ -0,0 +1,27 @@
+from mock import patch, Mock
+from ceph_volume.objectstore.bluestore import BlueStore
+
+
+class TestBlueStore:
+ @patch('ceph_volume.objectstore.baseobjectstore.prepare_utils.create_key', Mock(return_value=['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ==']))
+ def setup_method(self, m_create_key):
+ self.b = BlueStore([])
+ self.b.osd_mkfs_cmd = ['binary', 'arg1']
+
+ def test_add_objectstore_opts_wal_device_path(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda path: 0)
+ self.b.wal_device_path = '/dev/nvme0n1'
+ self.b.add_objectstore_opts()
+ assert self.b.osd_mkfs_cmd == ['binary', 'arg1', '--bluestore-block-wal-path', '/dev/nvme0n1']
+
+ def test_add_objectstore_opts_db_device_path(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda path: 0)
+ self.b.db_device_path = '/dev/ssd1'
+ self.b.add_objectstore_opts()
+ assert self.b.osd_mkfs_cmd == ['binary', 'arg1', '--bluestore-block-db-path', '/dev/ssd1']
+
+ def test_add_objectstore_opts_osdspec_affinity(self, monkeypatch):
+ monkeypatch.setattr('ceph_volume.util.system.chown', lambda path: 0)
+ self.b.get_osdspec_affinity = lambda: 'foo'
+ self.b.add_objectstore_opts()
+        assert self.b.osd_mkfs_cmd == ['binary', 'arg1', '--osdspec-affinity', 'foo']
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/objectstore/test_lvmbluestore.py b/src/ceph-volume/ceph_volume/tests/objectstore/test_lvmbluestore.py
new file mode 100644
index 00000000000..45fbd3005b6
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/objectstore/test_lvmbluestore.py
@@ -0,0 +1,571 @@
+import pytest
+from mock import patch, Mock, MagicMock, call
+from ceph_volume.objectstore.lvmbluestore import LvmBlueStore
+from ceph_volume.api.lvm import Volume
+from ceph_volume.util import system
+
+
+class TestLvmBlueStore:
+ @patch('ceph_volume.objectstore.lvmbluestore.prepare_utils.create_key', Mock(return_value=['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ==']))
+ def setup_method(self, m_create_key):
+ self.lvm_bs = LvmBlueStore([])
+
+ @patch('ceph_volume.conf.cluster', 'ceph')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ @patch('ceph_volume.objectstore.lvmbluestore.prepare_utils.create_id', Mock(return_value='111'))
+ @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.create_dmcrypt_key', Mock(return_value='fake-dmcrypt-key'))
+ def test_pre_prepare_lv(self, m_get_single_lv, factory):
+ args = factory(cluster_fsid='abcd',
+ osd_fsid='abc123',
+ crush_device_class='ssd',
+ osd_id='111',
+ data='vg_foo/lv_foo')
+ m_get_single_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ self.lvm_bs.encrypted = True
+ self.lvm_bs.args = args
+ self.lvm_bs.pre_prepare()
+ assert self.lvm_bs.secrets['dmcrypt_key'] == 'fake-dmcrypt-key'
+ assert self.lvm_bs.secrets['crush_device_class'] == 'ssd'
+ assert self.lvm_bs.osd_id == '111'
+ assert self.lvm_bs.block_device_path == '/fake-path'
+ assert self.lvm_bs.tags == {'ceph.osd_fsid': 'abc123',
+ 'ceph.osd_id': '111',
+ 'ceph.cluster_fsid': 'abcd',
+ 'ceph.cluster_name': 'ceph',
+ 'ceph.crush_device_class': 'ssd',
+ 'ceph.osdspec_affinity': '',
+ 'ceph.block_device': '/fake-path',
+ 'ceph.block_uuid': 'fake-uuid',
+ 'ceph.cephx_lockbox_secret': '',
+ 'ceph.encrypted': True,
+ 'ceph.vdo': '0'}
+
+ @patch('ceph_volume.objectstore.lvmbluestore.prepare_utils.create_id', Mock(return_value='111'))
+ @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.create_dmcrypt_key', Mock(return_value='fake-dmcrypt-key'))
+ def test_pre_prepare_no_lv(self, factory):
+ args = factory(cluster_fsid='abcd',
+ osd_fsid='abc123',
+ crush_device_class='ssd',
+ osd_id='111',
+ data='/dev/foo')
+ self.lvm_bs.prepare_data_device = lambda x, y: Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ self.lvm_bs.encrypted = True
+ self.lvm_bs.args = args
+ self.lvm_bs.pre_prepare()
+ assert self.lvm_bs.secrets['dmcrypt_key'] == 'fake-dmcrypt-key'
+ assert self.lvm_bs.secrets['crush_device_class'] == 'ssd'
+ assert self.lvm_bs.osd_id == '111'
+ assert self.lvm_bs.block_device_path == '/fake-path'
+ assert self.lvm_bs.tags == {'ceph.osd_fsid': 'abc123',
+ 'ceph.osd_id': '111',
+ 'ceph.cluster_fsid': 'abcd',
+ 'ceph.cluster_name': None,
+ 'ceph.crush_device_class': 'ssd',
+ 'ceph.osdspec_affinity': '',
+ 'ceph.block_device': '/fake-path',
+ 'ceph.block_uuid': 'fake-uuid',
+ 'ceph.cephx_lockbox_secret': '',
+ 'ceph.encrypted': True,
+ 'ceph.vdo': '0'}
+
+ @patch('ceph_volume.util.disk.is_partition', Mock(return_value=True))
+ @patch('ceph_volume.api.lvm.create_lv')
+ def test_prepare_data_device(self, m_create_lv, factory):
+ args = factory(data='/dev/foo',
+ data_slots=1,
+ data_size=102400)
+ self.lvm_bs.args = args
+ m_create_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='abcd')
+ assert self.lvm_bs.prepare_data_device('block', 'abcd') == m_create_lv.return_value
+ assert self.lvm_bs.args.data_size == 102400
+
+ @patch('ceph_volume.util.disk.is_device', Mock(return_value=False))
+ @patch('ceph_volume.util.disk.is_partition', Mock(return_value=False))
+ def test_prepare_data_device_fails(self, factory):
+ args = factory(data='/dev/foo')
+ self.lvm_bs.args = args
+ with pytest.raises(RuntimeError) as error:
+ self.lvm_bs.prepare_data_device('block', 'abcd')
+ assert ('Cannot use device (/dev/foo). '
+ 'A vg/lv path or an existing device is needed') == str(error.value)
+
+ @patch('ceph_volume.api.lvm.is_ceph_device', Mock(return_value=True))
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_safe_prepare_is_ceph_device(self, m_get_single_lv, factory):
+ args = factory(data='/dev/foo')
+ self.lvm_bs.args = args
+ m_get_single_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ self.lvm_bs.prepare = MagicMock()
+ with pytest.raises(RuntimeError) as error:
+ self.lvm_bs.safe_prepare(args)
+ assert str(error.value) == 'skipping /dev/foo, it is already prepared'
+
+ @patch('ceph_volume.api.lvm.is_ceph_device', Mock(return_value=False))
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_safe_prepare(self, m_get_single_lv, factory):
+ args = factory(data='vg_foo/lv_foo')
+ self.lvm_bs.args = args
+ m_get_single_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ self.lvm_bs.prepare = MagicMock()
+ self.lvm_bs.safe_prepare()
+ assert self.lvm_bs.prepare.called
+
+ @patch('ceph_volume.objectstore.lvmbluestore.LvmBlueStore.prepare', Mock(side_effect=Exception))
+ @patch('ceph_volume.api.lvm.is_ceph_device', Mock(return_value=False))
+ @patch('ceph_volume.objectstore.lvmbluestore.rollback_osd')
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_safe_prepare_raises_exception(self, m_get_single_lv, m_rollback_osd, factory):
+ args = factory(data='/dev/foo')
+ self.lvm_bs.args = args
+ self.lvm_bs.osd_id = '111'
+ m_get_single_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ m_rollback_osd.return_value = MagicMock()
+ with pytest.raises(Exception):
+ self.lvm_bs.safe_prepare()
+ assert m_rollback_osd.mock_calls == [call(self.lvm_bs.args, '111')]
+
+ @patch('ceph_volume.objectstore.baseobjectstore.BaseObjectStore.get_ptuuid', Mock(return_value='c6798f59-01'))
+ @patch('ceph_volume.api.lvm.Volume.set_tags', MagicMock())
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ def test_prepare(self, m_get_single_lv, is_root, factory):
+ m_get_single_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ args = factory(data='vg_foo/lv_foo',
+ block_wal='/dev/foo1',
+ block_db='/dev/foo2',
+ block_wal_size=123,
+ block_db_size=123,
+ block_wal_slots=1,
+ block_db_slots=1,
+ )
+ self.lvm_bs.args = args
+ self.lvm_bs.pre_prepare = lambda: None
+ self.lvm_bs.block_lv = MagicMock()
+ self.lvm_bs.prepare_osd_req = MagicMock()
+ self.lvm_bs.osd_mkfs = MagicMock()
+ self.lvm_bs.prepare_dmcrypt = MagicMock()
+ self.lvm_bs.secrets['dmcrypt_key'] = 'fake-secret'
+ self.lvm_bs.prepare()
+ assert self.lvm_bs.wal_device_path == '/dev/foo1'
+ assert self.lvm_bs.db_device_path == '/dev/foo2'
+ assert self.lvm_bs.block_lv.set_tags.mock_calls == [call({'ceph.type': 'block', 'ceph.vdo': '0', 'ceph.wal_uuid': 'c6798f59-01', 'ceph.wal_device': '/dev/foo1', 'ceph.db_uuid': 'c6798f59-01', 'ceph.db_device': '/dev/foo2'})]
+ assert self.lvm_bs.prepare_dmcrypt.called
+ assert self.lvm_bs.osd_mkfs.called
+ assert self.lvm_bs.prepare_osd_req.called
+
+ def test_prepare_dmcrypt(self):
+ self.lvm_bs.secrets = {'dmcrypt_key': 'fake-secret'}
+ self.lvm_bs.tags = {'ceph.block_uuid': 'block-uuid1',
+ 'ceph.db_uuid': 'db-uuid2',
+ 'ceph.wal_uuid': 'wal-uuid3'}
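+        # stub luks_format_and_open(key, device, device_type, tags): return the
+        # /dev/mapper path derived from the 'ceph.<device_type>_uuid' tag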
+ self.lvm_bs.luks_format_and_open = lambda *a: f'/dev/mapper/{a[3]["ceph."+a[2]+"_uuid"]}'
+ self.lvm_bs.prepare_dmcrypt()
+ assert self.lvm_bs.block_device_path == '/dev/mapper/block-uuid1'
+ assert self.lvm_bs.db_device_path == '/dev/mapper/db-uuid2'
+ assert self.lvm_bs.wal_device_path == '/dev/mapper/wal-uuid3'
+
+ @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.luks_open')
+ @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.luks_format')
+ def test_luks_format_and_open(self, m_luks_format, m_luks_open):
+ result = self.lvm_bs.luks_format_and_open('key',
+ '/dev/foo',
+ 'block',
+ {'ceph.block_uuid': 'block-uuid1'})
+ assert result == '/dev/mapper/block-uuid1'
+
+ def test_luks_format_and_open_not_device(self):
+ result = self.lvm_bs.luks_format_and_open('key',
+ '',
+ 'block',
+ {})
+ assert result == ''
+
+ def test_setup_device_is_none(self):
+ result = self.lvm_bs.setup_device('block',
+ None,
+ {},
+ 1,
+ 1)
+ assert result == ('', '', {})
+
+ @patch('ceph_volume.api.lvm.Volume.set_tags', return_value=MagicMock())
+ @patch('ceph_volume.util.system.generate_uuid',
+ Mock(return_value='d83fa1ca-bd68-4c75-bdc2-464da58e8abd'))
+ @patch('ceph_volume.api.lvm.create_lv')
+ @patch('ceph_volume.util.disk.is_device', Mock(return_value=True))
+ def test_setup_device_is_device(self, m_create_lv, m_set_tags):
+ m_create_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ result = self.lvm_bs.setup_device('block',
+ '/dev/foo',
+ {},
+ 1,
+ 1)
+ assert m_create_lv.mock_calls == [call('osd-block',
+ 'd83fa1ca-bd68-4c75-bdc2-464da58e8abd',
+ device='/dev/foo',
+ tags={'ceph.type': 'block',
+ 'ceph.vdo': '0',
+ 'ceph.block_device': '/fake-path',
+ 'ceph.block_uuid': 'fake-uuid'},
+ slots=1,
+ size=1)]
+ assert result == ('/fake-path',
+ 'fake-uuid',
+ {'ceph.type': 'block',
+ 'ceph.vdo': '0',
+ 'ceph.block_device': '/fake-path',
+ 'ceph.block_uuid': 'fake-uuid'
+ })
+
+ @patch('ceph_volume.api.lvm.get_single_lv')
+ @patch('ceph_volume.api.lvm.Volume.set_tags', return_value=MagicMock())
+ def test_setup_device_is_lv(self, m_set_tags, m_get_single_lv):
+ m_get_single_lv.return_value = Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')
+ result = self.lvm_bs.setup_device('block',
+ 'vg_foo/lv_foo',
+ {},
+ 1,
+ 1)
+ assert result == ('/fake-path',
+ 'fake-uuid',
+ {'ceph.type': 'block',
+ 'ceph.vdo': '0',
+ 'ceph.block_device': '/fake-path',
+ 'ceph.block_uuid': 'fake-uuid'
+ })
+
+ @patch('ceph_volume.api.lvm.Volume.set_tags', return_value=MagicMock())
+ def test_setup_device_partition(self, m_set_tags):
+ self.lvm_bs.get_ptuuid = lambda x: 'c6798f59-01'
+ result = self.lvm_bs.setup_device('block',
+ '/dev/foo1',
+ {},
+ 1,
+ 1)
+ assert result == ('/dev/foo1',
+ 'c6798f59-01',
+ {'ceph.type': 'block',
+ 'ceph.vdo': '0',
+ 'ceph.block_uuid': 'c6798f59-01',
+ 'ceph.block_device': '/dev/foo1'})
+
+ def test_get_osd_device_path_lv_block(self):
+ lvs = [Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid',
+ lv_uuid='fake-block-uuid')]
+ assert self.lvm_bs.get_osd_device_path(lvs, 'block') == '/fake-path'
+
+ @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.luks_open', MagicMock())
+ def test_get_osd_device_path_lv_block_encrypted(self):
+ lvs = [Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid,ceph.encrypted=1',
+ lv_uuid='fake-block-uuid')]
+ assert self.lvm_bs.get_osd_device_path(lvs, 'block') == '/dev/mapper/fake-block-uuid'
+
+ def test_get_osd_device_path_lv_db(self):
+ lvs = [Volume(lv_name='lv_foo-block',
+ lv_path='/fake-block-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid,ceph.db_uuid=fake-db-uuid',
+ lv_uuid='fake-block-uuid'),
+ Volume(lv_name='lv_foo-db',
+ lv_path='/fake-db-path',
+ vg_name='vg_foo_db',
+ lv_tags='ceph.type=db,ceph.block_uuid=fake-block-uuid,ceph.db_uuid=fake-db-uuid',
+ lv_uuid='fake-db-uuid')]
+ assert self.lvm_bs.get_osd_device_path(lvs, 'db') == '/fake-db-path'
+
+ def test_get_osd_device_path_no_device_uuid(self):
+ lvs = [Volume(lv_name='lv_foo-block',
+ lv_path='/fake-block-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid',
+ lv_uuid='fake-block-uuid'),
+ Volume(lv_name='lv_foo-db',
+ lv_path='/fake-db-path',
+ vg_name='vg_foo_db',
+ lv_tags='ceph.type=db,ceph.block_uuid=fake-block-uuid',
+ lv_uuid='fake-db-uuid')]
+ assert not self.lvm_bs.get_osd_device_path(lvs, 'db')
+
+ @patch('ceph_volume.util.disk.get_device_from_partuuid')
+ @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.luks_open', MagicMock())
+ def test_get_osd_device_path_phys_encrypted(self, m_get_device_from_partuuid):
+ m_get_device_from_partuuid.return_value = '/dev/sda1'
+ lvs = [Volume(lv_name='lv_foo-block',
+ lv_path='/fake-block-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid,ceph.db_uuid=fake-db-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph,ceph.encrypted=1',
+ lv_uuid='fake-block-uuid')]
+ assert self.lvm_bs.get_osd_device_path(lvs, 'db') == '/dev/mapper/fake-db-uuid'
+
+ @patch('ceph_volume.util.disk.get_device_from_partuuid')
+ def test_get_osd_device_path_phys(self, m_get_device_from_partuuid):
+ m_get_device_from_partuuid.return_value = '/dev/sda1'
+ lvs = [Volume(lv_name='lv_foo-block',
+ lv_path='/fake-block-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid,ceph.db_uuid=fake-db-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph',
+ lv_uuid='fake-block-uuid')]
+        assert self.lvm_bs.get_osd_device_path(lvs, 'db') == '/dev/sda1'
+
+ @patch('ceph_volume.util.disk.get_device_from_partuuid')
+ def test_get_osd_device_path_phys_raises_exception(self, m_get_device_from_partuuid):
+ m_get_device_from_partuuid.return_value = ''
+ lvs = [Volume(lv_name='lv_foo-block',
+ lv_path='/fake-block-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=block,ceph.block_uuid=fake-block-uuid,ceph.db_uuid=fake-db-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph',
+ lv_uuid='fake-block-uuid')]
+ with pytest.raises(RuntimeError):
+ self.lvm_bs.get_osd_device_path(lvs, 'db')
+
+ def test__activate_raises_exception(self):
+ lvs = [Volume(lv_name='lv_foo-db',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='ceph.type=db,ceph.db_uuid=fake-db-uuid',
+ lv_uuid='fake-db-uuid')]
+ with pytest.raises(RuntimeError) as error:
+ self.lvm_bs._activate(lvs)
+ assert str(error.value) == 'could not find a bluestore OSD to activate'
+
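+    # parametrized over ceph.encrypted=0/1: an encrypted OSD must be primed and
+    # linked through its /dev/mapper/<uuid> paths instead of the raw LV paths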
+ @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.write_lockbox_keyring', MagicMock())
+ @patch('ceph_volume.objectstore.lvmbluestore.encryption_utils.get_dmcrypt_key', MagicMock())
+ @patch('ceph_volume.objectstore.lvmbluestore.prepare_utils.create_osd_path')
+ @patch('ceph_volume.terminal.success')
+ @pytest.mark.parametrize("encrypted", ["ceph.encrypted=0", "ceph.encrypted=1"])
+ def test__activate(self,
+ m_success, m_create_osd_path,
+ monkeypatch, fake_run, fake_call, encrypted, conf_ceph_stub):
+ conf_ceph_stub('[global]\nfsid=asdf-lkjh')
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ monkeypatch.setattr('ceph_volume.configuration.load', lambda: None)
+ monkeypatch.setattr('ceph_volume.util.system.path_is_mounted', lambda path: False)
+ m_create_osd_path.return_value = MagicMock()
+ m_success.return_value = MagicMock()
+ lvs = [Volume(lv_name='lv_foo-block',
+ lv_path='/fake-block-path',
+ vg_name='vg_foo',
+ lv_tags=f'ceph.type=block,ceph.db_uuid=fake-db-uuid,ceph.block_uuid=fake-block-uuid,ceph.wal_uuid=fake-wal-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph,{encrypted},ceph.cephx_lockbox_secret=abcd',
+ lv_uuid='fake-block-uuid'),
+ Volume(lv_name='lv_foo-db',
+ lv_path='/fake-db-path',
+ vg_name='vg_foo_db',
+ lv_tags=f'ceph.type=db,ceph.db_uuid=fake-db-uuid,ceph.block_uuid=fake-block-uuid,ceph.wal_uuid=fake-wal-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph,{encrypted},ceph.cephx_lockbox_secret=abcd',
+ lv_uuid='fake-db-uuid'),
+ Volume(lv_name='lv_foo-db',
+ lv_path='/fake-db-path',
+ vg_name='vg_foo_db',
+ lv_tags=f'ceph.type=wal,ceph.block_uuid=fake-block-uuid,ceph.wal_uuid=fake-wal-uuid,ceph.db_uuid=fake-db-uuid,ceph.osd_id=0,ceph.osd_fsid=abcd,ceph.cluster_name=ceph,{encrypted},ceph.cephx_lockbox_secret=abcd',
+ lv_uuid='fake-wal-uuid')]
+ self.lvm_bs._activate(lvs)
+ if encrypted == "ceph.encrypted=0":
+ assert fake_run.calls == [{'args': (['ceph-bluestore-tool', '--cluster=ceph',
+ 'prime-osd-dir', '--dev', '/fake-block-path',
+ '--path', '/var/lib/ceph/osd/ceph-0', '--no-mon-config'],),
+ 'kwargs': {}},
+ {'args': (['ln', '-snf', '/fake-block-path',
+ '/var/lib/ceph/osd/ceph-0/block'],),
+ 'kwargs': {}},
+ {'args': (['ln', '-snf', '/fake-db-path',
+ '/var/lib/ceph/osd/ceph-0/block.db'],),
+ 'kwargs': {}},
+ {'args': (['ln', '-snf', '/fake-db-path',
+ '/var/lib/ceph/osd/ceph-0/block.wal'],),
+ 'kwargs': {}},
+ {'args': (['systemctl', 'enable',
+ 'ceph-volume@lvm-0-abcd'],),
+ 'kwargs': {}},
+ {'args': (['systemctl', 'enable', '--runtime', 'ceph-osd@0'],),
+ 'kwargs': {}},
+ {'args': (['systemctl', 'start', 'ceph-osd@0'],),
+ 'kwargs': {}}]
+ else:
+ assert fake_run.calls == [{'args': (['ceph-bluestore-tool', '--cluster=ceph',
+ 'prime-osd-dir', '--dev', '/dev/mapper/fake-block-uuid',
+ '--path', '/var/lib/ceph/osd/ceph-0', '--no-mon-config'],),
+ 'kwargs': {}},
+ {'args': (['ln', '-snf', '/dev/mapper/fake-block-uuid',
+ '/var/lib/ceph/osd/ceph-0/block'],),
+ 'kwargs': {}},
+ {'args': (['ln', '-snf', '/dev/mapper/fake-db-uuid',
+ '/var/lib/ceph/osd/ceph-0/block.db'],),
+ 'kwargs': {}},
+ {'args': (['ln', '-snf', '/dev/mapper/fake-wal-uuid',
+ '/var/lib/ceph/osd/ceph-0/block.wal'],),
+ 'kwargs': {}},
+ {'args': (['systemctl', 'enable', 'ceph-volume@lvm-0-abcd'],),
+ 'kwargs': {}},
+ {'args': (['systemctl', 'enable', '--runtime', 'ceph-osd@0'],),
+ 'kwargs': {}},
+ {'args': (['systemctl', 'start', 'ceph-osd@0'],),
+ 'kwargs': {}}]
+ assert m_success.mock_calls == [call('ceph-volume lvm activate successful for osd ID: 0')]
+
+ @patch('ceph_volume.systemd.systemctl.osd_is_active', return_value=False)
+ def test_activate_all(self,
+ m_create_key,
+ mock_lvm_direct_report,
+ is_root,
+ factory,
+ fake_run):
+ args = factory(no_systemd=True)
+ self.lvm_bs.args = args
+ self.lvm_bs.activate = MagicMock()
+ self.lvm_bs.activate_all()
+ assert self.lvm_bs.activate.mock_calls == [call(args,
+ osd_id='1',
+ osd_fsid='824f7edf-371f-4b75-9231-4ab62a32d5c0'),
+ call(args,
+ osd_id='0',
+ osd_fsid='a0e07c5b-bee1-4ea2-ae07-cb89deda9b27')]
+
+ @patch('ceph_volume.systemd.systemctl.osd_is_active', return_value=False)
+ def test_activate_all_no_osd_found(self,
+ m_create_key,
+ is_root,
+ factory,
+ fake_run,
+ monkeypatch,
+ capsys):
+ monkeypatch.setattr('ceph_volume.objectstore.lvmbluestore.direct_report', lambda: {})
+ args = factory(no_systemd=True)
+ self.lvm_bs.args = args
+ self.lvm_bs.activate_all()
+ stdout, stderr = capsys.readouterr()
+ assert "Was unable to find any OSDs to activate" in stderr
+ assert "Verify OSDs are present with" in stderr
+
+ @patch('ceph_volume.systemd.systemctl.osd_is_active', return_value=True)
+ def test_activate_all_osd_is_active(self,
+ mock_lvm_direct_report,
+ is_root,
+ factory,
+ fake_run):
+ args = factory(no_systemd=False)
+ self.lvm_bs.args = args
+ self.lvm_bs.activate = MagicMock()
+ self.lvm_bs.activate_all()
+ assert self.lvm_bs.activate.mock_calls == []
+
+ @patch('ceph_volume.api.lvm.get_lvs')
+ def test_activate_osd_id_and_fsid(self,
+ m_get_lvs,
+ is_root,
+ factory):
+ args = factory(osd_id='1',
+ osd_fsid='824f7edf',
+ no_systemd=True)
+ lvs = [Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags=f'ceph.osd_id={args.osd_id},ceph.osd_fsid={args.osd_fsid}',
+ lv_uuid='fake-uuid')]
+ m_get_lvs.return_value = lvs
+ self.lvm_bs.args = args
+ self.lvm_bs._activate = MagicMock()
+ self.lvm_bs.activate()
+ assert self.lvm_bs._activate.mock_calls == [call(lvs, True, False)]
+ assert m_get_lvs.mock_calls == [call(tags={'ceph.osd_id': '1',
+ 'ceph.osd_fsid': '824f7edf'})]
+
+ @patch('ceph_volume.api.lvm.get_lvs')
+ def test_activate_not_osd_id_and_fsid(self,
+ m_get_lvs,
+ is_root,
+ factory):
+ args = factory(no_systemd=True,
+ osd_id=None,
+ osd_fsid='824f7edf')
+ lvs = [Volume(lv_name='lv_foo',
+ lv_path='/fake-path',
+ vg_name='vg_foo',
+ lv_tags='',
+ lv_uuid='fake-uuid')]
+ m_get_lvs.return_value = lvs
+ self.lvm_bs.args = args
+ self.lvm_bs._activate = MagicMock()
+ self.lvm_bs.activate()
+ assert self.lvm_bs._activate.mock_calls == [call(lvs, True, False)]
+ assert m_get_lvs.mock_calls == [call(tags={'ceph.osd_fsid': '824f7edf'})]
+
+ def test_activate_osd_id_and_not_fsid(self,
+ is_root,
+ factory):
+ args = factory(no_systemd=True,
+ osd_id='1',
+ osd_fsid=None)
+ self.lvm_bs.args = args
+ self.lvm_bs._activate = MagicMock()
+ with pytest.raises(RuntimeError) as error:
+ self.lvm_bs.activate()
+ assert str(error.value) == 'could not activate osd.1, please provide the osd_fsid too'
+
+ def test_activate_not_osd_id_and_not_fsid(self,
+ is_root,
+ factory):
+ args = factory(no_systemd=True,
+ osd_id=None,
+ osd_fsid=None)
+ self.lvm_bs.args = args
+ self.lvm_bs._activate = MagicMock()
+ with pytest.raises(RuntimeError) as error:
+ self.lvm_bs.activate()
+ assert str(error.value) == 'Please provide both osd_id and osd_fsid'
+
+ @patch('ceph_volume.api.lvm.get_lvs')
+ def test_activate_couldnt_find_osd(self,
+ m_get_lvs,
+ is_root,
+ factory):
+ args = factory(osd_id='1',
+ osd_fsid='824f7edf',
+ no_systemd=True)
+ lvs = []
+ m_get_lvs.return_value = lvs
+ self.lvm_bs.args = args
+ self.lvm_bs._activate = MagicMock()
+ with pytest.raises(RuntimeError) as error:
+ self.lvm_bs.activate()
+        assert str(error.value) == 'could not find osd.1 with osd_fsid 824f7edf'
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/objectstore/test_rawbluestore.py b/src/ceph-volume/ceph_volume/tests/objectstore/test_rawbluestore.py
new file mode 100644
index 00000000000..204dcdb6f2f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/objectstore/test_rawbluestore.py
@@ -0,0 +1,159 @@
+import pytest
+from mock import patch, Mock, MagicMock, call
+from ceph_volume.objectstore.rawbluestore import RawBlueStore
+from ceph_volume.util import system
+
+
+class TestRawBlueStore:
+ @patch('ceph_volume.objectstore.rawbluestore.prepare_utils.create_key', Mock(return_value=['AQCee6ZkzhOrJRAAZWSvNC3KdXOpC2w8ly4AZQ==']))
+ def setup_method(self, m_create_key):
+ self.raw_bs = RawBlueStore([])
+
+ def test_prepare_dmcrypt(self,
+ device_info,
+ fake_call,
+ key_size):
+ self.raw_bs.secrets = {'dmcrypt_key': 'foo'}
+ self.raw_bs.block_device_path = '/dev/foo0'
+ self.raw_bs.db_device_path = '/dev/foo1'
+ self.raw_bs.wal_device_path = '/dev/foo2'
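+        # the dmcrypt mapper names asserted below are derived from the KNAME
+        # reported by lsblk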
+ lsblk = {"TYPE": "disk",
+ "NAME": "foo0",
+ 'KNAME': 'foo0'}
+ device_info(lsblk=lsblk)
+ self.raw_bs.prepare_dmcrypt()
+ assert self.raw_bs.block_device_path == "/dev/mapper/ceph--foo0-block-dmcrypt"
+ assert self.raw_bs.db_device_path == "/dev/mapper/ceph--foo0-db-dmcrypt"
+ assert self.raw_bs.wal_device_path == "/dev/mapper/ceph--foo0-wal-dmcrypt"
+
+ @patch('ceph_volume.objectstore.rawbluestore.rollback_osd')
+ @patch('ceph_volume.objectstore.rawbluestore.RawBlueStore.prepare')
+ def test_safe_prepare_raises_exception(self,
+ m_prepare,
+ m_rollback_osd,
+ factory,
+ capsys):
+ m_prepare.side_effect = Exception
+ m_rollback_osd.return_value = MagicMock()
+ args = factory(osd_id='1')
+ self.raw_bs.args = args
+ self.raw_bs.osd_id = self.raw_bs.args.osd_id
+ with pytest.raises(Exception):
+ self.raw_bs.safe_prepare()
+ assert m_rollback_osd.mock_calls == [call(self.raw_bs.args, '1')]
+
+ @patch('ceph_volume.objectstore.rawbluestore.RawBlueStore.prepare', MagicMock())
+ def test_safe_prepare(self,
+ factory,
+ capsys):
+ args = factory(dmcrypt=True,
+ data='/dev/foo')
+ self.raw_bs.safe_prepare(args)
+ stdout, stderr = capsys.readouterr()
+ assert "prepare successful for: /dev/foo" in stderr
+
+ @patch.dict('os.environ', {'CEPH_VOLUME_DMCRYPT_SECRET': 'dmcrypt-key'})
+ @patch('ceph_volume.objectstore.rawbluestore.prepare_utils.create_id')
+ @patch('ceph_volume.objectstore.rawbluestore.system.generate_uuid')
+ def test_prepare(self, m_generate_uuid, m_create_id, is_root, factory):
+ m_generate_uuid.return_value = 'fake-uuid'
+ m_create_id.return_value = MagicMock()
+ self.raw_bs.prepare_dmcrypt = MagicMock()
+ self.raw_bs.prepare_osd_req = MagicMock()
+ self.raw_bs.osd_mkfs = MagicMock()
+ args = factory(crush_device_class='foo',
+ no_tmpfs=False,
+ block_wal='/dev/foo1',
+ block_db='/dev/foo2',)
+ self.raw_bs.args = args
+ self.raw_bs.secrets = dict()
+ self.raw_bs.encrypted = True
+ self.raw_bs.prepare()
+ assert self.raw_bs.prepare_osd_req.mock_calls == [call(tmpfs=True)]
+ assert self.raw_bs.osd_mkfs.called
+ assert self.raw_bs.prepare_dmcrypt.called
+
+ @patch('ceph_volume.conf.cluster', 'ceph')
+ @patch('ceph_volume.objectstore.rawbluestore.prepare_utils.link_wal')
+ @patch('ceph_volume.objectstore.rawbluestore.prepare_utils.link_db')
+ @patch('ceph_volume.objectstore.rawbluestore.prepare_utils.link_block')
+ @patch('os.path.exists')
+ @patch('os.unlink')
+ @patch('ceph_volume.objectstore.rawbluestore.prepare_utils.create_osd_path')
+ @patch('ceph_volume.objectstore.rawbluestore.process.run')
+ def test__activate(self,
+ m_run,
+ m_create_osd_path,
+ m_unlink,
+ m_exists,
+ m_link_block,
+ m_link_db,
+ m_link_wal,
+ monkeypatch):
+ meta = dict(osd_id='1',
+ osd_uuid='fake-uuid',
+ device='/dev/foo',
+ device_db='/dev/foo1',
+ device_wal='/dev/foo2')
+ m_run.return_value = MagicMock()
+ m_exists.side_effect = lambda path: True
+ m_create_osd_path.return_value = MagicMock()
+ m_unlink.return_value = MagicMock()
+ monkeypatch.setattr(system, 'chown', lambda path: 0)
+ monkeypatch.setattr(system, 'path_is_mounted', lambda path: 0)
+ self.raw_bs._activate(meta, True)
+ calls = [call('/var/lib/ceph/osd/ceph-1/block'),
+ call('/var/lib/ceph/osd/ceph-1/block.db'),
+ call('/var/lib/ceph/osd/ceph-1/block.wal')]
+ assert m_run.mock_calls == [call(['ceph-bluestore-tool',
+ 'prime-osd-dir',
+ '--path', '/var/lib/ceph/osd/ceph-1',
+ '--no-mon-config', '--dev', '/dev/foo'])]
+ assert m_unlink.mock_calls == calls
+ assert m_exists.mock_calls == calls
+ assert m_create_osd_path.mock_calls == [call('1', tmpfs=True)]
+
+ def test_activate_raises_exception(self,
+ is_root,
+ mock_raw_direct_report):
+ with pytest.raises(RuntimeError) as error:
+ self.raw_bs.activate([],
+ '123',
+ 'fake-uuid',
+ True)
+ assert str(error.value) == 'did not find any matching OSD to activate'
+
+ def test_activate_osd_id(self,
+ is_root,
+ mock_raw_direct_report):
+ self.raw_bs._activate = MagicMock()
+ self.raw_bs.activate([],
+ '8',
+ '824f7edf-371f-4b75-9231-4ab62a32d5c0',
+ True)
+        assert self.raw_bs._activate.mock_calls == [call({'ceph_fsid': '7dccab18-14cf-11ee-837b-5254008f8ca5',
+                                                           'device': '/dev/mapper/ceph--40bc7bd7--4aee--483e--ba95--89a64bc8a4fd-osd--block--824f7edf--371f--4b75--9231--4ab62a32d5c0',
+                                                           'device_db': '/dev/mapper/ceph--73d6d4db--6528--48f2--a4e2--1c82bc87a9ac-osd--db--b82d920d--be3c--4e4d--ba64--18f7e8445892',
+                                                           'osd_id': 8,
+                                                           'osd_uuid': '824f7edf-371f-4b75-9231-4ab62a32d5c0',
+                                                           'type': 'bluestore'},
+                                                          tmpfs=True)]
+
+ def test_activate_osd_fsid(self,
+ is_root,
+ mock_raw_direct_report):
+ self.raw_bs._activate = MagicMock()
+ with pytest.raises(RuntimeError):
+ self.raw_bs.activate([],
+ '8',
+ 'a0e07c5b-bee1-4ea2-ae07-cb89deda9b27',
+ True)
+        # the RuntimeError above means no OSD matched both the osd_id and the
+        # osd_fsid, so _activate must not have been called
+        assert self.raw_bs._activate.mock_calls == []
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/systemd/test_main.py b/src/ceph-volume/ceph_volume/tests/systemd/test_main.py
index be13438f6fb..3156d50ddfa 100644
--- a/src/ceph-volume/ceph_volume/tests/systemd/test_main.py
+++ b/src/ceph-volume/ceph_volume/tests/systemd/test_main.py
@@ -31,15 +31,15 @@ class TestMain(object):
def setup_method(self):
conf.log_path = '/tmp/'
- def test_no_arguments_parsing_error(self):
+ def test_no_arguments_parsing_error(self, fake_filesystem):
with pytest.raises(RuntimeError):
main(args=[])
- def test_parsing_suffix_error(self):
+ def test_parsing_suffix_error(self, fake_filesystem):
with pytest.raises(exceptions.SuffixParsingError):
main(args=['asdf'])
- def test_correct_command(self, monkeypatch):
+ def test_correct_command(self, monkeypatch, fake_filesystem):
run = Capture()
monkeypatch.setattr(process, 'run', run)
main(args=['ceph-volume-systemd', 'lvm-8715BEB4-15C5-49DE-BA6F-401086EC7B41-0' ])
diff --git a/src/ceph-volume/ceph_volume/tests/test_main.py b/src/ceph-volume/ceph_volume/tests/test_main.py
index d03d405d553..65689bf4f3b 100644
--- a/src/ceph-volume/ceph_volume/tests/test_main.py
+++ b/src/ceph-volume/ceph_volume/tests/test_main.py
@@ -32,7 +32,7 @@ class TestVolume(object):
assert '--cluster' in stdout
assert '--log-path' in stdout
- def test_log_ignoring_missing_ceph_conf(self, caplog):
+ def test_log_ignoring_missing_ceph_conf(self, caplog, fake_filesystem):
with pytest.raises(SystemExit) as error:
main.Volume(argv=['ceph-volume', '--cluster', 'barnacle', 'lvm', '--help'])
# make sure we aren't causing an actual error
@@ -41,7 +41,7 @@ class TestVolume(object):
assert log.message == 'ignoring inability to load ceph.conf'
assert log.levelname == 'WARNING'
- def test_logs_current_command(self, caplog):
+ def test_logs_current_command(self, caplog, fake_filesystem):
with pytest.raises(SystemExit) as error:
main.Volume(argv=['ceph-volume', '--cluster', 'barnacle', 'lvm', '--help'])
# make sure we aren't causing an actual error
@@ -50,7 +50,7 @@ class TestVolume(object):
assert log.message == 'Running command: ceph-volume --cluster barnacle lvm --help'
assert log.levelname == 'INFO'
- def test_logs_set_level_warning(self, caplog):
+ def test_logs_set_level_warning(self, caplog, fake_filesystem):
with pytest.raises(SystemExit) as error:
main.Volume(argv=['ceph-volume', '--log-level', 'warning', '--cluster', 'barnacle', 'lvm', '--help'])
# make sure we aren't causing an actual error
diff --git a/src/ceph-volume/ceph_volume/tests/test_terminal.py b/src/ceph-volume/ceph_volume/tests/test_terminal.py
index e59a036baa8..3c420f15e19 100644
--- a/src/ceph-volume/ceph_volume/tests/test_terminal.py
+++ b/src/ceph-volume/ceph_volume/tests/test_terminal.py
@@ -131,13 +131,3 @@ class TestWriteUnicode(object):
writer.seek(0)
val = buffer.getvalue()
assert self.octpus_and_squid_en.encode(encoding) in val
-
- def test_writer_uses_log_on_unicodeerror(self, stream, monkeypatch, capture):
-
- if sys.version_info > (3,):
- pytest.skip("Something breaks inside of pytest's capsys")
- monkeypatch.setattr(terminal.terminal_logger, 'info', capture)
- buffer = io.BytesIO()
- writer = stream(buffer, 'ascii')
- terminal._Write(_writer=writer).raw(self.message)
- assert self.octpus_and_squid_en in capture.calls[0]['args'][0]
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py b/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py
index c6349308ee7..abbf1d57f33 100644
--- a/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py
+++ b/src/ceph-volume/ceph_volume/tests/util/test_arg_validators.py
@@ -1,6 +1,5 @@
import argparse
import pytest
-import os
from ceph_volume import exceptions, process
from ceph_volume.util import arg_validators
from mock.mock import patch, MagicMock
@@ -12,23 +11,22 @@ class TestOSDPath(object):
self.validator = arg_validators.OSDPath()
def test_is_not_root(self, monkeypatch):
- monkeypatch.setattr(os, 'getuid', lambda: 100)
+        monkeypatch.setattr('ceph_volume.decorators.os.getuid', lambda: 100)
with pytest.raises(exceptions.SuperUserError):
self.validator('')
- def test_path_is_not_a_directory(self, is_root, monkeypatch, fake_filesystem):
+ def test_path_is_not_a_directory(self, monkeypatch, fake_filesystem):
fake_file = fake_filesystem.create_file('/tmp/foo')
+        monkeypatch.setattr('ceph_volume.decorators.os.getuid', lambda: 0)
monkeypatch.setattr(arg_validators.disk, 'is_partition', lambda x: False)
- validator = arg_validators.OSDPath()
with pytest.raises(argparse.ArgumentError):
- validator(fake_file.path)
+ self.validator(fake_file.path)
- def test_files_are_missing(self, is_root, tmpdir, monkeypatch):
- tmppath = str(tmpdir)
- monkeypatch.setattr(arg_validators.disk, 'is_partition', lambda x: False)
- validator = arg_validators.OSDPath()
+ @patch('ceph_volume.decorators.os.getuid', return_value=0)
+ @patch('ceph_volume.util.arg_validators.disk.is_partition', return_value=False)
+ def test_files_are_missing(self, m_is_partition, m_getuid, fake_filesystem):
with pytest.raises(argparse.ArgumentError) as error:
- validator(tmppath)
+ self.validator('/tmp/osdpath')
assert 'Required file (ceph_fsid) was not found in OSD' in str(error.value)
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_disk.py b/src/ceph-volume/ceph_volume/tests/util/test_disk.py
index b287530dc17..94f3d3566e6 100644
--- a/src/ceph-volume/ceph_volume/tests/util/test_disk.py
+++ b/src/ceph-volume/ceph_volume/tests/util/test_disk.py
@@ -260,64 +260,72 @@ class TestGetDevices(object):
result = disk.get_devices(_sys_block_path=str(tmpdir))
assert result == {}
- def test_sda_block_is_found(self, patched_get_block_devs_sysfs, fake_filesystem):
+ @patch('ceph_volume.util.disk.udevadm_property')
+ def test_sda_block_is_found(self, m_udev_adm_property, patched_get_block_devs_sysfs, fake_filesystem):
sda_path = '/dev/sda'
- patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
+ patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk', sda_path]]
result = disk.get_devices()
assert len(result.keys()) == 1
assert result[sda_path]['human_readable_size'] == '0.00 B'
assert result[sda_path]['model'] == ''
assert result[sda_path]['partitions'] == {}
- def test_sda_size(self, patched_get_block_devs_sysfs, fake_filesystem):
+ @patch('ceph_volume.util.disk.udevadm_property')
+ def test_sda_size(self, m_udev_adm_property, patched_get_block_devs_sysfs, fake_filesystem):
sda_path = '/dev/sda'
- patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
+ patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk', sda_path]]
fake_filesystem.create_file('/sys/block/sda/size', contents = '1024')
result = disk.get_devices()
assert list(result.keys()) == [sda_path]
assert result[sda_path]['human_readable_size'] == '512.00 KB'
- def test_sda_sectorsize_fallsback(self, patched_get_block_devs_sysfs, fake_filesystem):
+ @patch('ceph_volume.util.disk.udevadm_property')
+ def test_sda_sectorsize_fallsback(self, m_udev_adm_property, patched_get_block_devs_sysfs, fake_filesystem):
# if no sectorsize, it will use queue/hw_sector_size
sda_path = '/dev/sda'
- patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
+ patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk', sda_path]]
fake_filesystem.create_file('/sys/block/sda/queue/hw_sector_size', contents = '1024')
result = disk.get_devices()
assert list(result.keys()) == [sda_path]
assert result[sda_path]['sectorsize'] == '1024'
- def test_sda_sectorsize_from_logical_block(self, patched_get_block_devs_sysfs, fake_filesystem):
+ @patch('ceph_volume.util.disk.udevadm_property')
+ def test_sda_sectorsize_from_logical_block(self, m_udev_adm_property, patched_get_block_devs_sysfs, fake_filesystem):
sda_path = '/dev/sda'
- patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
+ patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk', sda_path]]
fake_filesystem.create_file('/sys/block/sda/queue/logical_block_size', contents = '99')
result = disk.get_devices()
assert result[sda_path]['sectorsize'] == '99'
- def test_sda_sectorsize_does_not_fallback(self, patched_get_block_devs_sysfs, fake_filesystem):
+ @patch('ceph_volume.util.disk.udevadm_property')
+ def test_sda_sectorsize_does_not_fallback(self, m_udev_adm_property, patched_get_block_devs_sysfs, fake_filesystem):
sda_path = '/dev/sda'
- patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
+ patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk', sda_path]]
fake_filesystem.create_file('/sys/block/sda/queue/logical_block_size', contents = '99')
fake_filesystem.create_file('/sys/block/sda/queue/hw_sector_size', contents = '1024')
result = disk.get_devices()
assert result[sda_path]['sectorsize'] == '99'
- def test_is_rotational(self, patched_get_block_devs_sysfs, fake_filesystem):
+ @patch('ceph_volume.util.disk.udevadm_property')
+ def test_is_rotational(self, m_udev_adm_property, patched_get_block_devs_sysfs, fake_filesystem):
sda_path = '/dev/sda'
- patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
+ patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk', sda_path]]
fake_filesystem.create_file('/sys/block/sda/queue/rotational', contents = '1')
result = disk.get_devices()
assert result[sda_path]['rotational'] == '1'
- def test_is_ceph_rbd(self, patched_get_block_devs_sysfs, fake_filesystem):
+ @patch('ceph_volume.util.disk.udevadm_property')
+ def test_is_ceph_rbd(self, m_udev_adm_property, patched_get_block_devs_sysfs, fake_filesystem):
rbd_path = '/dev/rbd0'
- patched_get_block_devs_sysfs.return_value = [[rbd_path, rbd_path, 'disk']]
+ patched_get_block_devs_sysfs.return_value = [[rbd_path, rbd_path, 'disk', rbd_path]]
result = disk.get_devices()
assert rbd_path not in result
- def test_actuator_device(self, patched_get_block_devs_sysfs, fake_filesystem):
+ @patch('ceph_volume.util.disk.udevadm_property')
+ def test_actuator_device(self, m_udev_adm_property, patched_get_block_devs_sysfs, fake_filesystem):
sda_path = '/dev/sda'
fake_actuator_nb = 2
- patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk']]
+ patched_get_block_devs_sysfs.return_value = [[sda_path, sda_path, 'disk', sda_path]]
for actuator in range(0, fake_actuator_nb):
fake_filesystem.create_dir(f'/sys/block/sda/queue/independent_access_ranges/{actuator}')
result = disk.get_devices()
@@ -544,7 +552,14 @@ class TestSizeSpecificFormatting(object):
class TestAllowLoopDevsWarning(object):
+ def setup_method(self):
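+        # AllowLoopDevices caches its state in class attributes; reset them and
+        # the environment variable so tests do not leak into each other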
+ disk.AllowLoopDevices.allow = False
+ disk.AllowLoopDevices.warned = False
+ if os.environ.get('CEPH_VOLUME_ALLOW_LOOP_DEVICES'):
+ os.environ.pop('CEPH_VOLUME_ALLOW_LOOP_DEVICES')
+
def test_loop_dev_warning(self, fake_call, caplog):
+ disk.AllowLoopDevices.warned = False
assert disk.allow_loop_devices() is False
assert not caplog.records
os.environ['CEPH_VOLUME_ALLOW_LOOP_DEVICES'] = "y"
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_encryption.py b/src/ceph-volume/ceph_volume/tests/util/test_encryption.py
index cd2ea8f187f..4a720241dd9 100644
--- a/src/ceph-volume/ceph_volume/tests/util/test_encryption.py
+++ b/src/ceph-volume/ceph_volume/tests/util/test_encryption.py
@@ -103,8 +103,9 @@ class TestLuksFormat(object):
class TestLuksOpen(object):
+ @patch('ceph_volume.util.encryption.bypass_workqueue', return_value=False)
@patch('ceph_volume.util.encryption.process.call')
- def test_luks_open_command_with_default_size(self, m_call, conf_ceph_stub):
+ def test_luks_open_command_with_default_size(self, m_call, m_bypass_workqueue, conf_ceph_stub):
conf_ceph_stub('[global]\nfsid=abcd')
expected = [
'cryptsetup',
@@ -120,8 +121,9 @@ class TestLuksOpen(object):
encryption.luks_open('abcd', '/dev/foo', '/dev/bar')
assert m_call.call_args[0][0] == expected
+ @patch('ceph_volume.util.encryption.bypass_workqueue', return_value=False)
@patch('ceph_volume.util.encryption.process.call')
- def test_luks_open_command_with_custom_size(self, m_call, conf_ceph_stub):
+ def test_luks_open_command_with_custom_size(self, m_call, m_bypass_workqueue, conf_ceph_stub):
conf_ceph_stub('[global]\nfsid=abcd\n[osd]\nosd_dmcrypt_key_size=256')
expected = [
'cryptsetup',
diff --git a/src/ceph-volume/ceph_volume/tests/util/test_prepare.py b/src/ceph-volume/ceph_volume/tests/util/test_prepare.py
index ee9774ecc83..4bda56581c4 100644
--- a/src/ceph-volume/ceph_volume/tests/util/test_prepare.py
+++ b/src/ceph-volume/ceph_volume/tests/util/test_prepare.py
@@ -5,6 +5,8 @@ from ceph_volume.util import prepare
from ceph_volume.util.prepare import system
from ceph_volume import conf
from ceph_volume.tests.conftest import Factory
+from ceph_volume import objectstore
+from mock.mock import patch
class TestOSDIDAvailable(object):
@@ -117,28 +119,43 @@ class TestFormatDevice(object):
class TestOsdMkfsBluestore(object):
+ def setup_method(self):
+ conf.cluster = 'ceph'
def test_keyring_is_added(self, fake_call, monkeypatch):
monkeypatch.setattr(system, 'chown', lambda path: True)
- prepare.osd_mkfs_bluestore(1, 'asdf', keyring='secret')
- assert '--keyfile' in fake_call.calls[0]['args'][0]
+ o = objectstore.baseobjectstore.BaseObjectStore([])
+ o.osd_id = '1'
+ o.osd_fsid = 'asdf'
+ o.osd_mkfs()
+ assert '--keyfile' in fake_call.calls[2]['args'][0]
def test_keyring_is_not_added(self, fake_call, monkeypatch):
monkeypatch.setattr(system, 'chown', lambda path: True)
- prepare.osd_mkfs_bluestore(1, 'asdf')
+ o = objectstore.bluestore.BlueStore([])
+ o.osd_id = '1'
+ o.osd_fsid = 'asdf'
+ o.osd_mkfs()
assert '--keyfile' not in fake_call.calls[0]['args'][0]
- def test_wal_is_added(self, fake_call, monkeypatch):
+ def test_wal_is_added(self, fake_call, monkeypatch, objectstore_bluestore):
monkeypatch.setattr(system, 'chown', lambda path: True)
- prepare.osd_mkfs_bluestore(1, 'asdf', wal='/dev/smm1')
- assert '--bluestore-block-wal-path' in fake_call.calls[0]['args'][0]
- assert '/dev/smm1' in fake_call.calls[0]['args'][0]
+        bs = objectstore_bluestore(objectstore='bluestore',
+ osd_id='1',
+                                   osd_fsid='asdf',
+ wal_device_path='/dev/smm1',
+ cephx_secret='foo',)
+ bs.osd_mkfs()
+ assert '--bluestore-block-wal-path' in fake_call.calls[2]['args'][0]
+ assert '/dev/smm1' in fake_call.calls[2]['args'][0]
def test_db_is_added(self, fake_call, monkeypatch):
monkeypatch.setattr(system, 'chown', lambda path: True)
- prepare.osd_mkfs_bluestore(1, 'asdf', db='/dev/smm2')
- assert '--bluestore-block-db-path' in fake_call.calls[0]['args'][0]
- assert '/dev/smm2' in fake_call.calls[0]['args'][0]
+ bs = objectstore.bluestore.BlueStore([])
+ bs.db_device_path = '/dev/smm2'
+ bs.osd_mkfs()
+ assert '--bluestore-block-db-path' in fake_call.calls[2]['args'][0]
+ assert '/dev/smm2' in fake_call.calls[2]['args'][0]
class TestMountOSD(object):
@@ -263,23 +280,29 @@ class TestNormalizeFlags(object):
result = sorted(prepare._normalize_mount_flags(flags, extras=['discard','rw']).split(','))
assert ','.join(result) == 'auto,discard,exec,rw'
-
+@patch('ceph_volume.util.prepare.create_key', return_value='fake-secret')
class TestMkfsBluestore(object):
- def test_non_zero_exit_status(self, stub_call, monkeypatch):
+ def test_non_zero_exit_status(self, m_create_key, stub_call, monkeypatch, objectstore_bluestore):
conf.cluster = 'ceph'
monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
stub_call(([], [], 1))
+ bs = objectstore_bluestore(osd_id='1',
+ osd_fsid='asdf-1234',
+ cephx_secret='keyring')
with pytest.raises(RuntimeError) as error:
- prepare.osd_mkfs_bluestore('1', 'asdf-1234', keyring='keyring')
+ bs.osd_mkfs()
assert "Command failed with exit code 1" in str(error.value)
- def test_non_zero_exit_formats_command_correctly(self, stub_call, monkeypatch):
+ def test_non_zero_exit_formats_command_correctly(self, m_create_key, stub_call, monkeypatch, objectstore_bluestore):
conf.cluster = 'ceph'
monkeypatch.setattr('ceph_volume.util.prepare.system.chown', lambda x: True)
stub_call(([], [], 1))
+ bs = objectstore_bluestore(osd_id='1',
+ osd_fsid='asdf-1234',
+ cephx_secret='keyring')
with pytest.raises(RuntimeError) as error:
- prepare.osd_mkfs_bluestore('1', 'asdf-1234', keyring='keyring')
+ bs.osd_mkfs()
expected = ' '.join([
'ceph-osd',
'--cluster',
diff --git a/src/ceph-volume/ceph_volume/util/arg_validators.py b/src/ceph-volume/ceph_volume/util/arg_validators.py
index e936cab895e..8f49dac721b 100644
--- a/src/ceph-volume/ceph_volume/util/arg_validators.py
+++ b/src/ceph-volume/ceph_volume/util/arg_validators.py
@@ -5,7 +5,7 @@ from ceph_volume import terminal, decorators, process
from ceph_volume.util.device import Device
from ceph_volume.util import disk
from ceph_volume.util.encryption import set_dmcrypt_no_workqueue
-from ceph_volume import process, conf
+
def valid_osd_id(val):
return str(int(val))
diff --git a/src/ceph-volume/ceph_volume/util/prepare.py b/src/ceph-volume/ceph_volume/util/prepare.py
index 576c0861708..9c863b83d93 100644
--- a/src/ceph-volume/ceph_volume/util/prepare.py
+++ b/src/ceph-volume/ceph_volume/util/prepare.py
@@ -4,11 +4,9 @@ but also a compounded ("single call") helper to do them in order. Some plugins
may want to change some part of the process, while others might want to consume
the single-call helper
"""
-import errno
import os
import logging
import json
-import time
from ceph_volume import process, conf, terminal
from ceph_volume.util import system, constants, str_to_int, disk
@@ -379,82 +377,3 @@ def get_monmap(osd_id):
'--keyring', bootstrap_keyring,
'mon', 'getmap', '-o', monmap_destination
])
-
-
-def get_osdspec_affinity():
- return os.environ.get('CEPH_VOLUME_OSDSPEC_AFFINITY', '')
-
-
-def osd_mkfs_bluestore(osd_id, fsid, keyring=None, wal=False, db=False):
- """
- Create the files for the OSD to function. A normal call will look like:
-
- ceph-osd --cluster ceph --mkfs --mkkey -i 0 \
- --monmap /var/lib/ceph/osd/ceph-0/activate.monmap \
- --osd-data /var/lib/ceph/osd/ceph-0 \
- --osd-uuid 8d208665-89ae-4733-8888-5d3bfbeeec6c \
- --keyring /var/lib/ceph/osd/ceph-0/keyring \
- --setuser ceph --setgroup ceph
-
- In some cases it is required to use the keyring, when it is passed in as
- a keyword argument it is used as part of the ceph-osd command
- """
- path = '/var/lib/ceph/osd/%s-%s/' % (conf.cluster, osd_id)
- monmap = os.path.join(path, 'activate.monmap')
-
- system.chown(path)
-
- base_command = [
- 'ceph-osd',
- '--cluster', conf.cluster,
- '--osd-objectstore', 'bluestore',
- '--mkfs',
- '-i', osd_id,
- '--monmap', monmap,
- ]
-
- supplementary_command = [
- '--osd-data', path,
- '--osd-uuid', fsid,
- '--setuser', 'ceph',
- '--setgroup', 'ceph'
- ]
-
- if keyring is not None:
- base_command.extend(['--keyfile', '-'])
-
- if wal:
- base_command.extend(
- ['--bluestore-block-wal-path', wal]
- )
- system.chown(wal)
-
- if db:
- base_command.extend(
- ['--bluestore-block-db-path', db]
- )
- system.chown(db)
-
- if get_osdspec_affinity():
- base_command.extend(['--osdspec-affinity', get_osdspec_affinity()])
-
- command = base_command + supplementary_command
-
- """
- When running in containers the --mkfs on raw device sometimes fails
- to acquire a lock through flock() on the device because systemd-udevd holds one temporarily.
- See KernelDevice.cc and _lock() to understand how ceph-osd acquires the lock.
- Because this is really transient, we retry up to 5 times and wait for 1 sec in-between
- """
- for retry in range(5):
- _, _, returncode = process.call(command, stdin=keyring, terminal_verbose=True, show_command=True)
- if returncode == 0:
- break
- else:
- if returncode == errno.EWOULDBLOCK:
- time.sleep(1)
- logger.info('disk is held by another process, trying to mkfs again... (%s/5 attempt)' % retry)
- continue
- else:
- raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))
-
diff --git a/src/ceph-volume/tox.ini b/src/ceph-volume/tox.ini
index 696d6dcc837..f7d294a9aad 100644
--- a/src/ceph-volume/tox.ini
+++ b/src/ceph-volume/tox.ini
@@ -11,7 +11,7 @@ deps=
allowlist_externals=
./tox_install_command.sh
install_command=./tox_install_command.sh {opts} {packages}
-commands=py.test --numprocesses=auto -vv {posargs:ceph_volume/tests} --ignore=ceph_volume/tests/functional
+commands=py.test -vv {posargs:ceph_volume/tests} --ignore=ceph_volume/tests/functional
[testenv:py3-flake8]
deps=flake8
diff --git a/src/cephadm/tests/test_util_funcs.py b/src/cephadm/tests/test_util_funcs.py
index aa64d54b073..6d53012f702 100644
--- a/src/cephadm/tests/test_util_funcs.py
+++ b/src/cephadm/tests/test_util_funcs.py
@@ -558,7 +558,7 @@ class FakeContext:
with_cephadm_ctx is not appropriate (it enables too many mocks, etc).
"""
- timeout = 30
+ timeout = 60
def _has_non_zero_exit(clog):
diff --git a/src/cephadm/tox.ini b/src/cephadm/tox.ini
index 86d2c289c76..70e9a411238 100644
--- a/src/cephadm/tox.ini
+++ b/src/cephadm/tox.ini
@@ -35,7 +35,6 @@ deps =
commands=pytest {posargs}
[testenv:mypy]
-basepython = python3
deps =
mypy
types-PyYAML
@@ -44,7 +43,6 @@ deps =
commands = mypy --config-file ../mypy.ini {posargs:cephadm.py cephadmlib}
[testenv:flake8]
-basepython = python3
allowlist_externals = bash
deps =
flake8 == 5.0.4
diff --git a/src/cls/user/cls_user.cc b/src/cls/user/cls_user.cc
index e278ad7fc12..0447bf33a2c 100644
--- a/src/cls/user/cls_user.cc
+++ b/src/cls/user/cls_user.cc
@@ -2,11 +2,14 @@
// vim: ts=8 sw=2 smarttab
#include <errno.h>
+#include <algorithm>
+#include <cctype>
#include "include/utime.h"
#include "objclass/objclass.h"
#include "cls_user_ops.h"
+#include "rgw/rgw_string.h"
using std::map;
using std::string;
@@ -71,7 +74,8 @@ static int get_existing_bucket_entry(cls_method_context_t hctx, const string& bu
return 0;
}
-static int read_header(cls_method_context_t hctx, cls_user_header *header)
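+// templated so the same helper can decode either the existing cls_user_header
+// or the new cls_user_account_header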
+template <typename T>
+static int read_header(cls_method_context_t hctx, T *header)
{
bufferlist bl;
@@ -80,7 +84,7 @@ static int read_header(cls_method_context_t hctx, cls_user_header *header)
return ret;
if (bl.length() == 0) {
- *header = cls_user_header();
+ *header = T();
return 0;
}
@@ -501,6 +505,221 @@ static int cls_user_reset_stats2(cls_method_context_t hctx,
return 0;
} /* cls_user_reset_stats2 */
+
+// account resource names must be unique and aren't distinguished by case, so
+// convert all keys to lowercase
+static std::string resource_key(std::string_view name)
+{
+ std::string key;
+ key.resize(name.size());
+ std::transform(name.begin(), name.end(), key.begin(),
+ [](unsigned char c) { return std::tolower(c); });
+ return key;
+}
+
+static int cls_account_resource_add(cls_method_context_t hctx,
+ buffer::list *in, buffer::list *out)
+{
+ cls_user_account_resource_add_op op;
+ try {
+ auto bliter = in->cbegin();
+ decode(op, bliter);
+ } catch (const ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s failed to decode op", __func__);
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "adding account resource name=%s path=%s",
+ op.entry.name.c_str(), op.entry.path.c_str());
+
+ const std::string key = resource_key(op.entry.name);
+
+ // does this resource entry exist?
+ bufferlist readbl; // unused
+ int ret = cls_cxx_map_get_val(hctx, key, &readbl);
+ if (ret < 0 && ret != -ENOENT) {
+ return ret;
+ }
+ const bool exists = (ret == 0);
+
+ std::optional<cls_user_account_header> header;
+ if (!exists) {
+ // if this is a new entry, update the resource count in the account header
+ ret = read_header(hctx, &header.emplace());
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: failed to read account header ret=%d", ret);
+ return ret;
+ }
+ if (header->count >= op.limit) {
+ CLS_LOG(4, "account resource limit exceeded, %u >= %u",
+ header->count, op.limit);
+ return -EUSERS; // too many users
+ }
+ header->count++;
+ } else if (op.exclusive) {
+ return -EEXIST;
+ }
+
+ // write/overwrite the entry
+ bufferlist writebl;
+ encode(op.entry, writebl);
+ ret = cls_cxx_map_set_val(hctx, key, &writebl);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: failed to write account resource: %d", ret);
+ return ret;
+ }
+
+ // write the updated account header
+ if (header) {
+ bufferlist headerbl;
+ encode(*header, headerbl);
+ return cls_cxx_map_write_header(hctx, &headerbl);
+ }
+ return 0;
+} // cls_account_resource_add
+
+static int cls_account_resource_get(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ cls_user_account_resource_get_op op;
+ try {
+ auto p = in->cbegin();
+ decode(op, p);
+ } catch (const ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s failed to decode op", __func__);
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "reading account resource name=%s", op.name.c_str());
+
+ const std::string key = resource_key(op.name);
+
+ bufferlist bl;
+ int r = cls_cxx_map_get_val(hctx, key, &bl);
+ if (r < 0) {
+ return r;
+ }
+
+ cls_user_account_resource_get_ret ret;
+ try {
+ auto iter = bl.cbegin();
+ decode(ret.entry, iter);
+ } catch (ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: failed to decode entry %s", key.c_str());
+ return -EIO;
+ }
+
+ encode(ret, *out);
+ return 0;
+} // cls_account_resource_get
+
+static int cls_account_resource_rm(cls_method_context_t hctx,
+ buffer::list *in, buffer::list *out)
+{
+ cls_user_account_resource_rm_op op;
+ try {
+ auto bliter = in->cbegin();
+ decode(op, bliter);
+ } catch (const ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s failed to decode op", __func__);
+ return -EINVAL;
+ }
+
+ CLS_LOG(20, "removing account resource name=%s", op.name.c_str());
+
+ const std::string key = resource_key(op.name);
+
+ // verify that the resource entry exists so we can return -ENOENT if it
+ // doesn't; remove_key() alone would return success either way
+ bufferlist readbl; // unused
+ int ret = cls_cxx_map_get_val(hctx, key, &readbl);
+ if (ret < 0) {
+ return ret;
+ }
+
+ // remove the resource entry
+ ret = cls_cxx_map_remove_key(hctx, key);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: failed to remove account resource: %d", ret);
+ return ret;
+ }
+
+ // update resource count in the account header
+ cls_user_account_header header;
+ ret = read_header(hctx, &header);
+ if (ret < 0) {
+ CLS_LOG(0, "ERROR: failed to read account header ret=%d", ret);
+ return ret;
+ }
+ if (header.count) { // guard underflow
+ header.count--;
+ }
+
+ bufferlist headerbl;
+ encode(header, headerbl);
+ return cls_cxx_map_write_header(hctx, &headerbl);
+} // cls_account_resource_rm
+
+static int cls_account_resource_list(cls_method_context_t hctx,
+ bufferlist *in, bufferlist *out)
+{
+ cls_user_account_resource_list_op op;
+ try {
+ auto p = in->cbegin();
+ decode(op, p);
+ } catch (const ceph::buffer::error& err) {
+ CLS_LOG(0, "ERROR: %s failed to decode op", __func__);
+ return -EINVAL;
+ }
+ CLS_LOG(20, "listing account resources from marker=%s path_prefix=%s max_entries=%d",
+ op.marker.c_str(), op.path_prefix.c_str(), (int)op.max_entries);
+
+ const std::string prefix; // empty
+ const uint32_t max_entries = std::min(op.max_entries, 1000u);
+ std::map<std::string, bufferlist> entries;
+ bool truncated = false;
+
+ int rc = cls_cxx_map_get_vals(hctx, op.marker, prefix, max_entries,
+ &entries, &truncated);
+ if (rc < 0) {
+ return rc;
+ }
+
+ cls_user_account_resource_list_ret ret;
+
+ // copy matching decoded omap values into a vector
+ for (auto& [key, bl] : entries) {
+ // decode as cls_user_account_resource
+ cls_user_account_resource entry;
+ try {
+ auto p = bl.cbegin();
+ decode(entry, p);
+ } catch (const ceph::buffer::error& e) {
+ CLS_LOG(1, "ERROR: %s failed to decode resource entry at key=%s",
+ __func__, key.c_str());
+ return -EIO;
+ }
+
+ // filter entries by path prefix
+ if (entry.path.starts_with(op.path_prefix)) {
+ CLS_LOG(20, "included resource path=%s name=%s",
+ entry.path.c_str(), entry.name.c_str());
+ ret.entries.push_back(std::move(entry));
+ }
+ }
+
+ ret.truncated = truncated;
+ if (!entries.empty()) {
+ ret.marker = entries.rbegin()->first;
+ }
+ CLS_LOG(20, "entries=%d next_marker=%s truncated=%d",
+ (int)ret.entries.size(), ret.marker.c_str(), (int)ret.truncated);
+
+ encode(ret, *out);
+ return 0;
+} // cls_account_resource_list
+
+
CLS_INIT(user)
{
CLS_LOG(1, "Loaded user class!");
@@ -527,5 +746,18 @@ CLS_INIT(user)
cls_register_cxx_method(h_class, "reset_user_stats", CLS_METHOD_RD | CLS_METHOD_WR, cls_user_reset_stats, &h_user_reset_stats);
cls_register_cxx_method(h_class, "reset_user_stats2", CLS_METHOD_RD | CLS_METHOD_WR, cls_user_reset_stats2, &h_user_reset_stats2);
- return;
+ // account
+ cls_method_handle_t h_account_resource_add;
+ cls_method_handle_t h_account_resource_get;
+ cls_method_handle_t h_account_resource_rm;
+ cls_method_handle_t h_account_resource_list;
+
+ cls_register_cxx_method(h_class, "account_resource_add", CLS_METHOD_RD | CLS_METHOD_WR,
+ cls_account_resource_add, &h_account_resource_add);
+ cls_register_cxx_method(h_class, "account_resource_get", CLS_METHOD_RD,
+ cls_account_resource_get, &h_account_resource_get);
+ cls_register_cxx_method(h_class, "account_resource_rm", CLS_METHOD_RD | CLS_METHOD_WR,
+ cls_account_resource_rm, &h_account_resource_rm);
+ cls_register_cxx_method(h_class, "account_resource_list", CLS_METHOD_RD,
+ cls_account_resource_list, &h_account_resource_list);
}
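The new class methods key omap entries by a case-folded name, so lookups are case-insensitive. A standalone sketch of the property resource_key() guarantees, using the same std::transform/std::tolower approach as the hunk above:

```cpp
// Two resource names that differ only by case map to the same omap key.
#include <algorithm>
#include <cassert>
#include <cctype>
#include <string>
#include <string_view>

static std::string resource_key(std::string_view name) {
  std::string key(name.size(), '\0');
  std::transform(name.begin(), name.end(), key.begin(),
                 [](unsigned char c) { return std::tolower(c); });
  return key;
}

int main() {
  assert(resource_key("MyRole") == resource_key("myrole"));
  assert(resource_key("MyRole") == resource_key("MYROLE"));
}
```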
diff --git a/src/cls/user/cls_user_client.cc b/src/cls/user/cls_user_client.cc
index b74f55b48b2..acc94ca326a 100644
--- a/src/cls/user/cls_user_client.cc
+++ b/src/cls/user/cls_user_client.cc
@@ -162,3 +162,124 @@ int cls_user_get_header_async(IoCtx& io_ctx, string& oid, RGWGetUserHeader_CB *c
return 0;
}
+
+
+void cls_user_account_resource_add(librados::ObjectWriteOperation& op,
+ const cls_user_account_resource& entry,
+ bool exclusive, uint32_t limit)
+{
+ cls_user_account_resource_add_op call;
+ call.entry = entry;
+ call.exclusive = exclusive;
+ call.limit = limit;
+
+ bufferlist inbl;
+ encode(call, inbl);
+ op.exec("user", "account_resource_add", inbl);
+}
+
+class ResourceGetCB : public librados::ObjectOperationCompletion {
+ cls_user_account_resource* entry;
+ int* pret;
+public:
+ ResourceGetCB(cls_user_account_resource* entry, int* pret)
+ : entry(entry), pret(pret)
+ {}
+ void handle_completion(int r, bufferlist& outbl) override {
+ if (r >= 0) {
+ cls_user_account_resource_get_ret ret;
+ try {
+ auto iter = outbl.cbegin();
+ decode(ret, iter);
+ if (entry) {
+ *entry = std::move(ret.entry);
+ }
+ } catch (const ceph::buffer::error& err) {
+ r = -EIO;
+ }
+ }
+ if (pret) {
+ *pret = r;
+ }
+ }
+};
+
+void cls_user_account_resource_get(librados::ObjectReadOperation& op,
+ std::string_view name,
+ cls_user_account_resource& entry,
+ int* pret)
+{
+ cls_user_account_resource_get_op call;
+ call.name = name;
+
+ bufferlist inbl;
+ encode(call, inbl);
+ op.exec("user", "account_resource_get", inbl,
+ new ResourceGetCB(&entry, pret));
+}
+
+void cls_user_account_resource_rm(librados::ObjectWriteOperation& op,
+ std::string_view name)
+{
+ cls_user_account_resource_rm_op call;
+ call.name = name;
+
+ bufferlist inbl;
+ encode(call, inbl);
+ op.exec("user", "account_resource_rm", inbl);
+}
+
+class ResourceListCB : public librados::ObjectOperationCompletion {
+ std::vector<cls_user_account_resource>* entries;
+ bool* truncated;
+ std::string* next_marker;
+ int* pret;
+public:
+ ResourceListCB(std::vector<cls_user_account_resource>* entries,
+ bool* truncated, std::string* next_marker, int* pret)
+ : entries(entries), truncated(truncated),
+ next_marker(next_marker), pret(pret)
+ {}
+ void handle_completion(int r, bufferlist& outbl) override {
+ if (r >= 0) {
+ cls_user_account_resource_list_ret ret;
+ try {
+ auto iter = outbl.cbegin();
+ decode(ret, iter);
+ if (entries) {
+ *entries = std::move(ret.entries);
+ }
+ if (next_marker) {
+ *next_marker = std::move(ret.marker);
+ }
+ if (truncated) {
+ *truncated = ret.truncated;
+ }
+ } catch (const ceph::buffer::error& err) {
+ r = -EIO;
+ }
+ }
+ if (pret) {
+ *pret = r;
+ }
+ }
+};
+
+void cls_user_account_resource_list(librados::ObjectReadOperation& op,
+ std::string_view marker,
+ std::string_view path_prefix,
+ uint32_t max_entries,
+ std::vector<cls_user_account_resource>& entries,
+ bool* truncated, std::string* next_marker,
+ int* pret)
+{
+ cls_user_account_resource_list_op call;
+ call.marker = marker;
+ call.path_prefix = path_prefix;
+ call.max_entries = max_entries;
+
+ bufferlist inbl;
+ encode(call, inbl);
+ op.exec("user", "account_resource_list", inbl,
+ new ResourceListCB(&entries, truncated, next_marker, pret));
+}
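A hypothetical caller-side use of the new write wrapper, assuming an open librados::IoCtx `ioctx` and the account's index object name `oid` (both assumptions, not part of this patch):

```cpp
// Build an ObjectWriteOperation and execute it against the account index object.
cls_user_account_resource entry;
entry.name = "deploy-role";   // stored under resource_key("deploy-role")
entry.path = "/application/";

librados::ObjectWriteOperation op;
cls_user_account_resource_add(op, entry,
                              true /* exclusive: -EEXIST if already present */,
                              1000 /* limit: -EUSERS once count reaches it */);
int r = ioctx.operate(oid, &op);
```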
diff --git a/src/cls/user/cls_user_client.h b/src/cls/user/cls_user_client.h
index 03d975c59cb..a1120f86400 100644
--- a/src/cls/user/cls_user_client.h
+++ b/src/cls/user/cls_user_client.h
@@ -33,4 +33,31 @@ void cls_user_get_header(librados::ObjectReadOperation& op, cls_user_header *hea
int cls_user_get_header_async(librados::IoCtx& io_ctx, std::string& oid, RGWGetUserHeader_CB *ctx);
void cls_user_reset_stats(librados::ObjectWriteOperation& op);
+// Account resources
+
+/// Add or overwrite an entry in the account's list of resources. Returns
+/// -EUSERS (Too many users) if the resource count would exceed the given limit.
+void cls_user_account_resource_add(librados::ObjectWriteOperation& op,
+ const cls_user_account_resource& entry,
+ bool exclusive, uint32_t limit);
+
+/// Look up an account resource by case-insensitive name.
+void cls_user_account_resource_get(librados::ObjectReadOperation& op,
+ std::string_view name,
+ cls_user_account_resource& entry,
+ int* pret);
+
+/// Remove an account resource by case-insensitive name.
+void cls_user_account_resource_rm(librados::ObjectWriteOperation& op,
+ std::string_view name);
+
+/// List the resources linked to an account.
+void cls_user_account_resource_list(librados::ObjectReadOperation& op,
+ std::string_view marker,
+ std::string_view path_prefix,
+ uint32_t max_entries,
+ std::vector<cls_user_account_resource>& entries,
+ bool* truncated, std::string* next_marker,
+ int* pret);
+
#endif
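Since the class caps each page at 1000 entries, callers are expected to follow `truncated` and the returned marker. A hypothetical pagination loop over these declarations, again assuming an open `ioctx` and object name `oid`:

```cpp
// Page through an account's resources under a path prefix.
std::string marker;            // empty marker: start from the beginning
bool truncated = true;
while (truncated) {
  std::vector<cls_user_account_resource> entries;
  int pret = 0;
  librados::ObjectReadOperation op;
  cls_user_account_resource_list(op, marker, "/application/",
                                 1000, entries, &truncated, &marker, &pret);
  int r = ioctx.operate(oid, &op, nullptr);
  if (r < 0 || pret < 0)
    break;                     // handle the error as appropriate
  for (const auto& e : entries) {
    // process e.name / e.path ...
  }
}
```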
diff --git a/src/cls/user/cls_user_ops.cc b/src/cls/user/cls_user_ops.cc
index 5ae9d2c93b8..f787c1eeb02 100644
--- a/src/cls/user/cls_user_ops.cc
+++ b/src/cls/user/cls_user_ops.cc
@@ -116,3 +116,89 @@ void cls_user_complete_stats_sync_op::generate_test_instances(list<cls_user_comp
}
+void cls_user_account_resource_add_op::dump(Formatter *f) const
+{
+ encode_json("name", entry.name, f);
+ encode_json("path", entry.path, f);
+ encode_json("limit", limit, f);
+}
+
+void cls_user_account_resource_add_op::generate_test_instances(std::list<cls_user_account_resource_add_op*>& ls)
+{
+ ls.push_back(new cls_user_account_resource_add_op);
+ cls_user_account_resource_add_op *op = new cls_user_account_resource_add_op;
+ cls_user_gen_test_resource(op->entry);
+ ls.push_back(op);
+}
+
+void cls_user_account_resource_get_op::dump(Formatter *f) const
+{
+ encode_json("name", name, f);
+}
+
+void cls_user_account_resource_get_op::generate_test_instances(std::list<cls_user_account_resource_get_op*>& ls)
+{
+ ls.push_back(new cls_user_account_resource_get_op);
+ cls_user_account_resource_get_op *op = new cls_user_account_resource_get_op;
+ op->name = "name";
+ ls.push_back(op);
+}
+
+void cls_user_account_resource_get_ret::dump(Formatter *f) const
+{
+ encode_json("entry", entry, f);
+}
+
+void cls_user_account_resource_get_ret::generate_test_instances(std::list<cls_user_account_resource_get_ret*>& ls)
+{
+ ls.push_back(new cls_user_account_resource_get_ret);
+ cls_user_account_resource_get_ret *ret = new cls_user_account_resource_get_ret;
+ cls_user_gen_test_resource(ret->entry);
+ ls.push_back(ret);
+}
+
+void cls_user_account_resource_rm_op::dump(Formatter *f) const
+{
+ encode_json("name", name, f);
+}
+
+void cls_user_account_resource_rm_op::generate_test_instances(std::list<cls_user_account_resource_rm_op*>& ls)
+{
+ ls.push_back(new cls_user_account_resource_rm_op);
+ cls_user_account_resource_rm_op *op = new cls_user_account_resource_rm_op;
+ op->name = "name";
+ ls.push_back(op);
+}
+
+void cls_user_account_resource_list_op::dump(Formatter *f) const
+{
+ encode_json("marker", marker, f);
+ encode_json("path_prefix", path_prefix, f);
+ encode_json("max_entries", max_entries, f);
+}
+
+void cls_user_account_resource_list_op::generate_test_instances(std::list<cls_user_account_resource_list_op*>& ls)
+{
+ ls.push_back(new cls_user_account_resource_list_op);
+ cls_user_account_resource_list_op *op = new cls_user_account_resource_list_op;
+ op->marker = "marker";
+ op->path_prefix = "path";
+ op->max_entries = 20;
+ ls.push_back(op);
+}
+
+void cls_user_account_resource_list_ret::dump(Formatter *f) const
+{
+ encode_json("entries", entries, f);
+ encode_json("truncated", truncated, f);
+ encode_json("marker", marker, f);
+}
+
+void cls_user_account_resource_list_ret::generate_test_instances(std::list<cls_user_account_resource_list_ret*>& ls)
+{
+ ls.push_back(new cls_user_account_resource_list_ret);
+ cls_user_account_resource_list_ret *ret = new cls_user_account_resource_list_ret;
+ cls_user_gen_test_resource(ret->entries.emplace_back());
+ ret->truncated = true;
+ ls.push_back(ret);
+}
diff --git a/src/cls/user/cls_user_ops.h b/src/cls/user/cls_user_ops.h
index 7edd1bc15ce..d638896340b 100644
--- a/src/cls/user/cls_user_ops.h
+++ b/src/cls/user/cls_user_ops.h
@@ -264,4 +264,136 @@ struct cls_user_complete_stats_sync_op {
WRITE_CLASS_ENCODER(cls_user_complete_stats_sync_op)
+struct cls_user_account_resource_add_op {
+ cls_user_account_resource entry;
+ bool exclusive = false;
+ uint32_t limit = 0;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ encode(exclusive, bl);
+ encode(limit, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ decode(exclusive, bl);
+ decode(limit, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter* f) const;
+ static void generate_test_instances(std::list<cls_user_account_resource_add_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_account_resource_add_op)
+
+struct cls_user_account_resource_get_op {
+ std::string name;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(name, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(name, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter* f) const;
+ static void generate_test_instances(std::list<cls_user_account_resource_get_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_account_resource_get_op)
+
+struct cls_user_account_resource_get_ret {
+ cls_user_account_resource entry;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entry, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entry, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter* f) const;
+ static void generate_test_instances(std::list<cls_user_account_resource_get_ret*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_account_resource_get_ret)
+
+struct cls_user_account_resource_rm_op {
+ std::string name;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(name, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(name, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter* f) const;
+ static void generate_test_instances(std::list<cls_user_account_resource_rm_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_account_resource_rm_op)
+
+struct cls_user_account_resource_list_op {
+ std::string marker;
+ std::string path_prefix;
+ uint32_t max_entries = 0;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(marker, bl);
+ encode(path_prefix, bl);
+ encode(max_entries, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(marker, bl);
+ decode(path_prefix, bl);
+ decode(max_entries, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter* f) const;
+ static void generate_test_instances(std::list<cls_user_account_resource_list_op*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_account_resource_list_op)
+
+struct cls_user_account_resource_list_ret {
+ std::vector<cls_user_account_resource> entries;
+ bool truncated = false;
+ std::string marker;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(entries, bl);
+ encode(truncated, bl);
+ encode(marker, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(entries, bl);
+ decode(truncated, bl);
+ decode(marker, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter* f) const;
+ static void generate_test_instances(std::list<cls_user_account_resource_list_ret*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_account_resource_list_ret)
+
#endif
diff --git a/src/cls/user/cls_user_types.cc b/src/cls/user/cls_user_types.cc
index 0d823f0bea2..23f2044e963 100644
--- a/src/cls/user/cls_user_types.cc
+++ b/src/cls/user/cls_user_types.cc
@@ -109,3 +109,35 @@ void cls_user_header::generate_test_instances(list<cls_user_header*>& ls)
cls_user_gen_test_header(h);
ls.push_back(h);
}
+
+
+void cls_user_account_header::dump(ceph::Formatter* f) const
+{
+ encode_json("count", count, f);
+}
+
+void cls_user_account_header::generate_test_instances(std::list<cls_user_account_header*>& ls)
+{
+ ls.push_back(new cls_user_account_header);
+}
+
+void cls_user_account_resource::dump(ceph::Formatter* f) const
+{
+ encode_json("name", name, f);
+ encode_json("path", path, f);
+ // skip metadata
+}
+
+void cls_user_gen_test_resource(cls_user_account_resource& r)
+{
+ r.name = "name";
+ r.path = "path";
+}
+
+void cls_user_account_resource::generate_test_instances(std::list<cls_user_account_resource*>& ls)
+{
+ ls.push_back(new cls_user_account_resource);
+ auto p = new cls_user_account_resource;
+ cls_user_gen_test_resource(*p);
+ ls.push_back(p);
+}
diff --git a/src/cls/user/cls_user_types.h b/src/cls/user/cls_user_types.h
index a139449d3c3..8193ff4139a 100644
--- a/src/cls/user/cls_user_types.h
+++ b/src/cls/user/cls_user_types.h
@@ -216,9 +216,57 @@ struct cls_user_header {
};
WRITE_CLASS_ENCODER(cls_user_header)
+// omap header for an account index object
+struct cls_user_account_header {
+ uint32_t count = 0;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(count, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(count, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter* f) const;
+ static void generate_test_instances(std::list<cls_user_account_header*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_account_header)
+
+// account resource entry
+struct cls_user_account_resource {
+ // index by name for put/delete
+ std::string name;
+ // index by path for listing by PathPrefix
+ std::string path;
+ // additional opaque metadata depending on resource type
+ ceph::buffer::list metadata;
+
+ void encode(ceph::buffer::list& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(name, bl);
+ encode(path, bl);
+ encode(metadata, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(ceph::buffer::list::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(name, bl);
+ decode(path, bl);
+ decode(metadata, bl);
+ DECODE_FINISH(bl);
+ }
+ void dump(ceph::Formatter* f) const;
+ static void generate_test_instances(std::list<cls_user_account_resource*>& ls);
+};
+WRITE_CLASS_ENCODER(cls_user_account_resource)
+
void cls_user_gen_test_bucket(cls_user_bucket *bucket, int i);
void cls_user_gen_test_bucket_entry(cls_user_bucket_entry *entry, int i);
void cls_user_gen_test_stats(cls_user_stats *stats);
void cls_user_gen_test_header(cls_user_header *h);
+void cls_user_gen_test_resource(cls_user_account_resource& r);
#endif
diff --git a/src/common/dns_resolve.cc b/src/common/dns_resolve.cc
index a44510d6dea..435bcc657e4 100644
--- a/src/common/dns_resolve.cc
+++ b/src/common/dns_resolve.cc
@@ -56,6 +56,7 @@ DNSResolver::~DNSResolver()
#ifdef HAVE_RES_NQUERY
for (auto iter = states.begin(); iter != states.end(); ++iter) {
struct __res_state *s = *iter;
+ res_nclose(s);
delete s;
}
#endif
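The one-line fix restores the required pairing: a `__res_state` initialized with res_ninit() owns resolver resources that must be released with res_nclose() before the struct itself is freed. A minimal sketch of that lifecycle (illustrative only; includes follow glibc conventions):

```cpp
#include <netinet/in.h>
#include <resolv.h>

void resolver_lifetime_example() {
  auto* s = new struct __res_state;
  if (res_ninit(s) == 0) {   // 0 on success
    // ... res_nquery(s, ...) / res_nsearch(s, ...) ...
    res_nclose(s);           // release sockets/state held by `s`
  }
  delete s;                  // only frees the struct itself
}
```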
diff --git a/src/common/options/crimson.yaml.in b/src/common/options/crimson.yaml.in
index c52c54d5250..6938786d475 100644
--- a/src/common/options/crimson.yaml.in
+++ b/src/common/options/crimson.yaml.in
@@ -31,6 +31,11 @@ options:
desc: CPU cores on which alienstore threads will run in cpuset(7) format
flags:
- startup
+- name: crimson_osd_stat_interval
+ type: int
+ level: advanced
+ default: 0
+ desc: Report OSD status periodically in seconds, 0 to disable
- name: seastore_segment_size
type: size
desc: Segment size to use for SegmentManager
diff --git a/src/common/options/mon.yaml.in b/src/common/options/mon.yaml.in
index 2d5977331a9..075b335a08f 100644
--- a/src/common/options/mon.yaml.in
+++ b/src/common/options/mon.yaml.in
@@ -121,18 +121,6 @@ options:
flags:
- runtime
with_legacy: true
-- name: mon_cluster_log_to_syslog_level
- type: str
- level: advanced
- desc: Syslog level for cluster log messages
- default: info
- services:
- - mon
- see_also:
- - mon_cluster_log_to_syslog
- flags:
- - runtime
- with_legacy: true
- name: mon_cluster_log_to_syslog_facility
type: str
level: advanced
@@ -181,10 +169,12 @@ options:
flags:
- runtime
with_legacy: true
-- name: mon_cluster_log_file_level
+- name: mon_cluster_log_level
type: str
level: advanced
- desc: Lowest level to include is cluster log file
+ desc: Lowest level to include in the cluster log file and/or external log servers
+ long_desc: Log level to control the cluster log message verbosity for the cluster
+ log file as well as for all external entities.
default: debug
services:
- mon
diff --git a/src/common/random_string.cc b/src/common/random_string.cc
index c728956182a..9ce8ded18a3 100644
--- a/src/common/random_string.cc
+++ b/src/common/random_string.cc
@@ -125,3 +125,19 @@ std::string gen_rand_alphanumeric_plain(CephContext *cct, size_t size)
str.pop_back(); // pop the extra \0
return str;
}
+
+void gen_rand_numeric(CephContext *cct, char *dest, size_t size) /* size should be the required string size + 1 */
+{
+ static constexpr char table[] = "0123456789";
+ choose_from(cct->random(), table, dest, size-1);
+ dest[size-1] = 0;
+}
+
+std::string gen_rand_numeric(CephContext *cct, size_t size)
+{
+ std::string str;
+ str.resize(size + 1);
+ gen_rand_numeric(cct, str.data(), str.size());
+ str.pop_back(); // pop the extra \0
+ return str;
+}
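A short usage sketch for the two new helpers, assuming a valid `CephContext* cct` (an assumption; any live context would do):

```cpp
// std::string overload: returns exactly `size` random digits.
std::string code = gen_rand_numeric(cct, 6);   // e.g. "042517"

// char* overload: per the comment above, `size` must include the trailing NUL.
char buf[7];                                   // 6 digits + '\0'
gen_rand_numeric(cct, buf, sizeof(buf));
```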
diff --git a/src/common/random_string.h b/src/common/random_string.h
index b5dd9825ebf..2516425a6b9 100644
--- a/src/common/random_string.h
+++ b/src/common/random_string.h
@@ -26,6 +26,7 @@ void gen_rand_alphanumeric_lower(CephContext *cct, char *dest, size_t size);
void gen_rand_alphanumeric_upper(CephContext *cct, char *dest, size_t size);
void gen_rand_alphanumeric_no_underscore(CephContext *cct, char *dest, size_t size);
void gen_rand_alphanumeric_plain(CephContext *cct, char *dest, size_t size);
+void gen_rand_numeric(CephContext *cct, char *dest, size_t size);
// returns a std::string with 'size' random characters
std::string gen_rand_alphanumeric(CephContext *cct, size_t size);
@@ -33,3 +34,4 @@ std::string gen_rand_alphanumeric_lower(CephContext *cct, size_t size);
std::string gen_rand_alphanumeric_upper(CephContext *cct, size_t size);
std::string gen_rand_alphanumeric_no_underscore(CephContext *cct, size_t size);
std::string gen_rand_alphanumeric_plain(CephContext *cct, size_t size);
+std::string gen_rand_numeric(CephContext *cct, size_t size);
diff --git a/src/crimson/common/operation.h b/src/crimson/common/operation.h
index bd3d79eec42..82d0d548442 100644
--- a/src/crimson/common/operation.h
+++ b/src/crimson/common/operation.h
@@ -528,18 +528,18 @@ public:
if (wait_fut.has_value()) {
return wait_fut.value().then([this, &stage, t=std::move(t)] () mutable {
auto fut = t.maybe_record_blocking(stage.enter(t), stage);
- exit();
return std::move(fut).then(
[this, t=std::move(t)](auto &&barrier_ref) mutable {
+ exit();
barrier = std::move(barrier_ref);
return seastar::now();
});
});
} else {
auto fut = t.maybe_record_blocking(stage.enter(t), stage);
- exit();
return std::move(fut).then(
[this, t=std::move(t)](auto &&barrier_ref) mutable {
+ exit();
barrier = std::move(barrier_ref);
return seastar::now();
});
diff --git a/src/crimson/common/utility.h b/src/crimson/common/utility.h
index 86b30815585..fae53cb6bd0 100644
--- a/src/crimson/common/utility.h
+++ b/src/crimson/common/utility.h
@@ -5,6 +5,8 @@
#include <type_traits>
+#include <seastar/core/metrics_api.hh>
+
namespace _impl {
template <class T> struct always_false : std::false_type {};
};
@@ -36,3 +38,16 @@ auto apply_method_to_tuple(Obj &obj, Method method, ArgTuple &&tuple) {
obj, method, std::forward<ArgTuple>(tuple),
std::make_index_sequence<tuple_size>());
}
+
+inline double get_reactor_utilization() {
+ auto &value_map = seastar::metrics::impl::get_value_map();
+ auto found = value_map.find("reactor_utilization");
+ assert(found != value_map.end());
+ auto &[full_name, metric_family] = *found;
+ std::ignore = full_name;
+ assert(metric_family.size() == 1);
+ const auto& [labels, metric] = *metric_family.begin();
+ std::ignore = labels;
+ auto value = (*metric)();
+ return value.d();
+}
diff --git a/src/crimson/osd/osd.cc b/src/crimson/osd/osd.cc
index 939fbc59beb..005e8538ed1 100644
--- a/src/crimson/osd/osd.cc
+++ b/src/crimson/osd/osd.cc
@@ -387,6 +387,26 @@ seastar::future<> OSD::start()
std::ref(osd_states));
});
}).then([this, FNAME] {
+ auto stats_seconds = local_conf().get_val<int64_t>("crimson_osd_stat_interval");
+ if (stats_seconds > 0) {
+ shard_stats.resize(seastar::smp::count);
+ stats_timer.set_callback([this, FNAME] {
+ std::ignore = shard_services.invoke_on_all(
+ [this](auto &local_service) {
+ auto stats = local_service.report_stats();
+ shard_stats[seastar::this_shard_id()] = stats;
+ }).then([this, FNAME] {
+ std::ostringstream oss;
+ for (const auto &stats : shard_stats) {
+ oss << int(stats.reactor_utilization);
+ oss << ",";
+ }
+ INFO("reactor_utilizations: {}", oss.str());
+ });
+ });
+ stats_timer.arm_periodic(std::chrono::seconds(stats_seconds));
+ }
+
heartbeat.reset(new Heartbeat{
whoami, get_shard_services(),
*monc, *hb_front_msgr, *hb_back_msgr});
@@ -1320,6 +1340,7 @@ seastar::future<> OSD::restart()
{
beacon_timer.cancel();
tick_timer.cancel();
+ stats_timer.cancel();
return pg_shard_manager.set_up_epoch(
0
).then([this] {
diff --git a/src/crimson/osd/osd.h b/src/crimson/osd/osd.h
index fa3b0293072..7b0a08fc3b9 100644
--- a/src/crimson/osd/osd.h
+++ b/src/crimson/osd/osd.h
@@ -128,6 +128,9 @@ class OSD final : public crimson::net::Dispatcher,
std::unique_ptr<Heartbeat> heartbeat;
seastar::timer<seastar::lowres_clock> tick_timer;
+ seastar::timer<seastar::lowres_clock> stats_timer;
+ std::vector<ShardServices::shard_stats_t> shard_stats;
+
const char** get_tracked_conf_keys() const final;
void handle_conf_change(const ConfigProxy& conf,
const std::set<std::string> &changed) final;
diff --git a/src/crimson/osd/shard_services.h b/src/crimson/osd/shard_services.h
index 57dff9d2ee3..e00f3441319 100644
--- a/src/crimson/osd/shard_services.h
+++ b/src/crimson/osd/shard_services.h
@@ -402,6 +402,13 @@ public:
return local_state.store;
}
+ struct shard_stats_t {
+ double reactor_utilization;
+ };
+ shard_stats_t report_stats() {
+ return {get_reactor_utilization()};
+ }
+
auto remove_pg(spg_t pgid) {
local_state.pg_map.remove_pg(pgid);
return pg_to_shard_mapping.remove_pg_mapping(pgid);
diff --git a/src/include/rbd/librbd.hpp b/src/include/rbd/librbd.hpp
index 5d307cdedf5..6d97d1087ad 100644
--- a/src/include/rbd/librbd.hpp
+++ b/src/include/rbd/librbd.hpp
@@ -532,6 +532,14 @@ public:
Image();
~Image();
+ // non-copyable
+ Image(const Image& rhs) = delete;
+ Image& operator=(const Image& rhs) = delete;
+
+ // moveable
+ Image(Image&& rhs) noexcept;
+ Image& operator=(Image&& rhs) noexcept;
+
int close();
int aio_close(RBD::AioCompletion *c);
@@ -854,9 +862,6 @@ public:
private:
friend class RBD;
- Image(const Image& rhs);
- const Image& operator=(const Image& rhs);
-
image_ctx_t ctx;
};
diff --git a/src/librbd/librbd.cc b/src/librbd/librbd.cc
index 132a0084a9f..8749a04d2d5 100644
--- a/src/librbd/librbd.cc
+++ b/src/librbd/librbd.cc
@@ -51,6 +51,7 @@
#include "librbd/io/ReadResult.h"
#include <algorithm>
#include <string>
+#include <utility>
#include <vector>
#ifdef WITH_LTTNG
@@ -1609,6 +1610,17 @@ namespace librbd {
close();
}
+ Image::Image(Image&& rhs) noexcept : ctx{std::exchange(rhs.ctx, nullptr)}
+ {
+ }
+
+ Image& Image::operator=(Image&& rhs) noexcept
+ {
+ Image tmp(std::move(rhs));
+ std::swap(ctx, tmp.ctx);
+ return *this;
+ }
+
int Image::close()
{
int r = 0;
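With copying deleted and moving added, librbd::Image handles can now be stored in containers and transferred between scopes. A hypothetical sketch, assuming an open librados::IoCtx `ioctx` and an existing image named "myimage":

```cpp
// Move an open Image into a vector; a copy would no longer compile.
librbd::RBD rbd;
librbd::Image img;
int r = rbd.open(ioctx, img, "myimage");
if (r < 0) { /* handle error */ }

std::vector<librbd::Image> images;
images.push_back(std::move(img));  // moved-from `img` is left with a null ctx
```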
diff --git a/src/mds/MDCache.cc b/src/mds/MDCache.cc
index 17e89e5863b..f65f81d217e 100644
--- a/src/mds/MDCache.cc
+++ b/src/mds/MDCache.cc
@@ -12639,6 +12639,9 @@ void MDCache::force_readonly()
mds->mdlog->flush();
}
+void MDCache::maybe_fragment(CDir *dir) {
+ mds->balancer->maybe_fragment(dir, false);
+}
// ==============================================================
// debug crap
diff --git a/src/mds/MDCache.h b/src/mds/MDCache.h
index d4aed9be55e..b7229e10b2c 100644
--- a/src/mds/MDCache.h
+++ b/src/mds/MDCache.h
@@ -275,6 +275,8 @@ class MDCache {
bool is_readonly() { return readonly; }
void force_readonly();
+ void maybe_fragment(CDir* dir);
+
static file_layout_t gen_default_file_layout(const MDSMap &mdsmap);
static file_layout_t gen_default_log_layout(const MDSMap &mdsmap);
diff --git a/src/mds/MDSAuthCaps.cc b/src/mds/MDSAuthCaps.cc
index e5d8c485401..5e4bd995175 100644
--- a/src/mds/MDSAuthCaps.cc
+++ b/src/mds/MDSAuthCaps.cc
@@ -382,25 +382,46 @@ bool MDSAuthCaps::parse(string_view str, ostream *err)
}
}
-bool MDSAuthCaps::merge(MDSAuthCaps newcap)
+/* Check if the "cap grant" is already present in this cap object. If it is,
+ * return false. If not, add it and return true.
+ *
+ * ng = new grant, new mds cap grant.
+ */
+bool MDSAuthCaps::merge_one_cap_grant(MDSCapGrant ng)
{
- ceph_assert(newcap.grants.size() == 1);
- auto ng = newcap.grants[0];
-
+ // check if "ng" is already present in this cap object.
for (auto& g : grants) {
if (g.match.fs_name == ng.match.fs_name && g.match.path == ng.match.path) {
- if (g.spec.get_caps() == ng.spec.get_caps()) {
- // no update required. maintaining idempotency.
+ if (g.spec.get_caps() == ng.spec.get_caps() &&
+ g.match.root_squash == ng.match.root_squash) {
+ // Since all components of MDS caps (fsname, path, perm/spec and
+ // root_squash) matched, it means cap same as "ng" is present in MDS
+ // cap grant list. No need to look further in MDS cap grant list.
+ // No update is required. Maintain idempotency.
return false;
- } else {
- // cap for given fs name is present, let's update it.
+ }
+
+ // fsname and path match but perm/spec is different. update the cap
+ // with new perm/spec.
+ if (g.spec.get_caps() != ng.spec.get_caps()) {
g.spec.set_caps(ng.spec.get_caps());
- return true;
}
+
+ // fsname and path match but value of root_squash is different. update
+ // its value.
+ if (g.match.root_squash != ng.match.root_squash) {
+ g.match.root_squash = ng.match.root_squash;
+ }
+
+ // Since fsname and path matched and either perm/spec or root_squash
+ // or both has been updated, cap from "ng" has been incorporated
+ // into this cap grant list. Time to return.
+ return true;
}
}
- // cap for given fs name and/or path is absent, let's add a new cap for it.
+ // Since a cap grant like "ng" is absent in this cap object's grant list,
+ // add "ng" to the cap grant list.
grants.push_back(MDSCapGrant(
MDSCapSpec(ng.spec.get_caps()),
MDSCapMatch(ng.match.fs_name, ng.match.path, ng.match.root_squash),
@@ -409,6 +430,29 @@ bool MDSAuthCaps::merge(MDSAuthCaps newcap)
return true;
}
+/* A user can pass one or more MDS caps that it wishes to add to an entity's keyring.
+ * Merge all of these caps one by one. Return value indicates whether or not
+ * AuthMonitor must update the entity's keyring.
+ *
+ * If none of the caps merges (that is, the underlying helper method returns
+ * false for every attempted merge), no update is required. Return false so that
+ * AuthMonitor doesn't run the update procedure for caps.
+ *
+ * If even one cap is merged (that is, underlying method returns true even
+ * once), an update to the entity's keyring is required. Return true so that
+ * AuthMonitor runs the update procedure.
+ */
+bool MDSAuthCaps::merge(MDSAuthCaps newcaps)
+{
+ bool were_caps_merged = false;
+
+ for (auto& ng : newcaps.grants) {
+ were_caps_merged |= merge_one_cap_grant(ng);
+ }
+
+ return were_caps_merged;
+}
+
string MDSCapMatch::to_string()
{
string str = "";
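A hypothetical illustration of the merge semantics documented above: merging an identical grant is a no-op (returns false), so AuthMonitor can skip the keyring update, while a changed grant returns true. The cap strings and default construction are assumptions for the sketch:

```cpp
#include <cassert>
#include <sstream>

MDSAuthCaps current, incoming;
std::ostringstream err;
current.parse("allow rw fsname=cephfs", &err);
incoming.parse("allow rw fsname=cephfs", &err);
assert(!current.merge(incoming));   // identical grant: no update required

MDSAuthCaps changed;
changed.parse("allow r fsname=cephfs", &err);
assert(current.merge(changed));     // perms differ: grant updated, returns true
```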
diff --git a/src/mds/MDSAuthCaps.h b/src/mds/MDSAuthCaps.h
index c1d410eaf76..2b696085d5b 100644
--- a/src/mds/MDSAuthCaps.h
+++ b/src/mds/MDSAuthCaps.h
@@ -259,7 +259,8 @@ public:
void set_allow_all();
bool parse(std::string_view str, std::ostream *err);
- bool merge(MDSAuthCaps newcap);
+ bool merge_one_cap_grant(MDSCapGrant ng);
+ bool merge(MDSAuthCaps newcaps);
bool allow_all() const;
bool is_capable(std::string_view inode_path,
diff --git a/src/mds/ScrubStack.cc b/src/mds/ScrubStack.cc
index bc70939784b..28392f53366 100644
--- a/src/mds/ScrubStack.cc
+++ b/src/mds/ScrubStack.cc
@@ -470,6 +470,7 @@ void ScrubStack::scrub_dirfrag(CDir *dir, bool *done)
<< " log and `damage ls` output for details";
}
+ mdcache->maybe_fragment(dir);
dir->scrub_finished();
dir->auth_unpin(this);
diff --git a/src/mon/AuthMonitor.cc b/src/mon/AuthMonitor.cc
index 88f843f3e4d..b20eac8399e 100644
--- a/src/mon/AuthMonitor.cc
+++ b/src/mon/AuthMonitor.cc
@@ -1867,6 +1867,9 @@ AuthMonitor::caps_update AuthMonitor::_gen_wanted_caps(EntityAuth& e_auth,
map<string, string>& newcaps, ostream& out)
{
caps_update is_caps_update_reqd = CAPS_UPDATE_NOT_REQD;
+ caps_update is_caps_update_reqd_mon = CAPS_UPDATE_NOT_REQD;
+ caps_update is_caps_update_reqd_osd = CAPS_UPDATE_NOT_REQD;
+ caps_update is_caps_update_reqd_mds = CAPS_UPDATE_NOT_REQD;
if (e_auth.caps.empty()) {
return CAPS_UPDATE_REQD;
@@ -1888,15 +1891,29 @@ AuthMonitor::caps_update AuthMonitor::_gen_wanted_caps(EntityAuth& e_auth,
}
if (cap_entity == "mon") {
- is_caps_update_reqd = _merge_caps<MonCap>(cap_entity, new_cap_str,
+ is_caps_update_reqd_mon = _merge_caps<MonCap>(cap_entity, new_cap_str,
cur_cap_str, newcaps, out);
} else if (cap_entity == "osd") {
- is_caps_update_reqd = _merge_caps<OSDCap>(cap_entity, new_cap_str,
+ is_caps_update_reqd_osd = _merge_caps<OSDCap>(cap_entity, new_cap_str,
cur_cap_str, newcaps, out);
} else if (cap_entity == "mds") {
- is_caps_update_reqd = _merge_caps<MDSAuthCaps>(cap_entity, new_cap_str,
- cur_cap_str, newcaps, out);
- }
+ is_caps_update_reqd_mds = _merge_caps<MDSAuthCaps>(cap_entity,
+ new_cap_str, cur_cap_str, newcaps, out);
+ }
+ }
+
+ // if any one of MON, OSD or MDS caps failed to parse, it is pointless
+ // to run the update procedure.
+ if (is_caps_update_reqd_mon == CAPS_PARSING_ERR ||
+ is_caps_update_reqd_osd == CAPS_PARSING_ERR ||
+ is_caps_update_reqd_mds == CAPS_PARSING_ERR) {
+ is_caps_update_reqd = CAPS_PARSING_ERR;
+ // even if any one of MON, OSD or MDS caps needs an update, the update
+ // procedure needs to be executed.
+ } else if (is_caps_update_reqd_mon == CAPS_UPDATE_REQD ||
+ is_caps_update_reqd_osd == CAPS_UPDATE_REQD ||
+ is_caps_update_reqd_mds == CAPS_UPDATE_REQD) {
+ is_caps_update_reqd = CAPS_UPDATE_REQD;
}
return is_caps_update_reqd;
diff --git a/src/mon/LogMonitor.cc b/src/mon/LogMonitor.cc
index ad18a3aa1d1..654ee4c6910 100644
--- a/src/mon/LogMonitor.cc
+++ b/src/mon/LogMonitor.cc
@@ -208,11 +208,10 @@ ceph::logging::JournaldClusterLogger &LogMonitor::log_channel_info::get_journald
void LogMonitor::log_channel_info::clear()
{
log_to_syslog.clear();
- syslog_level.clear();
syslog_facility.clear();
log_file.clear();
expanded_log_file.clear();
- log_file_level.clear();
+ log_level.clear();
log_to_graylog.clear();
log_to_graylog_host.clear();
log_to_graylog_port.clear();
@@ -356,16 +355,25 @@ void LogMonitor::log_external(const LogEntry& le)
channel = CLOG_CHANNEL_CLUSTER;
}
+ string level = channels.get_log_level(channel);
+ if (int log_level = LogEntry::str_to_level(level); log_level > le.prio) {
+ // Do not log this LogEntry to any external entity if le.prio is
+ // below the channel's configured log level.
+ return;
+ }
+
+ if (g_conf().get_val<bool>("mon_cluster_log_to_stderr")) {
+ cerr << channel << " " << le << std::endl;
+ }
+
if (channels.do_log_to_syslog(channel)) {
- string level = channels.get_level(channel);
string facility = channels.get_facility(channel);
if (level.empty() || facility.empty()) {
derr << __func__ << " unable to log to syslog -- level or facility"
<< " not defined (level: " << level << ", facility: "
<< facility << ")" << dendl;
} else {
- le.log_to_syslog(channels.get_level(channel),
- channels.get_facility(channel));
+ le.log_to_syslog(level, facility);
}
}
@@ -1192,16 +1200,6 @@ void LogMonitor::update_log_channels()
}
r = get_conf_str_map_helper(
- g_conf().get_val<string>("mon_cluster_log_to_syslog_level"),
- oss, &channels.syslog_level,
- CLOG_CONFIG_DEFAULT_KEY);
- if (r < 0) {
- derr << __func__ << " error parsing 'mon_cluster_log_to_syslog_level'"
- << dendl;
- return;
- }
-
- r = get_conf_str_map_helper(
g_conf().get_val<string>("mon_cluster_log_to_syslog_facility"),
oss, &channels.syslog_facility,
CLOG_CONFIG_DEFAULT_KEY);
@@ -1221,11 +1219,11 @@ void LogMonitor::update_log_channels()
}
r = get_conf_str_map_helper(
- g_conf().get_val<string>("mon_cluster_log_file_level"), oss,
- &channels.log_file_level,
+ g_conf().get_val<string>("mon_cluster_log_level"), oss,
+ &channels.log_level,
CLOG_CONFIG_DEFAULT_KEY);
if (r < 0) {
- derr << __func__ << " error parsing 'mon_cluster_log_file_level'"
+ derr << __func__ << " error parsing 'mon_cluster_log_level'"
<< dendl;
return;
}
@@ -1279,10 +1277,9 @@ void LogMonitor::handle_conf_change(const ConfigProxy& conf,
const std::set<std::string> &changed)
{
if (changed.count("mon_cluster_log_to_syslog") ||
- changed.count("mon_cluster_log_to_syslog_level") ||
changed.count("mon_cluster_log_to_syslog_facility") ||
changed.count("mon_cluster_log_file") ||
- changed.count("mon_cluster_log_file_level") ||
+ changed.count("mon_cluster_log_level") ||
changed.count("mon_cluster_log_to_graylog") ||
changed.count("mon_cluster_log_to_graylog_host") ||
changed.count("mon_cluster_log_to_graylog_port") ||
diff --git a/src/mon/LogMonitor.h b/src/mon/LogMonitor.h
index 1eccaa5c203..e9858523a73 100644
--- a/src/mon/LogMonitor.h
+++ b/src/mon/LogMonitor.h
@@ -57,11 +57,10 @@ private:
struct log_channel_info {
std::map<std::string,std::string> log_to_syslog;
- std::map<std::string,std::string> syslog_level;
std::map<std::string,std::string> syslog_facility;
std::map<std::string,std::string> log_file;
std::map<std::string,std::string> expanded_log_file;
- std::map<std::string,std::string> log_file_level;
+ std::map<std::string,std::string> log_level;
std::map<std::string,std::string> log_to_graylog;
std::map<std::string,std::string> log_to_graylog_host;
std::map<std::string,std::string> log_to_graylog_port;
@@ -84,9 +83,8 @@ private:
*/
void expand_channel_meta() {
expand_channel_meta(log_to_syslog);
- expand_channel_meta(syslog_level);
expand_channel_meta(syslog_facility);
- expand_channel_meta(log_file_level);
+ expand_channel_meta(log_level);
}
void expand_channel_meta(std::map<std::string,std::string> &m);
std::string expand_channel_meta(const std::string &input,
@@ -99,15 +97,10 @@ private:
&CLOG_CONFIG_DEFAULT_KEY);
}
- std::string get_level(const std::string &channel) {
- return get_str_map_key(syslog_level, channel,
- &CLOG_CONFIG_DEFAULT_KEY);
- }
-
std::string get_log_file(const std::string &channel);
- std::string get_log_file_level(const std::string &channel) {
- return get_str_map_key(log_file_level, channel,
+ std::string get_log_level(const std::string &channel) {
+ return get_str_map_key(log_level, channel,
&CLOG_CONFIG_DEFAULT_KEY);
}
@@ -192,10 +185,9 @@ private:
const char **get_tracked_conf_keys() const override {
static const char* KEYS[] = {
"mon_cluster_log_to_syslog",
- "mon_cluster_log_to_syslog_level",
"mon_cluster_log_to_syslog_facility",
"mon_cluster_log_file",
- "mon_cluster_log_file_level",
+ "mon_cluster_log_level",
"mon_cluster_log_to_graylog",
"mon_cluster_log_to_graylog_host",
"mon_cluster_log_to_graylog_port",
diff --git a/src/os/bluestore/bluestore_types.h b/src/os/bluestore/bluestore_types.h
index 500a78a143f..7032ae904e9 100644
--- a/src/os/bluestore/bluestore_types.h
+++ b/src/os/bluestore/bluestore_types.h
@@ -1382,7 +1382,7 @@ struct sb_info_space_efficient_map_t {
[](const sb_info_t& a, const uint64_t& b) {
return a < b;
});
- if (it->get_sbid() == id) {
+ if (it != aux_items.end() && it->get_sbid() == id) {
return it;
}
}
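The fix guards a classic pattern: std::lower_bound returns end() when every element compares less than the probe, and dereferencing that iterator is undefined behavior. A generic illustration of the corrected shape:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

bool contains(const std::vector<uint64_t>& sorted, uint64_t id) {
  auto it = std::lower_bound(sorted.begin(), sorted.end(), id);
  return it != sorted.end() && *it == id;  // check end() before dereferencing
}
```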
diff --git a/src/osd/scrubber/osd_scrub.cc b/src/osd/scrubber/osd_scrub.cc
index d2d2db3ff72..48f978b18e3 100644
--- a/src/osd/scrubber/osd_scrub.cc
+++ b/src/osd/scrubber/osd_scrub.cc
@@ -266,10 +266,10 @@ OsdScrub::LoadTracker::LoadTracker(
///\todo replace with Knuth's algo (to reduce the numerical error)
std::optional<double> OsdScrub::LoadTracker::update_load_average()
{
- int hb_interval = conf->osd_heartbeat_interval;
+ auto hb_interval = conf->osd_heartbeat_interval;
int n_samples = std::chrono::duration_cast<seconds>(24h).count();
if (hb_interval > 1) {
- n_samples = std::max(n_samples / hb_interval, 1);
+ n_samples = std::max(n_samples / hb_interval, 1L);
}
double loadavg;
diff --git a/src/osd/scrubber/pg_scrubber.cc b/src/osd/scrubber/pg_scrubber.cc
index 9fe7295201d..e1604222c2c 100644
--- a/src/osd/scrubber/pg_scrubber.cc
+++ b/src/osd/scrubber/pg_scrubber.cc
@@ -1504,7 +1504,7 @@ void PgScrubber::apply_snap_mapper_fixes(
void PgScrubber::maps_compare_n_cleanup()
{
- m_pg->add_objects_scrubbed_count(m_be->get_primary_scrubmap().objects.size());
+ m_pg->add_objects_scrubbed_count(std::ssize(m_be->get_primary_scrubmap().objects));
auto required_fixes =
m_be->scrub_compare_maps(m_end.is_max(), get_snap_mapper_accessor());
diff --git a/src/osd/scrubber/scrub_machine.cc b/src/osd/scrubber/scrub_machine.cc
index 66ba0751d0e..acdddbd18eb 100644
--- a/src/osd/scrubber/scrub_machine.cc
+++ b/src/osd/scrubber/scrub_machine.cc
@@ -214,6 +214,7 @@ sc::result Session::react(const IntervalChanged&)
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "Session::react(const IntervalChanged&)" << dendl;
+ ceph_assert(m_reservations);
m_reservations->discard_remote_reservations();
return transit<NotActive>();
}
@@ -267,7 +268,9 @@ sc::result ReservingReplicas::react(const ReplicaGrant& ev)
dout(10) << "ReservingReplicas::react(const ReplicaGrant&)" << dendl;
const auto& m = ev.m_op->get_req<MOSDScrubReserve>();
- if (context<Session>().m_reservations->handle_reserve_grant(*m, ev.m_from)) {
+ auto& session = context<Session>();
+ ceph_assert(session.m_reservations);
+ if (session.m_reservations->handle_reserve_grant(*m, ev.m_from)) {
// we are done with the reservation process
return transit<ActiveScrubbing>();
}
@@ -279,6 +282,7 @@ sc::result ReservingReplicas::react(const ReplicaReject& ev)
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
auto& session = context<Session>();
dout(10) << "ReservingReplicas::react(const ReplicaReject&)" << dendl;
+ ceph_assert(session.m_reservations);
const auto m = ev.m_op->get_req<MOSDScrubReserve>();
// Verify that the message is from the replica we were expecting a reply from,
@@ -306,6 +310,8 @@ sc::result ReservingReplicas::react(const ReservationTimeout&)
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
auto& session = context<Session>();
dout(10) << "ReservingReplicas::react(const ReservationTimeout&)" << dendl;
+ ceph_assert(session.m_reservations);
+
session.m_reservations->log_failure_and_duration(scrbcnt_resrv_timed_out);
const auto msg = fmt::format(
diff --git a/src/osd/scrubber/scrub_reservations.cc b/src/osd/scrubber/scrub_reservations.cc
index b9cc2d096ad..ec9ac598ea5 100644
--- a/src/osd/scrubber/scrub_reservations.cc
+++ b/src/osd/scrubber/scrub_reservations.cc
@@ -94,11 +94,12 @@ void ReplicaReservations::discard_remote_reservations()
void ReplicaReservations::log_success_and_duration()
{
+ ceph_assert(m_process_started_at.has_value());
auto logged_duration = ScrubClock::now() - m_process_started_at.value();
m_perf_set.tinc(scrbcnt_resrv_successful_elapsed, logged_duration);
m_perf_set.inc(scrbcnt_resrv_success);
m_osds->logger->hinc(
- l_osd_scrub_reservation_dur_hist, m_sorted_secondaries.size(),
+ l_osd_scrub_reservation_dur_hist, std::ssize(m_sorted_secondaries),
logged_duration.count());
m_process_started_at.reset();
}
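Both scrubber hunks swap `container.size()` for `std::ssize(container)` (C++20), which yields a signed value and avoids implicit unsigned-to-signed conversions when feeding counters that take signed integers. A minimal illustration:

```cpp
#include <iterator>
#include <type_traits>
#include <vector>

void ssize_demo() {
  std::vector<int> v{1, 2, 3};
  auto u = v.size();        // std::size_t (unsigned)
  auto s = std::ssize(v);   // std::ptrdiff_t (signed)
  static_assert(!std::is_signed_v<decltype(u)>);
  static_assert(std::is_signed_v<decltype(s)>);
  (void)u; (void)s;
}
```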
diff --git a/src/pybind/mgr/cephadm/templates/services/prometheus/prometheus.yml.j2 b/src/pybind/mgr/cephadm/templates/services/prometheus/prometheus.yml.j2
index 931913668ae..faccc8f6de2 100644
--- a/src/pybind/mgr/cephadm/templates/services/prometheus/prometheus.yml.j2
+++ b/src/pybind/mgr/cephadm/templates/services/prometheus/prometheus.yml.j2
@@ -41,6 +41,10 @@ scrape_configs:
tls_config:
ca_file: mgr_prometheus_cert.pem
honor_labels: true
+ relabel_configs:
+ - source_labels: [instance]
+ target_label: instance
+ replacement: 'ceph_cluster'
http_sd_configs:
- url: {{ mgr_prometheus_sd_url }}
basic_auth:
@@ -54,6 +58,9 @@ scrape_configs:
- source_labels: [__address__]
target_label: cluster
replacement: {{ cluster_fsid }}
+ - source_labels: [instance]
+ target_label: instance
+ replacement: 'ceph_cluster'
http_sd_configs:
- url: {{ mgr_prometheus_sd_url }}
{% endif %}
diff --git a/src/pybind/mgr/cephadm/tests/test_services.py b/src/pybind/mgr/cephadm/tests/test_services.py
index b62cc68a6a5..325c8197b18 100644
--- a/src/pybind/mgr/cephadm/tests/test_services.py
+++ b/src/pybind/mgr/cephadm/tests/test_services.py
@@ -744,6 +744,9 @@ class TestMonitoring:
- source_labels: [__address__]
target_label: cluster
replacement: fsid
+ - source_labels: [instance]
+ target_label: instance
+ replacement: 'ceph_cluster'
http_sd_configs:
- url: http://[::1]:8765/sd/prometheus/sd-config?service=mgr-prometheus
@@ -904,6 +907,10 @@ class TestMonitoring:
tls_config:
ca_file: mgr_prometheus_cert.pem
honor_labels: true
+ relabel_configs:
+ - source_labels: [instance]
+ target_label: instance
+ replacement: 'ceph_cluster'
http_sd_configs:
- url: https://[::1]:8765/sd/prometheus/sd-config?service=mgr-prometheus
basic_auth:
diff --git a/src/pybind/mgr/dashboard/controllers/multi_cluster.py b/src/pybind/mgr/dashboard/controllers/multi_cluster.py
index de29496e5e7..75095dcbd46 100644
--- a/src/pybind/mgr/dashboard/controllers/multi_cluster.py
+++ b/src/pybind/mgr/dashboard/controllers/multi_cluster.py
@@ -2,13 +2,16 @@
import base64
import json
+import re
import time
+from urllib.parse import urlparse
import requests
from .. import mgr
from ..exceptions import DashboardException
from ..security import Scope
+from ..services.orchestrator import OrchClient
from ..settings import Settings
from ..tools import configure_cors
from . import APIDoc, APIRouter, CreatePermission, DeletePermission, Endpoint, \
@@ -69,9 +72,11 @@ class MultiCluster(RESTController):
cluster_token = self.check_cluster_connection(url, payload, username,
ssl_verify, ssl_certificate)
+ cors_endpoints_string = self.get_cors_endpoints_string(hub_url)
+
self._proxy('PUT', url, 'ui-api/multi-cluster/set_cors_endpoint',
- payload={'url': hub_url}, token=cluster_token, verify=ssl_verify,
- cert=ssl_certificate)
+ payload={'url': cors_endpoints_string}, token=cluster_token,
+ verify=ssl_verify, cert=ssl_certificate)
fsid = self._proxy('GET', url, 'api/health/get_cluster_fsid', token=cluster_token)
@@ -89,18 +94,38 @@ class MultiCluster(RESTController):
verify=ssl_verify, cert=ssl_certificate)
# add prometheus targets
- prometheus_url = self._proxy('GET', url, 'api/settings/PROMETHEUS_API_HOST',
+ prometheus_url = self._proxy('GET', url, 'api/multi-cluster/get_prometheus_api_url',
token=cluster_token)
- _set_prometheus_targets(prometheus_url['value'])
+ _set_prometheus_targets(prometheus_url)
self.set_multi_cluster_config(fsid, username, url, cluster_alias,
- cluster_token, prometheus_url['value'],
+ cluster_token, prometheus_url,
ssl_verify, ssl_certificate)
return True
return False
+ def get_cors_endpoints_string(self, hub_url):
+ parsed_url = urlparse(hub_url)
+ hostname = parsed_url.hostname
+ cors_endpoints_set = set()
+ cors_endpoints_set.add(hub_url)
+
+ orch = OrchClient.instance()
+ inventory_hosts = [host.to_json() for host in orch.hosts.list()]
+
+ for host in inventory_hosts:
+ host_addr = host['addr']
+ host_ip_url = hub_url.replace(hostname, host_addr)
+ host_hostname_url = hub_url.replace(hostname, host['hostname'])
+
+ cors_endpoints_set.add(host_ip_url)
+ cors_endpoints_set.add(host_hostname_url)
+
+ cors_endpoints_string = ", ".join(cors_endpoints_set)
+ return cors_endpoints_string
+
def check_cluster_connection(self, url, payload, username, ssl_verify, ssl_certificate):
try:
content = self._proxy('POST', url, 'api/auth', payload=payload,
@@ -320,6 +345,25 @@ class MultiCluster(RESTController):
clusters_token_map = json.loads(clustersTokenMap)
return self.check_token_status_array(clusters_token_map)
+ @Endpoint()
+ @ReadPermission
+ def get_prometheus_api_url(self):
+ prometheus_url = Settings.PROMETHEUS_API_HOST
+ if prometheus_url is not None:
+ # check if the url is already in IP format
+ pattern = r'^https?://(?:\d{1,3}\.){3}\d{1,3}:\d+$'
+ valid_ip_url = bool(re.match(pattern, prometheus_url))
+ if not valid_ip_url:
+ parsed_url = urlparse(prometheus_url)
+ hostname = parsed_url.hostname
+ orch = OrchClient.instance()
+ inventory_hosts = [host.to_json() for host in orch.hosts.list()]
+ for host in inventory_hosts:
+ if host['hostname'] == hostname or host['hostname'] in hostname:
+ node_ip = host['addr']
+ prometheus_url = prometheus_url.replace(hostname, node_ip)
+ return prometheus_url
+
@UIRouter('/multi-cluster', Scope.CONFIG_OPT)
class MultiClusterUi(RESTController):
diff --git a/src/pybind/mgr/dashboard/controllers/rgw.py b/src/pybind/mgr/dashboard/controllers/rgw.py
index 4df642a9c73..4455fbee669 100644
--- a/src/pybind/mgr/dashboard/controllers/rgw.py
+++ b/src/pybind/mgr/dashboard/controllers/rgw.py
@@ -858,9 +858,12 @@ edit_role_form = Form(path='/edit',
"MaxSessionDuration": {'cellTemplate': 'duration'},
"RoleId": {'isHidden': True},
"AssumeRolePolicyDocument": {'isHidden': True},
- "PermissionPolicies": {'isHidden': True}
+ "PermissionPolicies": {'isHidden': True},
+ "Description": {'isHidden': True},
+ "AccountId": {'isHidden': True}
},
- detail_columns=['RoleId', 'AssumeRolePolicyDocument', 'PermissionPolicies'],
+ detail_columns=['RoleId', 'Description',
+ 'AssumeRolePolicyDocument', 'PermissionPolicies', 'AccountId'],
meta=CRUDMeta()
)
class RgwUserRole(NamedTuple):
@@ -872,6 +875,8 @@ class RgwUserRole(NamedTuple):
MaxSessionDuration: int
AssumeRolePolicyDocument: str
PermissionPolicies: List
+ Description: str
+ AccountId: str
@APIRouter('/rgw/realm', Scope.RGW)
diff --git a/src/pybind/mgr/dashboard/frontend/cypress/e2e/pools/pools.po.ts b/src/pybind/mgr/dashboard/frontend/cypress/e2e/pools/pools.po.ts
index 7cca96aa8f4..af46355ff1c 100644
--- a/src/pybind/mgr/dashboard/frontend/cypress/e2e/pools/pools.po.ts
+++ b/src/pybind/mgr/dashboard/frontend/cypress/e2e/pools/pools.po.ts
@@ -44,7 +44,6 @@ export class PoolPageHelper extends PageHelper {
edit_pool_configuration(name: string, bpsLimit: string) {
this.navigateEdit(name);
- cy.get('.collapsible').click();
cy.get('cd-rbd-configuration-form')
.get('input[name=rbd_qos_bps_limit]')
.clear()
@@ -53,7 +52,6 @@ export class PoolPageHelper extends PageHelper {
this.navigateEdit(name);
- cy.get('.collapsible').click();
cy.get('cd-rbd-configuration-form')
.get('input[name=rbd_qos_bps_limit]')
.should('have.value', bpsLimit);
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-configuration-form/rbd-configuration-form.component.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-configuration-form/rbd-configuration-form.component.ts
index 7b5fe992f28..27408909b12 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-configuration-form/rbd-configuration-form.component.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-configuration-form/rbd-configuration-form.component.ts
@@ -69,7 +69,7 @@ export class RbdConfigurationFormComponent implements OnInit {
this.rbdConfigurationService
.getWritableSections()
- .forEach((section) => (this.sectionVisibility[section.class] = false));
+ .forEach((section) => (this.sectionVisibility[section.class] = true));
}
getDirtyValues(includeLocalValues = false, localFieldType?: RbdConfigurationSourceField) {
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form.component.html b/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form.component.html
index 690eb9c1282..af6cd396365 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form.component.html
+++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/rbd-form/rbd-form.component.html
@@ -219,29 +219,6 @@
</div>
</div>
- <!-- Features -->
- <div class="form-group row"
- formGroupName="features">
- <label i18n
- class="cd-col-form-label"
- for="features">Features</label>
- <div class="cd-col-form-input">
- <div class="custom-control custom-checkbox"
- *ngFor="let feature of featuresList">
- <input type="checkbox"
- class="custom-control-input"
- id="{{ feature.key }}"
- name="{{ feature.key }}"
- formControlName="{{ feature.key }}">
- <label class="custom-control-label"
- for="{{ feature.key }}">{{ feature.desc }}</label>
- <cd-helper *ngIf="feature.helperHtml"
- html="{{ feature.helperHtml }}">
- </cd-helper>
- </div>
- </div>
- </div>
-
<!-- Mirroring -->
<div class="form-group row">
<div class="cd-col-form-offset">
@@ -300,9 +277,31 @@
<!-- Advanced -->
<cd-form-advanced-fieldset>
+ <!-- Features -->
+ <div class="form-group row"
+ formGroupName="features">
+ <label i18n
+ class="cd-col-form-label"
+ for="features">Features</label>
+ <div class="cd-col-form-input">
+ <div class="custom-control custom-checkbox"
+ *ngFor="let feature of featuresList">
+ <input type="checkbox"
+ class="custom-control-input"
+ id="{{ feature.key }}"
+ name="{{ feature.key }}"
+ formControlName="{{ feature.key }}">
+ <label class="custom-control-label"
+ for="{{ feature.key }}">{{ feature.desc }}</label>
+ <cd-helper *ngIf="feature.helperHtml"
+ html="{{ feature.helperHtml }}">
+ </cd-helper>
+ </div>
+ </div>
+ </div>
+
<h4 class="cd-header"
i18n>Striping</h4>
-
<!-- Object Size -->
<div class="form-group row">
<label i18n
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-snapshotschedule-form/cephfs-snapshotschedule-form.component.spec.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-snapshotschedule-form/cephfs-snapshotschedule-form.component.spec.ts
index 6a9fbcb942a..dedbd3bafc1 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-snapshotschedule-form/cephfs-snapshotschedule-form.component.spec.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-snapshotschedule-form/cephfs-snapshotschedule-form.component.spec.ts
@@ -1,5 +1,11 @@
import { HttpClientTestingModule } from '@angular/common/http/testing';
-import { ComponentFixture, TestBed } from '@angular/core/testing';
+import {
+ ComponentFixture,
+ TestBed,
+ discardPeriodicTasks,
+ fakeAsync,
+ tick
+} from '@angular/core/testing';
import { CephfsSnapshotscheduleFormComponent } from './cephfs-snapshotschedule-form.component';
import {
@@ -13,12 +19,12 @@ import { RouterTestingModule } from '@angular/router/testing';
import { ReactiveFormsModule } from '@angular/forms';
import { FormHelper, configureTestBed } from '~/testing/unit-test-helper';
import { CephfsSnapshotScheduleService } from '~/app/shared/api/cephfs-snapshot-schedule.service';
+import { of } from 'rxjs';
describe('CephfsSnapshotscheduleFormComponent', () => {
let component: CephfsSnapshotscheduleFormComponent;
let fixture: ComponentFixture<CephfsSnapshotscheduleFormComponent>;
let formHelper: FormHelper;
- let createSpy: jasmine.Spy;
configureTestBed({
declarations: [CephfsSnapshotscheduleFormComponent],
@@ -40,7 +46,6 @@ describe('CephfsSnapshotscheduleFormComponent', () => {
component.fsName = 'test_fs';
component.ngOnInit();
formHelper = new FormHelper(component.snapScheduleForm);
- createSpy = spyOn(TestBed.inject(CephfsSnapshotScheduleService), 'create').and.stub();
fixture.detectChanges();
});
@@ -53,7 +58,12 @@ describe('CephfsSnapshotscheduleFormComponent', () => {
expect(nativeEl.querySelector('cd-modal')).not.toBe(null);
});
- it('should submit the form', () => {
+ it('should submit the form', fakeAsync(() => {
+ const createSpy = spyOn(TestBed.inject(CephfsSnapshotScheduleService), 'create').and.stub();
+ const checkScheduleExistsSpy = spyOn(
+ TestBed.inject(CephfsSnapshotScheduleService),
+ 'checkScheduleExists'
+ ).and.returnValue(of(false));
const input = {
directory: '/test',
startDate: {
@@ -73,7 +83,10 @@ describe('CephfsSnapshotscheduleFormComponent', () => {
formHelper.setMultipleValues(input);
component.snapScheduleForm.get('directory').setValue('/test');
component.submit();
+ tick(400);
+ expect(checkScheduleExistsSpy).toHaveBeenCalled();
expect(createSpy).toHaveBeenCalled();
- });
+ discardPeriodicTasks();
+ }));
});
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-snapshotschedule-form/cephfs-snapshotschedule-form.component.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-snapshotschedule-form/cephfs-snapshotschedule-form.component.ts
index a34226c563b..61743caa728 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-snapshotschedule-form/cephfs-snapshotschedule-form.component.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-snapshotschedule-form/cephfs-snapshotschedule-form.component.ts
@@ -115,6 +115,10 @@ export class CephfsSnapshotscheduleFormComponent extends CdForm implements OnIni
this.subvolume = value?.split?.('/')?.[3];
}),
filter(() => !!this.subvolume && !!this.subvolumeGroup),
+ tap(() => {
+ this.isSubvolume = !!this.subvolume && !!this.subvolumeGroup;
+ this.snapScheduleForm.get('repeatFrequency').setErrors(null);
+ }),
mergeMap(() =>
this.subvolumeService
.exists(
@@ -282,84 +286,92 @@ export class CephfsSnapshotscheduleFormComponent extends CdForm implements OnIni
}
submit() {
- if (this.snapScheduleForm.invalid) {
- this.snapScheduleForm.setErrors({ cdSubmitButton: true });
- return;
- }
-
- const values = this.snapScheduleForm.value as SnapshotScheduleFormValue;
-
- if (this.isEdit) {
- const retentionPoliciesToAdd = (this.snapScheduleForm.get(
- 'retentionPolicies'
- ) as FormArray).controls
- ?.filter(
- (ctrl) =>
- !ctrl.get('retentionInterval').disabled && !ctrl.get('retentionFrequency').disabled
- )
- .map((ctrl) => ({
- retentionInterval: ctrl.get('retentionInterval').value,
- retentionFrequency: ctrl.get('retentionFrequency').value
- }));
-
- const updateObj = {
- fs: this.fsName,
- path: this.path,
- subvol: this.subvol,
- group: this.group,
- retention_to_add: this.parseRetentionPolicies(retentionPoliciesToAdd) || null,
- retention_to_remove: this.parseRetentionPolicies(this.retentionPoliciesToRemove) || null
- };
-
- this.taskWrapper
- .wrapTaskAroundCall({
- task: new FinishedTask('cephfs/snapshot/schedule/' + URLVerbs.EDIT, {
- path: this.path
- }),
- call: this.snapScheduleService.update(updateObj)
- })
- .subscribe({
- error: () => {
- this.snapScheduleForm.setErrors({ cdSubmitButton: true });
- },
- complete: () => {
- this.activeModal.close();
+ this.validateSchedule()(this.snapScheduleForm).subscribe({
+ next: () => {
+ if (this.snapScheduleForm.invalid) {
+ this.snapScheduleForm.setErrors({ cdSubmitButton: true });
+ return;
+ }
+
+ const values = this.snapScheduleForm.value as SnapshotScheduleFormValue;
+
+ if (this.isEdit) {
+ const retentionPoliciesToAdd = (this.snapScheduleForm.get(
+ 'retentionPolicies'
+ ) as FormArray).controls
+ ?.filter(
+ (ctrl) =>
+ !ctrl.get('retentionInterval').disabled && !ctrl.get('retentionFrequency').disabled
+ )
+ .map((ctrl) => ({
+ retentionInterval: ctrl.get('retentionInterval').value,
+ retentionFrequency: ctrl.get('retentionFrequency').value
+ }));
+
+ const updateObj = {
+ fs: this.fsName,
+ path: this.path,
+ subvol: this.subvol,
+ group: this.group,
+ retention_to_add: this.parseRetentionPolicies(retentionPoliciesToAdd) || null,
+ retention_to_remove: this.parseRetentionPolicies(this.retentionPoliciesToRemove) || null
+ };
+
+ this.taskWrapper
+ .wrapTaskAroundCall({
+ task: new FinishedTask('cephfs/snapshot/schedule/' + URLVerbs.EDIT, {
+ path: this.path
+ }),
+ call: this.snapScheduleService.update(updateObj)
+ })
+ .subscribe({
+ error: () => {
+ this.snapScheduleForm.setErrors({ cdSubmitButton: true });
+ },
+ complete: () => {
+ this.activeModal.close();
+ }
+ });
+ } else {
+ const snapScheduleObj = {
+ fs: this.fsName,
+ path: values.directory,
+ snap_schedule: this.parseSchedule(values?.repeatInterval, values?.repeatFrequency),
+ start: this.parseDatetime(values?.startDate, values?.startTime)
+ };
+
+ const retentionPoliciesValues = this.parseRetentionPolicies(values?.retentionPolicies);
+
+ if (retentionPoliciesValues) {
+ snapScheduleObj['retention_policy'] = retentionPoliciesValues;
}
- });
- } else {
- const snapScheduleObj = {
- fs: this.fsName,
- path: values.directory,
- snap_schedule: this.parseSchedule(values?.repeatInterval, values?.repeatFrequency),
- start: this.parseDatetime(values?.startDate, values?.startTime)
- };
- const retentionPoliciesValues = this.parseRetentionPolicies(values?.retentionPolicies);
-
- if (retentionPoliciesValues) snapScheduleObj['retention_policy'] = retentionPoliciesValues;
+ if (this.isSubvolume) {
+ snapScheduleObj['subvol'] = this.subvolume;
+ }
- if (this.isSubvolume) snapScheduleObj['subvol'] = this.subvolume;
+ if (this.isSubvolume && !this.isDefaultSubvolumeGroup) {
+ snapScheduleObj['group'] = this.subvolumeGroup;
+ }
- if (this.isSubvolume && !this.isDefaultSubvolumeGroup) {
- snapScheduleObj['group'] = this.subvolumeGroup;
+ this.taskWrapper
+ .wrapTaskAroundCall({
+ task: new FinishedTask('cephfs/snapshot/schedule/' + URLVerbs.CREATE, {
+ path: snapScheduleObj.path
+ }),
+ call: this.snapScheduleService.create(snapScheduleObj)
+ })
+ .subscribe({
+ error: () => {
+ this.snapScheduleForm.setErrors({ cdSubmitButton: true });
+ },
+ complete: () => {
+ this.activeModal.close();
+ }
+ });
+ }
}
-
- this.taskWrapper
- .wrapTaskAroundCall({
- task: new FinishedTask('cephfs/snapshot/schedule/' + URLVerbs.CREATE, {
- path: snapScheduleObj.path
- }),
- call: this.snapScheduleService.create(snapScheduleObj)
- })
- .subscribe({
- error: () => {
- this.snapScheduleForm.setErrors({ cdSubmitButton: true });
- },
- complete: () => {
- this.activeModal.close();
- }
- });
- }
+ });
}
validateSchedule() {
@@ -379,11 +391,13 @@ export class CephfsSnapshotscheduleFormComponent extends CdForm implements OnIni
directory?.value,
this.fsName,
repeatInterval?.value,
- repeatFrequency?.value
+ repeatFrequency?.value,
+ this.isSubvolume
)
.pipe(
map((exists: boolean) => {
if (exists) {
+ repeatFrequency?.markAsDirty();
repeatFrequency?.setErrors({ notUnique: true }, { emitEvent: true });
} else {
repeatFrequency?.setErrors(null);
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-snapshotschedule-list/cephfs-snapshotschedule-list.component.ts b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-snapshotschedule-list/cephfs-snapshotschedule-list.component.ts
index 9a131a1e80b..53c8e924e82 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-snapshotschedule-list/cephfs-snapshotschedule-list.component.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/ceph/cephfs/cephfs-snapshotschedule-list/cephfs-snapshotschedule-list.component.ts
@@ -300,8 +300,7 @@ export class CephfsSnapshotscheduleListComponent
const interval = r.substring(0, r.length - 1);
return `${interval}-${frequency}`;
})
- ?.join('|')
- ?.toLocaleLowerCase();
+ ?.join('|');
this.modalRef = this.modalService.show(CriticalConfirmationModalComponent, {
itemDescription: $localize`snapshot schedule`,
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs-snapshot-schedule.service.ts b/src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs-snapshot-schedule.service.ts
index ade935a9299..2a3b1294259 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs-snapshot-schedule.service.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/shared/api/cephfs-snapshot-schedule.service.ts
@@ -73,13 +73,14 @@ export class CephfsSnapshotScheduleService {
path: string,
fs: string,
interval: number,
- frequency: RepeatFrequency
+ frequency: RepeatFrequency,
+ isSubvolume = false
): Observable<boolean> {
return this.getSnapshotScheduleList(path, fs, false).pipe(
map((response) => {
- const index = response.findIndex(
- (x) => x.path === path && x.schedule === `${interval}${frequency}`
- );
+ const index = response
+ .filter((x) => (isSubvolume ? x.path.startsWith(path) : x.path === path))
+ .findIndex((x) => x.schedule === `${interval}${frequency}`);
return index > -1;
}),
catchError(() => {
@@ -149,7 +150,7 @@ export class CephfsSnapshotScheduleService {
retentionCopy: this.parseRetentionCopy(snapItem?.retention),
retention: Object.values(snapItem?.retention || [])?.length
? Object.entries(snapItem.retention)
- ?.map?.(([frequency, interval]) => `${interval}${frequency.toLocaleUpperCase()}`)
+ ?.map?.(([frequency, interval]) => `${interval}${frequency}`)
.join(' ')
: '-'
})),
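
Note on the checkScheduleExists change above: for subvolumes the duplicate check now matches schedules by path prefix (a subvolume's schedules live on snapshot paths nested under the subvolume root), while plain directories keep the exact-path match. A minimal sketch of that predicate, written here in C++ with made-up types (Sched, schedule_exists) purely to illustrate the filter/findIndex logic:

// Sketch only: mirrors the prefix-vs-exact path matching, not the real API.
#include <iostream>
#include <string>
#include <vector>

struct Sched { std::string path, schedule; };

bool schedule_exists(const std::vector<Sched>& list, const std::string& path,
                     const std::string& schedule, bool is_subvolume) {
  for (const auto& s : list) {
    // subvolume: any schedule under the subvolume root counts as a duplicate
    bool path_match = is_subvolume ? s.path.rfind(path, 0) == 0  // startsWith
                                   : s.path == path;
    if (path_match && s.schedule == schedule) return true;
  }
  return false;
}

int main() {
  std::vector<Sched> list = {{"/vol/grp/sub/.snap-root", "1h"}};
  std::cout << schedule_exists(list, "/vol/grp/sub", "1h", true) << '\n';   // 1
  std::cout << schedule_exists(list, "/vol/grp/sub", "1h", false) << '\n';  // 0
}
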
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/shared/components/grafana/grafana.component.ts b/src/pybind/mgr/dashboard/frontend/src/app/shared/components/grafana/grafana.component.ts
index b650f6694cf..d3c7012bcbb 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/shared/components/grafana/grafana.component.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/shared/components/grafana/grafana.component.ts
@@ -172,7 +172,7 @@ export class GrafanaComponent implements OnInit, OnChanges {
getFrame() {
this.settingsService
.validateGrafanaDashboardUrl(this.uid)
- .subscribe((data: any) => (this.dashboardExist = data === 200));
+ .subscribe((data: any) => (this.dashboardExist = data === 200 || data === 401)); // also accept 401: the Grafana API returns Unauthorized when anonymous access is disabled
if (this.type === 'metrics') {
this.url = `${this.baseUrl}${this.uid}/${this.grafanaPath}&refresh=2s&var-datasource=${this.datasource}${this.mode}&${this.time}`;
} else {
diff --git a/src/pybind/mgr/dashboard/frontend/src/app/shared/enum/retention-frequency.enum.ts b/src/pybind/mgr/dashboard/frontend/src/app/shared/enum/retention-frequency.enum.ts
index b24a337ee1c..857c08c0c56 100644
--- a/src/pybind/mgr/dashboard/frontend/src/app/shared/enum/retention-frequency.enum.ts
+++ b/src/pybind/mgr/dashboard/frontend/src/app/shared/enum/retention-frequency.enum.ts
@@ -1,5 +1,4 @@
export enum RetentionFrequency {
- Minutely = 'm',
Hourly = 'h',
Daily = 'd',
Weekly = 'w',
diff --git a/src/pybind/mgr/dashboard/openapi.yaml b/src/pybind/mgr/dashboard/openapi.yaml
index 36d61f82c77..d43f48fcbf7 100644
--- a/src/pybind/mgr/dashboard/openapi.yaml
+++ b/src/pybind/mgr/dashboard/openapi.yaml
@@ -7277,6 +7277,28 @@ paths:
- jwt: []
tags:
- Multi-cluster
+ /api/multi-cluster/get_prometheus_api_url:
+ get:
+ parameters: []
+ responses:
+ '200':
+ content:
+ application/vnd.ceph.api.v1.0+json:
+ type: object
+ description: OK
+ '400':
+ description: Operation exception. Please check the response body for details.
+ '401':
+ description: Unauthenticated access. Please login first.
+ '403':
+ description: Unauthorized access. Please check your permissions.
+ '500':
+ description: Unexpected error. Please check the response body for the stack
+ trace.
+ security:
+ - jwt: []
+ tags:
+ - Multi-cluster
/api/multi-cluster/reconnect_cluster:
put:
parameters: []
diff --git a/src/pybind/mgr/tox.ini b/src/pybind/mgr/tox.ini
index 082d0dbd53c..61f3643e890 100644
--- a/src/pybind/mgr/tox.ini
+++ b/src/pybind/mgr/tox.ini
@@ -146,7 +146,6 @@ commands =
{posargs:{[testenv:fix]modules}}
[testenv:pylint]
-basepython = python3
deps =
pylint
modules =
@@ -155,7 +154,6 @@ commands =
pylint {[pylint]addopts} {posargs:{[testenv:pylint]modules}}
[testenv:flake8]
-basepython = python3
deps =
flake8
allowlist_externals = bash
@@ -182,7 +180,6 @@ commands =
bash -c 'test $(git ls-files cephadm | grep ".py$" | grep -v tests | xargs grep "docker.io" | wc -l) == 13'
[testenv:jinjalint]
-basepython = python3
deps =
jinjaninja
commands =
diff --git a/src/rgw/CMakeLists.txt b/src/rgw/CMakeLists.txt
index 2c18ff1aadd..cf214b39c95 100644
--- a/src/rgw/CMakeLists.txt
+++ b/src/rgw/CMakeLists.txt
@@ -57,6 +57,7 @@ set(librgw_common_srcs
services/svc_user_rados.cc
services/svc_zone.cc
services/svc_zone_utils.cc
+ rgw_account.cc
rgw_acl.cc
rgw_acl_s3.cc
rgw_acl_swift.cc
@@ -103,12 +104,15 @@ set(librgw_common_srcs
rgw_quota.cc
rgw_resolve.cc
rgw_rest.cc
+ rgw_rest_account.cc
rgw_rest_client.cc
rgw_rest_config.cc
rgw_rest_conn.cc
rgw_rest_metadata.cc
rgw_rest_ratelimit.cc
rgw_rest_role.cc
+ rgw_rest_iam_group.cc
+ rgw_rest_iam_user.cc
rgw_rest_s3.cc
rgw_rest_pubsub.cc
rgw_rest_zero.cc
@@ -127,6 +131,7 @@ set(librgw_common_srcs
rgw_crypt.cc
rgw_crypt_sanitize.cc
rgw_iam_policy.cc
+ rgw_iam_managed_policy.cc
rgw_rest_user_policy.cc
rgw_zone.cc
rgw_sts.cc
@@ -148,7 +153,11 @@ set(librgw_common_srcs
rgw_tracer.cc
rgw_lua_background.cc
rgw_data_access.cc
+ driver/rados/account.cc
+ driver/rados/buckets.cc
driver/rados/cls_fifo_legacy.cc
+ driver/rados/group.cc
+ driver/rados/groups.cc
driver/rados/rgw_bucket.cc
driver/rados/rgw_bucket_sync.cc
driver/rados/rgw_cr_rados.cc
@@ -193,9 +202,12 @@ set(librgw_common_srcs
driver/rados/rgw_trim_mdlog.cc
driver/rados/rgw_user.cc
driver/rados/rgw_zone.cc
+ driver/rados/roles.cc
driver/rados/sync_fairness.cc
driver/rados/topic.cc
- driver/rados/topic_migration.cc)
+ driver/rados/topic_migration.cc
+ driver/rados/topics.cc
+ driver/rados/users.cc)
list(APPEND librgw_common_srcs
driver/immutable_config/store.cc
@@ -290,7 +302,8 @@ target_link_libraries(rgw_common
${LUA_LIBRARIES}
RapidJSON::RapidJSON
spawn
- ${FMT_LIB})
+ ${FMT_LIB}
+ OpenSSL::SSL)
target_include_directories(rgw_common
PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw/services"
PUBLIC "${CMAKE_SOURCE_DIR}/src/rgw"
@@ -310,8 +323,7 @@ if(WITH_RADOSGW_AMQP_ENDPOINT)
# used by rgw_amqp.cc
target_link_libraries(rgw_common
PRIVATE
- RabbitMQ::RabbitMQ
- OpenSSL::SSL)
+ RabbitMQ::RabbitMQ)
endif()
if(WITH_OPENLDAP)
target_link_libraries(rgw_common
@@ -467,10 +479,6 @@ target_link_libraries(radosgw PRIVATE
kmip
${ALLOC_LIBS})
-if(WITH_RADOSGW_BEAST_OPENSSL)
- # used by rgw_asio_frontend.cc
- target_link_libraries(radosgw PRIVATE OpenSSL::SSL)
-endif()
install(TARGETS radosgw DESTINATION bin)
set(radosgw_admin_srcs
diff --git a/src/rgw/driver/d4n/rgw_sal_d4n.cc b/src/rgw/driver/d4n/rgw_sal_d4n.cc
index 1e297b01e61..1afd6822fe2 100644
--- a/src/rgw/driver/d4n/rgw_sal_d4n.cc
+++ b/src/rgw/driver/d4n/rgw_sal_d4n.cc
@@ -110,7 +110,8 @@ int D4NFilterBucket::create(const DoutPrefixProvider* dpp,
return next->create(dpp, params, y);
}
-int D4NFilterObject::copy_object(User* user,
+int D4NFilterObject::copy_object(const ACLOwner& owner,
+ const rgw_user& remote_user,
req_info* info,
const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object,
@@ -197,7 +198,7 @@ int D4NFilterObject::copy_object(User* user,
}
}*/
- return next->copy_object(user, info, source_zone,
+ return next->copy_object(owner, remote_user, info, source_zone,
nextObject(dest_object),
nextBucket(dest_bucket),
nextBucket(src_bucket),
@@ -255,7 +256,6 @@ int D4NFilterObject::get_obj_attrs(optional_yield y, const DoutPrefixProvider* d
/* Set metadata locally */
RGWQuotaInfo quota_info;
RGWObjState* astate;
- std::unique_ptr<rgw::sal::User> user = this->driver->get_user(this->get_bucket()->get_owner());
this->get_obj_state(dpp, &astate, y);
for (auto it = attrs.begin(); it != attrs.end(); ++it) {
@@ -285,7 +285,6 @@ int D4NFilterObject::get_obj_attrs(optional_yield y, const DoutPrefixProvider* d
quota_info.max_objects = std::stoull(it->second.c_str());
attrs.erase(it->first);
} else if (it->first == "max_buckets") {
- user->set_max_buckets(std::stoull(it->second.c_str()));
attrs.erase(it->first);
} else {
ldpp_dout(dpp, 20) << "D4N Filter: Unexpected attribute; not locally set." << dendl;
@@ -294,7 +293,6 @@ int D4NFilterObject::get_obj_attrs(optional_yield y, const DoutPrefixProvider* d
}
}
- user->set_info(quota_info);
this->set_obj_state(*astate);
/* Set attributes locally */
@@ -350,7 +348,7 @@ std::unique_ptr<Object> D4NFilterDriver::get_object(const rgw_obj_key& k)
std::unique_ptr<Writer> D4NFilterDriver::get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag)
@@ -394,7 +392,6 @@ int D4NFilterObject::D4NFilterReadOp::prepare(optional_yield y, const DoutPrefix
/* Set metadata locally */
RGWObjState* astate;
RGWQuotaInfo quota_info;
- std::unique_ptr<rgw::sal::User> user = source->driver->get_user(source->get_bucket()->get_owner());
source->get_obj_state(dpp, &astate, y);
for (auto& attr : attrs) {
@@ -424,13 +421,11 @@ int D4NFilterObject::D4NFilterReadOp::prepare(optional_yield y, const DoutPrefix
quota_info.max_objects = std::stoull(attr.second.c_str());
attrs.erase(attr.first);
} else if (attr.first == "max_buckets") {
- user->set_max_buckets(std::stoull(attr.second.c_str()));
attrs.erase(attr.first);
} else {
ldpp_dout(dpp, 20) << "D4NFilterObject::D4NFilterReadOp::" << __func__ << "(): Unexpected attribute; not locally set." << dendl;
}
}
- user->set_info(quota_info);
source->set_obj_state(*astate);
/* Set attributes locally */
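
The SAL changes in this file follow the wider signature migration: copy_object (and the writer factories) now receive a resolved ACLOwner plus a separate remote rgw_user instead of a full User*, so filters like D4N only forward identity data rather than a whole user object. A minimal sketch of the new call shape, using simplified stand-in types rather than the real rgw::sal headers (the actual interface carries many more parameters):

#include <iostream>
#include <string>

struct rgw_user {             // stand-in: a user id within a tenant
  std::string tenant;
  std::string id;
};

struct ACLOwner {             // stand-in: resolved ACL owner of the request
  std::string id;             // may name a user or an account
  std::string display_name;
};

// Old shape: int copy_object(User* user, ...) — the callee dug the owner
// and remote identity out of the User object.
// New shape: the caller resolves ownership up front and passes only what
// the copy actually needs.
int copy_object(const ACLOwner& owner, const rgw_user& remote_user) {
  std::cout << "copy as owner=" << owner.id
            << " remote_user=" << remote_user.id << '\n';
  return 0;
}

int main() {
  ACLOwner owner{"acct-123", "Example Owner"};
  rgw_user remote{"tenant1", "swift-remote"};
  return copy_object(owner, remote);
}
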
diff --git a/src/rgw/driver/d4n/rgw_sal_d4n.h b/src/rgw/driver/d4n/rgw_sal_d4n.h
index 5986cfaf20c..42436b92d1d 100644
--- a/src/rgw/driver/d4n/rgw_sal_d4n.h
+++ b/src/rgw/driver/d4n/rgw_sal_d4n.h
@@ -17,7 +17,6 @@
#include "rgw_sal_filter.h"
#include "rgw_sal.h"
-#include "rgw_oidc_provider.h"
#include "rgw_role.h"
#include "common/dout.h"
#include "rgw_aio_throttle.h"
@@ -61,7 +60,7 @@ class D4NFilterDriver : public FilterDriver {
virtual std::unique_ptr<Writer> get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag) override;
@@ -178,7 +177,8 @@ class D4NFilterObject : public FilterObject {
driver(_driver) {}
virtual ~D4NFilterObject() = default;
- virtual int copy_object(User* user,
+ virtual int copy_object(const ACLOwner& owner,
+ const rgw_user& remote_user,
req_info* info, const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
rgw::sal::Bucket* src_bucket,
diff --git a/src/rgw/driver/daos/rgw_sal_daos.cc b/src/rgw/driver/daos/rgw_sal_daos.cc
index a6136d90599..f8f60d82d02 100644
--- a/src/rgw/driver/daos/rgw_sal_daos.cc
+++ b/src/rgw/driver/daos/rgw_sal_daos.cc
@@ -45,10 +45,11 @@ namespace rgw::sal {
using ::ceph::decode;
using ::ceph::encode;
-int DaosUser::list_buckets(const DoutPrefixProvider* dpp, const string& marker,
- const string& end_marker, uint64_t max,
- bool need_stats, BucketList& buckets,
- optional_yield y) {
+int DaosStore::list_buckets(const DoutPrefixProvider* dpp,
+ const rgw_owner& owner, const std::string& tenant,
+ const string& marker, const string& end_marker,
+ uint64_t max, bool need_stats, BucketList& buckets,
+ optional_yield y) {
ldpp_dout(dpp, 20) << "DEBUG: list_user_buckets: marker=" << marker
<< " end_marker=" << end_marker << " max=" << max << dendl;
int ret = 0;
@@ -65,7 +66,7 @@ int DaosUser::list_buckets(const DoutPrefixProvider* dpp, const string& marker,
char daos_marker[DS3_MAX_BUCKET_NAME];
std::strncpy(daos_marker, marker.c_str(), sizeof(daos_marker));
ret = ds3_bucket_list(&bcount, bucket_infos.data(), daos_marker,
- &is_truncated, store->ds3, nullptr);
+ &is_truncated, ds3, nullptr);
ldpp_dout(dpp, 20) << "DEBUG: ds3_bucket_list: bcount=" << bcount
<< " ret=" << ret << dendl;
if (ret != 0) {
@@ -82,7 +83,7 @@ int DaosUser::list_buckets(const DoutPrefixProvider* dpp, const string& marker,
bl.append(reinterpret_cast<char*>(bi.encoded), bi.encoded_length);
auto iter = bl.cbegin();
dbinfo.decode(iter);
- buckets.add(std::make_unique<DaosBucket>(this->store, dbinfo.info, this));
+ buckets.add(std::make_unique<DaosBucket>(this, dbinfo.info, this));
}
buckets.set_truncated(is_truncated);
@@ -499,8 +500,8 @@ int DaosBucket::read_stats_async(
return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
-int DaosBucket::sync_user_stats(const DoutPrefixProvider* dpp,
- optional_yield y) {
+int DaosBucket::sync_owner_stats(const DoutPrefixProvider* dpp,
+ optional_yield y) {
return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
@@ -508,7 +509,7 @@ int DaosBucket::check_bucket_shards(const DoutPrefixProvider* dpp) {
return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
-int DaosBucket::chown(const DoutPrefixProvider* dpp, User& new_user,
+int DaosBucket::chown(const DoutPrefixProvider* dpp, const rgw_owner& new_user,
optional_yield y) {
return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
@@ -1207,7 +1208,8 @@ int DaosObject::delete_object(const DoutPrefixProvider* dpp, optional_yield y,
}
int DaosObject::copy_object(
- User* user, req_info* info, const rgw_zone_id& source_zone,
+ const ACLOwner& owner, const rgw_user& remote_user,
+ req_info* info, const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
rgw::sal::Bucket* src_bucket, const rgw_placement_rule& dest_placement,
ceph::real_time* src_mtime, ceph::real_time* mtime,
@@ -1221,13 +1223,13 @@ int DaosObject::copy_object(
return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
-int DaosObject::swift_versioning_restore(bool& restored,
- const DoutPrefixProvider* dpp) {
+int DaosObject::swift_versioning_restore(const ACLOwner& owner, const rgw_user& remote_user, bool& restored,
+ const DoutPrefixProvider* dpp, optional_yield y) {
return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
-int DaosObject::swift_versioning_copy(const DoutPrefixProvider* dpp,
- optional_yield y) {
+int DaosObject::swift_versioning_copy(const ACLOwner& owner, const rgw_user& remote_user,
+ const DoutPrefixProvider* dpp, optional_yield y) {
return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
@@ -1976,7 +1978,7 @@ int DaosMultipartUpload::get_info(const DoutPrefixProvider* dpp,
std::unique_ptr<Writer> DaosMultipartUpload::get_writer(
const DoutPrefixProvider* dpp, optional_yield y,
- rgw::sal::Object* obj, const rgw_user& owner,
+ rgw::sal::Object* obj, const ACLOwner& owner,
const rgw_placement_rule* ptail_placement_rule, uint64_t part_num,
const std::string& part_num_str) {
ldpp_dout(dpp, 20) << "DaosMultipartUpload::get_writer(): enter part="
@@ -2076,8 +2078,8 @@ int DaosMultipartWriter::complete(
}
std::unique_ptr<RGWRole> DaosStore::get_role(
- std::string name, std::string tenant, std::string path,
- std::string trust_policy, std::string max_session_duration_str,
+ std::string name, std::string tenant, rgw_account_id account_id, std::string path,
+ std::string trust_policy, std::string description, std::string max_session_duration_str,
std::multimap<std::string, std::string> tags) {
RGWRole* p = nullptr;
return std::unique_ptr<RGWRole>(p);
@@ -2093,21 +2095,42 @@ std::unique_ptr<RGWRole> DaosStore::get_role(std::string id) {
return std::unique_ptr<RGWRole>(p);
}
-int DaosStore::get_roles(const DoutPrefixProvider* dpp, optional_yield y,
- const std::string& path_prefix,
- const std::string& tenant,
- vector<std::unique_ptr<RGWRole>>& roles) {
+int DaosStore::list_roles(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const std::string& tenant,
+ const std::string& path_prefix,
+ const std::string& marker,
+ uint32_t max_items,
+ RoleList& listing) {
+ return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosStore::store_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const RGWOIDCProviderInfo& info,
+ bool exclusive) {
return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
-std::unique_ptr<RGWOIDCProvider> DaosStore::get_oidc_provider() {
- RGWOIDCProvider* p = nullptr;
- return std::unique_ptr<RGWOIDCProvider>(p);
+int DaosStore::load_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url,
+ RGWOIDCProviderInfo& info) {
+ return DAOS_NOT_IMPLEMENTED_LOG(dpp);
+}
+
+int DaosStore::delete_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url) {
+ return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
-int DaosStore::get_oidc_providers(
- const DoutPrefixProvider* dpp, const std::string& tenant,
- vector<std::unique_ptr<RGWOIDCProvider>>& providers) {
+int DaosStore::get_oidc_providers(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::vector<RGWOIDCProviderInfo>& providers) {
return DAOS_NOT_IMPLEMENTED_LOG(dpp);
}
@@ -2120,7 +2143,7 @@ std::unique_ptr<MultipartUpload> DaosBucket::get_multipart_upload(
std::unique_ptr<Writer> DaosStore::get_append_writer(
const DoutPrefixProvider* dpp, optional_yield y,
- rgw::sal::Object* obj, const rgw_user& owner,
+ rgw::sal::Object* obj, const ACLOwner& owner,
const rgw_placement_rule* ptail_placement_rule,
const std::string& unique_tag, uint64_t position,
uint64_t* cur_accounted_size) {
@@ -2130,7 +2153,7 @@ std::unique_ptr<Writer> DaosStore::get_append_writer(
std::unique_ptr<Writer> DaosStore::get_atomic_writer(
const DoutPrefixProvider* dpp, optional_yield y,
- rgw::sal::Object* obj, const rgw_user& owner,
+ rgw::sal::Object* obj, const ACLOwner& owner,
const rgw_placement_rule* ptail_placement_rule, uint64_t olh_epoch,
const std::string& unique_tag) {
ldpp_dout(dpp, 20) << "get_atomic_writer" << dendl;
diff --git a/src/rgw/driver/daos/rgw_sal_daos.h b/src/rgw/driver/daos/rgw_sal_daos.h
index 2d74f9c17cc..cf1583fc174 100644
--- a/src/rgw/driver/daos/rgw_sal_daos.h
+++ b/src/rgw/driver/daos/rgw_sal_daos.h
@@ -28,7 +28,6 @@
#include "rgw_multi.h"
#include "rgw_notify.h"
-#include "rgw_oidc_provider.h"
#include "rgw_putobj_processor.h"
#include "rgw_rados.h"
#include "rgw_role.h"
@@ -168,9 +167,6 @@ class DaosUser : public StoreUser {
virtual std::unique_ptr<User> clone() override {
return std::make_unique<DaosUser>(*this);
}
- int list_buckets(const DoutPrefixProvider* dpp, const std::string& marker,
- const std::string& end_marker, uint64_t max, bool need_stats,
- BucketList& buckets, optional_yield y) override;
virtual int create_bucket(
const DoutPrefixProvider* dpp, const rgw_bucket& b,
const std::string& zonegroup_id, rgw_placement_rule& placement_rule,
@@ -312,10 +308,10 @@ class DaosBucket : public StoreBucket {
const bucket_index_layout_generation& idx_layout,
int shard_id,
boost::intrusive_ptr<ReadStatsCB> ctx) override;
- virtual int sync_user_stats(const DoutPrefixProvider* dpp,
- optional_yield y) override;
+ virtual int sync_owner_stats(const DoutPrefixProvider* dpp,
+ optional_yield y) override;
virtual int check_bucket_shards(const DoutPrefixProvider* dpp) override;
- virtual int chown(const DoutPrefixProvider* dpp, User& new_user,
+ virtual int chown(const DoutPrefixProvider* dpp, const rgw_owner& new_user,
optional_yield y) override;
virtual int put_info(const DoutPrefixProvider* dpp, bool exclusive,
ceph::real_time mtime) override;
@@ -602,7 +598,8 @@ class DaosObject : public StoreObject {
virtual int delete_object(const DoutPrefixProvider* dpp, optional_yield y,
uint32_t flags) override;
virtual int copy_object(
- User* user, req_info* info, const rgw_zone_id& source_zone,
+ const ACLOwner& owner, const rgw_user& remote_user,
+ req_info* info, const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
rgw::sal::Bucket* src_bucket, const rgw_placement_rule& dest_placement,
ceph::real_time* src_mtime, ceph::real_time* mtime,
@@ -657,10 +654,10 @@ class DaosObject : public StoreObject {
Formatter* f) override;
/* Swift versioning */
- virtual int swift_versioning_restore(bool& restored,
- const DoutPrefixProvider* dpp) override;
- virtual int swift_versioning_copy(const DoutPrefixProvider* dpp,
- optional_yield y) override;
+ virtual int swift_versioning_restore(const ACLOwner& owner, const rgw_user& remote_user, bool& restored,
+ const DoutPrefixProvider* dpp, optional_yield y) override;
+ virtual int swift_versioning_copy(const ACLOwner& owner, const rgw_user& remote_user,
+ const DoutPrefixProvider* dpp, optional_yield y) override;
/* OPs */
virtual std::unique_ptr<ReadOp> get_read_op() override;
@@ -720,7 +717,7 @@ class MPDaosSerializer : public StoreMPSerializer {
class DaosAtomicWriter : public StoreWriter {
protected:
rgw::sal::DaosStore* store;
- const rgw_user& owner;
+ const ACLOwner& owner;
const rgw_placement_rule* ptail_placement_rule;
uint64_t olh_epoch;
const std::string& unique_tag;
@@ -770,7 +767,7 @@ class DaosMultipartWriter : public StoreWriter {
DaosMultipartWriter(const DoutPrefixProvider* dpp, optional_yield y,
MultipartUpload* _upload,
rgw::sal::Object* obj,
- DaosStore* _store, const rgw_user& owner,
+ DaosStore* _store, const ACLOwner& owner,
const rgw_placement_rule* ptail_placement_rule,
uint64_t _part_num, const std::string& part_num_str)
: StoreWriter(dpp, y),
@@ -864,7 +861,7 @@ class DaosMultipartUpload : public StoreMultipartUpload {
rgw::sal::Attrs* attrs = nullptr) override;
virtual std::unique_ptr<Writer> get_writer(
const DoutPrefixProvider* dpp, optional_yield y,
- rgw::sal::Object* obj, const rgw_user& owner,
+ rgw::sal::Object* obj, const ACLOwner& owner,
const rgw_placement_rule* ptail_placement_rule, uint64_t part_num,
const std::string& part_num_str) override;
const std::string& get_bucket_name() { return bucket->get_name(); }
@@ -902,6 +899,11 @@ class DaosStore : public StoreDriver {
int load_bucket(const DoutPrefixProvider* dpp, User* u,
const rgw_bucket& b, std::unique_ptr<Bucket>* bucket,
optional_yield y) override;
+ int list_buckets(const DoutPrefixProvider* dpp,
+ const rgw_owner& owner, const std::string& tenant,
+ const std::string& marker, const std::string& end_marker,
+ uint64_t max, bool need_stats,
+ BucketList& buckets, optional_yield y) override;
virtual bool is_meta_master() override;
virtual Zone* get_zone() { return &zone; }
virtual std::string zone_unique_id(uint64_t unique_num) override;
@@ -991,28 +993,44 @@ class DaosStore : public StoreDriver {
std::unique_ptr<LuaManager> get_lua_manager(const DoutPrefixProvider *dpp = nullptr, const std::string& luarocks_path = "") override;
virtual std::unique_ptr<RGWRole> get_role(
- std::string name, std::string tenant, std::string path = "",
- std::string trust_policy = "", std::string max_session_duration_str = "",
+ std::string name, std::string tenant, rgw_account_id account_id, std::string path = "",
+ std::string trust_policy = "", std::string description = "", std::string max_session_duration_str = "",
std::multimap<std::string, std::string> tags = {}) override;
virtual std::unique_ptr<RGWRole> get_role(const RGWRoleInfo& info) override;
virtual std::unique_ptr<RGWRole> get_role(std::string id) override;
- virtual int get_roles(const DoutPrefixProvider* dpp, optional_yield y,
- const std::string& path_prefix,
- const std::string& tenant,
- std::vector<std::unique_ptr<RGWRole>>& roles) override;
- virtual std::unique_ptr<RGWOIDCProvider> get_oidc_provider() override;
- virtual int get_oidc_providers(
- const DoutPrefixProvider* dpp, const std::string& tenant,
- std::vector<std::unique_ptr<RGWOIDCProvider>>& providers) override;
+ int list_roles(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const std::string& tenant,
+ const std::string& path_prefix,
+ const std::string& marker,
+ uint32_t max_items,
+ RoleList& listing) override;
+ int store_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const RGWOIDCProviderInfo& info,
+ bool exclusive) override;
+ int load_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url,
+ RGWOIDCProviderInfo& info) override;
+ int delete_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url) override;
+ int get_oidc_providers(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::vector<RGWOIDCProviderInfo>& providers) override;
virtual std::unique_ptr<Writer> get_append_writer(
const DoutPrefixProvider* dpp, optional_yield y,
- rgw::sal::Object* obj, const rgw_user& owner,
+ rgw::sal::Object* obj, const ACLOwner& owner,
const rgw_placement_rule* ptail_placement_rule,
const std::string& unique_tag, uint64_t position,
uint64_t* cur_accounted_size) override;
virtual std::unique_ptr<Writer> get_atomic_writer(
const DoutPrefixProvider* dpp, optional_yield y,
- rgw::sal::Object* obj, const rgw_user& owner,
+ rgw::sal::Object* obj, const ACLOwner& owner,
const rgw_placement_rule* ptail_placement_rule, uint64_t olh_epoch,
const std::string& unique_tag) override;
virtual const std::string& get_compression_type(
diff --git a/src/rgw/driver/dbstore/common/dbstore.cc b/src/rgw/driver/dbstore/common/dbstore.cc
index 5a4ae021ead..d548bc4d8c0 100644
--- a/src/rgw/driver/dbstore/common/dbstore.cc
+++ b/src/rgw/driver/dbstore/common/dbstore.cc
@@ -474,7 +474,7 @@ out:
}
int DB::create_bucket(const DoutPrefixProvider *dpp,
- const rgw_user& owner, const rgw_bucket& bucket,
+ const rgw_owner& owner, const rgw_bucket& bucket,
const std::string& zonegroup_id,
const rgw_placement_rule& placement_rule,
const std::map<std::string, bufferlist>& attrs,
@@ -502,7 +502,7 @@ int DB::create_bucket(const DoutPrefixProvider *dpp,
orig_info.bucket.name = bucket.name;
ret = get_bucket_info(dpp, string("name"), "", orig_info, nullptr, nullptr, nullptr);
- if (!ret && !orig_info.owner.id.empty()) {
+ if (!ret && !orig_info.bucket.bucket_id.empty()) {
/* already exists. Return the old info */
info = std::move(orig_info);
return ret;
@@ -543,7 +543,7 @@ int DB::create_bucket(const DoutPrefixProvider *dpp,
params.op.bucket.info = info;
params.op.bucket.bucket_attrs = attrs;
params.op.bucket.mtime = ceph::real_time();
- params.op.user.uinfo.user_id.id = owner.id;
+ params.op.bucket.owner = to_string(owner);
ret = ProcessOp(dpp, "InsertBucket", &params);
@@ -576,7 +576,7 @@ out:
}
int DB::list_buckets(const DoutPrefixProvider *dpp, const std::string& query_str,
- rgw_user& user,
+ std::string& owner,
const string& marker,
const string& end_marker,
uint64_t max,
@@ -589,7 +589,7 @@ int DB::list_buckets(const DoutPrefixProvider *dpp, const std::string& query_str
DBOpParams params = {};
InitializeParams(dpp, &params);
- params.op.user.uinfo.user_id = user;
+ params.op.bucket.owner = owner;
params.op.bucket.min_marker = marker;
params.op.bucket.max_marker = end_marker;
params.op.list_max_count = max;
@@ -619,7 +619,7 @@ int DB::list_buckets(const DoutPrefixProvider *dpp, const std::string& query_str
if (query_str == "all") {
// userID/OwnerID may have changed. Update it.
- user.id = params.op.bucket.info.owner.id;
+ owner = to_string(params.op.bucket.info.owner);
}
out:
@@ -629,7 +629,7 @@ out:
int DB::update_bucket(const DoutPrefixProvider *dpp, const std::string& query_str,
RGWBucketInfo& info,
bool exclusive,
- const rgw_user* powner_id,
+ const rgw_owner* powner,
map<std::string, bufferlist>* pattrs,
ceph::real_time* pmtime,
RGWObjVersionTracker* pobjv)
@@ -650,7 +650,7 @@ int DB::update_bucket(const DoutPrefixProvider *dpp, const std::string& query_st
goto out;
}
- if (!orig_info.owner.id.empty() && exclusive) {
+ if (!orig_info.bucket.bucket_id.empty() && exclusive) {
/* already exists. Return the old info */
info = std::move(orig_info);
@@ -672,17 +672,17 @@ int DB::update_bucket(const DoutPrefixProvider *dpp, const std::string& query_st
params.op.bucket.info.bucket.name = info.bucket.name;
- if (powner_id) {
- params.op.user.uinfo.user_id.id = powner_id->id;
+ if (powner) {
+ params.op.bucket.owner = to_string(*powner);
} else {
- params.op.user.uinfo.user_id.id = orig_info.owner.id;
+ params.op.bucket.owner = to_string(orig_info.owner);
}
/* Update version & mtime */
params.op.bucket.bucket_version.ver = ++(bucket_version.ver);
if (pmtime) {
- params.op.bucket.mtime = *pmtime;;
+ params.op.bucket.mtime = *pmtime;
} else {
params.op.bucket.mtime = ceph::real_time();
}
@@ -1771,7 +1771,7 @@ int DB::Object::Write::_do_write_meta(const DoutPrefixProvider *dpp,
params.op.obj.state.exists = true;
params.op.obj.state.size = size;
params.op.obj.state.accounted_size = accounted_size;
- params.op.obj.owner = target->get_bucket_info().owner.id;
+ params.op.obj.owner = to_string(target->get_bucket_info().owner);
params.op.obj.category = meta.category;
if (meta.mtime) {
@@ -2201,12 +2201,11 @@ void *DB::GC::entry() {
do {
std::string& marker = bucket_marker;
- rgw_user user;
- user.id = user_marker;
+ std::string owner = user_marker;
buckets.clear();
is_truncated = false;
- int r = db->list_buckets(dpp, "all", user, marker, string(),
+ int r = db->list_buckets(dpp, "all", owner, marker, string(),
max, false, &buckets, &is_truncated);
if (r < 0) { //do nothing? retry later ?
@@ -2222,7 +2221,7 @@ void *DB::GC::entry() {
ldpp_dout(dpp, 2) << " delete_stale_objs failed for bucket( " << bname <<")" << dendl;
}
bucket_marker = bname;
- user_marker = user.id;
+ user_marker = owner;
/* XXX: If using locks, unlock here and reacquire in the next iteration */
cv.wait_for(lk, std::chrono::milliseconds(100));
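
Throughout dbstore, the bucket owner is no longer an rgw_user struct but an rgw_owner (a user-or-account identity) persisted as a single string via to_string() and reconstructed with parse_owner(). A sketch of that round-trip, assuming rgw_owner is a variant of user and account ids; the "RGW" prefix rule below is only an illustrative guess at how parse_owner might distinguish the two:

#include <iostream>
#include <string>
#include <variant>

struct rgw_user { std::string id; };          // stand-in
struct rgw_account_id { std::string id; };    // stand-in

// An owner is either a user or an account; the dbstore tables persist it
// as one TEXT column via to_string().
using rgw_owner = std::variant<rgw_user, rgw_account_id>;

std::string to_string(const rgw_owner& o) {
  return std::visit([](const auto& v) { return v.id; }, o);
}

rgw_owner parse_owner(const std::string& s) {
  if (s.rfind("RGW", 0) == 0)   // assumed account-id prefix, for the sketch
    return rgw_account_id{s};
  return rgw_user{s};
}

int main() {
  rgw_owner o = rgw_user{"user_id1"};
  std::string col = to_string(o);        // what lands in the OwnerID column
  rgw_owner back = parse_owner(col);     // what list_bucket() restores
  std::cout << col << " user? "
            << std::holds_alternative<rgw_user>(back) << '\n';
}
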
diff --git a/src/rgw/driver/dbstore/common/dbstore.h b/src/rgw/driver/dbstore/common/dbstore.h
index 8cf6f70f751..3f8191f5a92 100644
--- a/src/rgw/driver/dbstore/common/dbstore.h
+++ b/src/rgw/driver/dbstore/common/dbstore.h
@@ -35,7 +35,7 @@ struct DBOpUserInfo {
struct DBOpBucketInfo {
RGWBucketEnt ent; // maybe not needed. not used in create/get_bucket
RGWBucketInfo info;
- RGWUser* owner = nullptr;
+ std::string owner;
rgw::sal::Attrs bucket_attrs;
obj_version bucket_version;
ceph::real_time mtime;
@@ -482,9 +482,7 @@ class DBOp {
BucketVersion INTEGER, \
BucketVersionTag TEXT, \
Mtime BLOB, \
- PRIMARY KEY (BucketName) \
- FOREIGN KEY (OwnerID) \
- REFERENCES '{}' (UserID) ON DELETE CASCADE ON UPDATE CASCADE \n);";
+ PRIMARY KEY (BucketName) \n);";
static constexpr std::string_view CreateObjectTableTriggerQ =
"CREATE TRIGGER IF NOT EXISTS '{}' \
@@ -931,22 +929,20 @@ class RemoveBucketOp: virtual public DBOp {
class GetBucketOp: virtual public DBOp {
private:
static constexpr std::string_view Query = "SELECT \
- BucketName, BucketTable.Tenant, Marker, BucketID, Size, SizeRounded, CreationTime, \
- Count, BucketTable.PlacementName, BucketTable.PlacementStorageClass, OwnerID, Flags, Zonegroup, \
+ BucketName, Tenant, Marker, BucketID, Size, SizeRounded, CreationTime, \
+ Count, PlacementName, PlacementStorageClass, OwnerID, Flags, Zonegroup, \
HasInstanceObj, Quota, RequesterPays, HasWebsite, WebsiteConf, \
SwiftVersioning, SwiftVerLocation, \
MdsearchConfig, NewBucketInstanceID, ObjectLock, \
- SyncPolicyInfoGroups, BucketAttrs, BucketVersion, BucketVersionTag, Mtime, NS \
- from '{}' as BucketTable INNER JOIN '{}' ON OwnerID = UserID where BucketName = {}";
+ SyncPolicyInfoGroups, BucketAttrs, BucketVersion, BucketVersionTag, Mtime \
+ from '{}' where BucketName = {}";
public:
virtual ~GetBucketOp() {}
static std::string Schema(DBOpPrepareParams &params) {
- //return fmt::format(Query, params.op.bucket.bucket_name,
- // params.bucket_table, params.user_table);
return fmt::format(Query,
- params.bucket_table, params.user_table,
+ params.bucket_table,
params.op.bucket.bucket_name);
}
};
@@ -1596,7 +1592,7 @@ class DB {
RGWBucketInfo& info, rgw::sal::Attrs* pattrs, ceph::real_time* pmtime,
obj_version* pbucket_version);
int create_bucket(const DoutPrefixProvider *dpp,
- const rgw_user& owner, const rgw_bucket& bucket,
+ const rgw_owner& owner, const rgw_bucket& bucket,
const std::string& zonegroup_id,
const rgw_placement_rule& placement_rule,
const std::map<std::string, bufferlist>& attrs,
@@ -1611,7 +1607,7 @@ class DB {
int remove_bucket(const DoutPrefixProvider *dpp, const RGWBucketInfo info);
int list_buckets(const DoutPrefixProvider *dpp, const std::string& query_str,
- rgw_user& user,
+ std::string& owner,
const std::string& marker,
const std::string& end_marker,
uint64_t max,
@@ -1620,7 +1616,7 @@ class DB {
bool *is_truncated);
int update_bucket(const DoutPrefixProvider *dpp, const std::string& query_str,
RGWBucketInfo& info, bool exclusive,
- const rgw_user* powner_id, std::map<std::string, bufferlist>* pattrs,
+ const rgw_owner* powner, std::map<std::string, bufferlist>* pattrs,
ceph::real_time* pmtime, RGWObjVersionTracker* pobjv);
uint64_t get_max_head_size() { return ObjHeadSize; }
@@ -1909,7 +1905,6 @@ class DB {
DB::Object *target;
struct DeleteParams {
- rgw_user bucket_owner;
int versioning_status;
ACLOwner obj_owner; /* needed for creation of deletion marker */
uint64_t olh_epoch;
diff --git a/src/rgw/driver/dbstore/sqlite/sqliteDB.cc b/src/rgw/driver/dbstore/sqlite/sqliteDB.cc
index 81c716c27f1..554d8fe94cf 100644
--- a/src/rgw/driver/dbstore/sqlite/sqliteDB.cc
+++ b/src/rgw/driver/dbstore/sqlite/sqliteDB.cc
@@ -2,6 +2,7 @@
// vim: ts=8 sw=2 smarttab
#include "sqliteDB.h"
+#include "rgw_account.h"
using namespace std;
@@ -421,12 +422,8 @@ static int list_bucket(const DoutPrefixProvider *dpp, DBOpInfo &op, sqlite3_stmt
op.bucket.info.placement_rule = op.bucket.ent.placement_rule;
op.bucket.info.creation_time = op.bucket.ent.creation_time;
- op.bucket.info.owner.id = (const char*)sqlite3_column_text(stmt, OwnerID);
- op.bucket.info.owner.tenant = op.bucket.ent.bucket.tenant;
-
- if (op.name == "GetBucket") {
- op.bucket.info.owner.ns = (const char*)sqlite3_column_text(stmt, Bucket_User_NS);
- }
+ const char* owner_id = (const char*)sqlite3_column_text(stmt, OwnerID);
+ op.bucket.info.owner = parse_owner(owner_id);
op.bucket.info.flags = sqlite3_column_int(stmt, Flags);
op.bucket.info.zonegroup = (const char*)sqlite3_column_text(stmt, Zonegroup);
@@ -1339,7 +1336,7 @@ int SQLInsertBucket::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *para
// user_id here is copied as OwnerID in the bucket table.
SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.user_id, sdb);
- SQL_BIND_TEXT(dpp, stmt, index, params->op.user.uinfo.user_id.id.c_str(), sdb);
+ SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.owner.c_str(), sdb);
SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_name, sdb);
SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb);
@@ -1567,7 +1564,7 @@ int SQLUpdateBucket::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *para
}
SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.user.user_id, sdb);
- SQL_BIND_TEXT(dpp, *stmt, index, params->op.user.uinfo.user_id.id.c_str(), sdb);
+ SQL_BIND_TEXT(dpp, *stmt, index, params->op.bucket.owner.c_str(), sdb);
SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.bucket_name, sdb);
SQL_BIND_TEXT(dpp, *stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb);
@@ -1732,7 +1729,7 @@ int SQLListUserBuckets::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *p
if (params->op.query_str != "all") {
SQL_BIND_INDEX(dpp, *pstmt, index, p_params.op.user.user_id, sdb);
- SQL_BIND_TEXT(dpp, *pstmt, index, params->op.user.uinfo.user_id.id.c_str(), sdb);
+ SQL_BIND_TEXT(dpp, *pstmt, index, params->op.bucket.owner.c_str(), sdb);
}
SQL_BIND_INDEX(dpp, *pstmt, index, p_params.op.bucket.min_marker, sdb);
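
The SQLite binding changes reduce the owner to an opaque TEXT value: InsertBucket/UpdateBucket now bind params->op.bucket.owner directly, and list_bucket() re-parses whatever text comes back from OwnerID. A minimal sqlite3 round-trip showing that storage pattern; the table and column layout here is illustrative, not the real dbstore schema:

// Build with: c++ example.cc -lsqlite3
#include <sqlite3.h>
#include <iostream>
#include <string>

int main() {
  sqlite3* db = nullptr;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
      "CREATE TABLE buckets (BucketName TEXT PRIMARY KEY, OwnerID TEXT);",
      nullptr, nullptr, nullptr);

  // Bind the serialized owner string, as the patch does with bucket.owner.
  sqlite3_stmt* ins = nullptr;
  sqlite3_prepare_v2(db, "INSERT INTO buckets VALUES (?1, ?2);", -1, &ins, nullptr);
  std::string owner = "user_id1";
  sqlite3_bind_text(ins, 1, "bucket1", -1, SQLITE_TRANSIENT);
  sqlite3_bind_text(ins, 2, owner.c_str(), -1, SQLITE_TRANSIENT);
  sqlite3_step(ins);
  sqlite3_finalize(ins);

  // Read OwnerID back verbatim; the caller re-parses it into an rgw_owner.
  sqlite3_stmt* sel = nullptr;
  sqlite3_prepare_v2(db,
      "SELECT OwnerID FROM buckets WHERE BucketName = 'bucket1';",
      -1, &sel, nullptr);
  if (sqlite3_step(sel) == SQLITE_ROW)
    std::cout << "OwnerID=" << sqlite3_column_text(sel, 0) << '\n';
  sqlite3_finalize(sel);
  sqlite3_close(db);
}
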
diff --git a/src/rgw/driver/dbstore/tests/dbstore_tests.cc b/src/rgw/driver/dbstore/tests/dbstore_tests.cc
index 14fe9c37e75..c89addeade1 100644
--- a/src/rgw/driver/dbstore/tests/dbstore_tests.cc
+++ b/src/rgw/driver/dbstore/tests/dbstore_tests.cc
@@ -96,6 +96,7 @@ namespace {
GlobalParams.op.user.uinfo.display_name = user1;
GlobalParams.op.user.uinfo.user_id.id = user_id1;
GlobalParams.op.bucket.info.bucket.name = bucket1;
+ GlobalParams.op.bucket.owner = user_id1;
GlobalParams.op.obj.state.obj.bucket = GlobalParams.op.bucket.info.bucket;
GlobalParams.op.obj.state.obj.key.name = object1;
GlobalParams.op.obj.state.obj.key.instance = "inst1";
@@ -444,7 +445,7 @@ TEST_F(DBStoreTest, GetBucket) {
ASSERT_EQ(params.op.bucket.info.objv_tracker.read_version.ver, 3);
ASSERT_EQ(params.op.bucket.info.objv_tracker.read_version.tag, "read_tag");
ASSERT_EQ(params.op.bucket.mtime, bucket_mtime);
- ASSERT_EQ(params.op.bucket.info.owner.id, "user_id1");
+ ASSERT_EQ(to_string(params.op.bucket.info.owner), "user_id1");
bufferlist k, k2;
string acl;
map<std::string, bufferlist>::iterator it2 = params.op.bucket.bucket_attrs.begin();
@@ -507,7 +508,7 @@ TEST_F(DBStoreTest, GetBucketQueryByName) {
ASSERT_EQ(ret, 0);
ASSERT_EQ(binfo.bucket.name, "bucket2");
ASSERT_EQ(binfo.bucket.tenant, "tenant");
- ASSERT_EQ(binfo.owner.id, "user_id1");
+ ASSERT_EQ(to_string(binfo.owner), "user_id1");
ASSERT_EQ(binfo.objv_tracker.read_version.ver, 1);
ASSERT_FALSE(binfo.objv_tracker.read_version.tag.empty());
ASSERT_EQ(binfo.zonegroup, "zid");
@@ -523,14 +524,12 @@ TEST_F(DBStoreTest, GetBucketQueryByName) {
TEST_F(DBStoreTest, ListUserBuckets) {
struct DBOpParams params = GlobalParams;
int ret = -1;
- rgw_user owner;
+ std::string owner = "user_id1";
int max = 2;
bool need_stats = true;
bool is_truncated = false;
RGWUserBuckets ulist;
- owner.id = "user_id1";
-
marker1 = "";
do {
is_truncated = false;
@@ -560,8 +559,7 @@ TEST_F(DBStoreTest, ListUserBuckets) {
TEST_F(DBStoreTest, BucketChown) {
int ret = -1;
RGWBucketInfo info;
- rgw_user user;
- user.id = "user_id2";
+ rgw_owner user = rgw_user{"user_id2"};
info.bucket.name = "bucket5";
@@ -581,7 +579,7 @@ TEST_F(DBStoreTest, ListAllBuckets) {
TEST_F(DBStoreTest, ListAllBuckets2) {
struct DBOpParams params = GlobalParams;
int ret = -1;
- rgw_user owner;
+ std::string owner; // empty
int max = 2;
bool need_stats = true;
bool is_truncated = false;
@@ -595,7 +593,7 @@ TEST_F(DBStoreTest, ListAllBuckets2) {
ASSERT_EQ(ret, 0);
cout << "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ \n";
- cout << "ownerID : " << owner.id << "\n";
+ cout << "ownerID : " << owner << "\n";
cout << "marker1 :" << marker1 << "\n";
cout << "is_truncated :" << is_truncated << "\n";
diff --git a/src/rgw/driver/motr/rgw_sal_motr.cc b/src/rgw/driver/motr/rgw_sal_motr.cc
index 3ee60c9c4d5..ae86ec9e7d3 100644
--- a/src/rgw/driver/motr/rgw_sal_motr.cc
+++ b/src/rgw/driver/motr/rgw_sal_motr.cc
@@ -157,9 +157,10 @@ void MotrMetaCache::set_enabled(bool status)
// TODO: properly handle the number of key/value pairs to get in
// one query. Now the POC simply tries to retrieve all `max` number of pairs
// with starting key `marker`.
-int MotrUser::list_buckets(const DoutPrefixProvider *dpp, const string& marker,
- const string& end_marker, uint64_t max, bool need_stats,
- BucketList &buckets, optional_yield y)
+int MotrStore::list_buckets(const DoutPrefixProvider *dpp,
+ const rgw_owner& owner, const std::string& tenant,
+ const string& marker, const string& end_marker, uint64_t max,
+ bool need_stats, BucketList &buckets, optional_yield y)
{
int rc;
vector<string> keys(max);
@@ -172,9 +173,9 @@ int MotrUser::list_buckets(const DoutPrefixProvider *dpp, const string& marker,
// Retrieve all `max` number of pairs.
buckets.clear();
- string user_info_iname = "motr.rgw.user.info." + info.user_id.to_str();
+ string user_info_iname = "motr.rgw.user.info." + to_string(owner);
keys[0] = marker;
- rc = store->next_query_by_name(user_info_iname, keys, vals);
+ rc = next_query_by_name(user_info_iname, keys, vals);
if (rc < 0) {
ldpp_dout(dpp, 0) << "ERROR: NEXT query failed. " << rc << dendl;
return rc;
@@ -197,7 +198,7 @@ int MotrUser::list_buckets(const DoutPrefixProvider *dpp, const string& marker,
end_marker.compare(ent.bucket.marker) <= 0)
break;
- buckets.add(std::make_unique<MotrBucket>(this->store, ent, this));
+ buckets.add(std::make_unique<MotrBucket>(this, ent, this));
bcount++;
}
if (bcount == max)
@@ -607,7 +608,7 @@ int MotrBucket::remove(const DoutPrefixProvider *dpp, bool delete_children, opti
}
// 4. Sync user stats.
- ret = this->sync_user_stats(dpp, y);
+ ret = this->sync_owner_stats(dpp, y);
if (ret < 0) {
ldout(store->ctx(), 1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
}
@@ -808,8 +809,8 @@ int MotrBucket::read_stats_async(const DoutPrefixProvider *dpp,
return 0;
}
-int MotrBucket::sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y,
- RGWBucketEnt* ent)
+int MotrBucket::sync_owner_stats(const DoutPrefixProvider *dpp, optional_yield y,
+ RGWBucketEnt* ent)
{
return 0;
}
@@ -820,7 +821,7 @@ int MotrBucket::check_bucket_shards(const DoutPrefixProvider *dpp,
return 0;
}
-int MotrBucket::chown(const DoutPrefixProvider *dpp, User& new_user, optional_yield y)
+int MotrBucket::chown(const DoutPrefixProvider *dpp, const rgw_owner& new_user, optional_yield y)
{
// TODO: update bucket with new owner
return 0;
@@ -1510,7 +1511,8 @@ int MotrObject::delete_object(const DoutPrefixProvider* dpp, optional_yield y, u
return del_op.delete_obj(dpp, y, flags);
}
-int MotrObject::copy_object(User* user,
+int MotrObject::copy_object(const ACLOwner& owner,
+ const rgw_user& remote_user,
req_info* info,
const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object,
@@ -1541,14 +1543,14 @@ int MotrObject::copy_object(User* user,
return 0;
}
-int MotrObject::swift_versioning_restore(bool& restored,
- const DoutPrefixProvider* dpp)
+int MotrObject::swift_versioning_restore(const ACLOwner& owner, const rgw_user& remote_user, bool& restored,
+ const DoutPrefixProvider* dpp, optional_yield y)
{
return 0;
}
-int MotrObject::swift_versioning_copy(const DoutPrefixProvider* dpp,
- optional_yield y)
+int MotrObject::swift_versioning_copy(const ACLOwner& owner, const rgw_user& remote_user,
+ const DoutPrefixProvider* dpp, optional_yield y)
{
return 0;
}
@@ -1557,7 +1559,7 @@ MotrAtomicWriter::MotrAtomicWriter(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
MotrStore* _store,
- const rgw_user& _owner,
+ const ACLOwner& _owner,
const rgw_placement_rule *_ptail_placement_rule,
uint64_t _olh_epoch,
const std::string& _unique_tag) :
@@ -2935,7 +2937,7 @@ std::unique_ptr<Writer> MotrMultipartUpload::get_writer(
const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num,
const std::string& part_num_str)
@@ -3027,8 +3029,10 @@ int MotrMultipartWriter::complete(size_t accounted_size, const std::string& etag
std::unique_ptr<RGWRole> MotrStore::get_role(std::string name,
std::string tenant,
+ rgw_account_id account_id,
std::string path,
std::string trust_policy,
+ std::string description,
std::string max_session_duration_str,
std::multimap<std::string,std::string> tags)
{
@@ -3048,26 +3052,48 @@ std::unique_ptr<RGWRole> MotrStore::get_role(std::string id)
return std::unique_ptr<RGWRole>(p);
}
-int MotrStore::get_roles(const DoutPrefixProvider *dpp,
- optional_yield y,
- const std::string& path_prefix,
- const std::string& tenant,
- vector<std::unique_ptr<RGWRole>>& roles)
+int MotrStore::list_roles(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const std::string& tenant,
+ const std::string& path_prefix,
+ const std::string& marker,
+ uint32_t max_items,
+ RoleList& listing)
{
return 0;
}
-std::unique_ptr<RGWOIDCProvider> MotrStore::get_oidc_provider()
+int MotrStore::store_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const RGWOIDCProviderInfo& info,
+ bool exclusive)
{
- RGWOIDCProvider* p = nullptr;
- return std::unique_ptr<RGWOIDCProvider>(p);
+ return -ENOTSUP;
}
-int MotrStore::get_oidc_providers(const DoutPrefixProvider *dpp,
- const std::string& tenant,
- vector<std::unique_ptr<RGWOIDCProvider>>& providers)
+int MotrStore::load_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url,
+ RGWOIDCProviderInfo& info)
{
- return 0;
+ return -ENOTSUP;
+}
+
+int MotrStore::delete_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url)
+{
+ return -ENOTSUP;
+}
+
+int MotrStore::get_oidc_providers(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::vector<RGWOIDCProviderInfo>& providers)
+{
+ return -ENOTSUP;
}
std::unique_ptr<MultipartUpload> MotrBucket::get_multipart_upload(const std::string& oid,
@@ -3080,7 +3106,7 @@ std::unique_ptr<MultipartUpload> MotrBucket::get_multipart_upload(const std::str
std::unique_ptr<Writer> MotrStore::get_append_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
const std::string& unique_tag,
uint64_t position,
@@ -3091,7 +3117,7 @@ std::unique_ptr<Writer> MotrStore::get_append_writer(const DoutPrefixProvider *d
std::unique_ptr<Writer> MotrStore::get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag) {
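
As with the DAOS driver, bucket listing moves off the User object: MotrStore::list_buckets now takes the owner (and tenant) explicitly, so callers no longer materialize a User just to enumerate buckets. A simplified sketch of that driver-level entry point, with stand-in types and an in-memory index in place of the Motr backend:

#include <iostream>
#include <map>
#include <string>
#include <vector>

struct BucketEnt { std::string name; };

class Store {
  // owner string -> bucket names (stand-in for the per-owner index)
  std::map<std::string, std::vector<std::string>> index_;
public:
  void add(const std::string& owner, std::string bucket) {
    index_[owner].push_back(std::move(bucket));
  }
  // New shape: driver-level listing keyed by owner; the real signature also
  // takes tenant, end_marker, need_stats, and an optional_yield.
  int list_buckets(const std::string& owner, const std::string& marker,
                   uint64_t max, std::vector<BucketEnt>& out) const {
    auto it = index_.find(owner);
    if (it == index_.end()) return 0;
    for (const auto& name : it->second) {
      if (name <= marker) continue;    // resume strictly after the marker
      out.push_back({name});
      if (out.size() == max) break;
    }
    return 0;
  }
};

int main() {
  Store s;
  s.add("user_id1", "alpha");
  s.add("user_id1", "beta");
  std::vector<BucketEnt> ents;
  s.list_buckets("user_id1", "", 10, ents);
  for (const auto& e : ents) std::cout << e.name << '\n';
}
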
diff --git a/src/rgw/driver/motr/rgw_sal_motr.h b/src/rgw/driver/motr/rgw_sal_motr.h
index 63c2b9d9dc8..e278728c7e7 100644
--- a/src/rgw/driver/motr/rgw_sal_motr.h
+++ b/src/rgw/driver/motr/rgw_sal_motr.h
@@ -29,7 +29,6 @@ extern "C" {
#include "rgw_sal_store.h"
#include "rgw_rados.h"
#include "rgw_notify.h"
-#include "rgw_oidc_provider.h"
#include "rgw_role.h"
#include "rgw_multi.h"
#include "rgw_putobj_processor.h"
@@ -219,8 +218,6 @@ class MotrUser : public StoreUser {
virtual std::unique_ptr<User> clone() override {
return std::unique_ptr<User>(new MotrUser(*this));
}
- int list_buckets(const DoutPrefixProvider *dpp, const std::string& marker, const std::string& end_marker,
- uint64_t max, bool need_stats, BucketList& buckets, optional_yield y) override;
virtual int create_bucket(const DoutPrefixProvider* dpp,
const rgw_bucket& b,
const std::string& zonegroup_id,
@@ -375,11 +372,11 @@ class MotrBucket : public StoreBucket {
virtual int read_stats_async(const DoutPrefixProvider *dpp,
const bucket_index_layout_generation& idx_layout,
int shard_id, boost::intrusive_ptr<ReadStatsCB> ctx) override;
- int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y,
- RGWBucketEnt* ent) override;
+ int sync_owner_stats(const DoutPrefixProvider *dpp, optional_yield y,
+ RGWBucketEnt* ent) override;
int check_bucket_shards(const DoutPrefixProvider *dpp,
uint64_t num_objs) override;
- virtual int chown(const DoutPrefixProvider *dpp, User& new_user, optional_yield y) override;
+ virtual int chown(const DoutPrefixProvider *dpp, const rgw_owner& new_user, optional_yield y) override;
virtual int put_info(const DoutPrefixProvider *dpp, bool exclusive, ceph::real_time mtime) override;
virtual bool is_owner(User* user) override;
virtual int check_empty(const DoutPrefixProvider *dpp, optional_yield y) override;
@@ -563,24 +560,6 @@ class MotrLuaManager : public StoreLuaManager {
virtual int reload_packages(const DoutPrefixProvider* dpp, optional_yield y) override;
};
-class MotrOIDCProvider : public RGWOIDCProvider {
- MotrStore* store;
- public:
- MotrOIDCProvider(MotrStore* _store) : store(_store) {}
- ~MotrOIDCProvider() = default;
-
- virtual int store_url(const DoutPrefixProvider *dpp, const std::string& url, bool exclusive, optional_yield y) override { return 0; }
- virtual int read_url(const DoutPrefixProvider *dpp, const std::string& url, const std::string& tenant) override { return 0; }
- virtual int delete_obj(const DoutPrefixProvider *dpp, optional_yield y) override { return 0;}
-
- void encode(bufferlist& bl) const {
- RGWOIDCProvider::encode(bl);
- }
- void decode(bufferlist::const_iterator& bl) {
- RGWOIDCProvider::decode(bl);
- }
-};
-
class MotrObject : public StoreObject {
private:
MotrStore *store;
@@ -679,7 +658,8 @@ class MotrObject : public StoreObject {
virtual int delete_object(const DoutPrefixProvider* dpp,
optional_yield y,
uint32_t flags) override;
- virtual int copy_object(User* user,
+ virtual int copy_object(const ACLOwner& owner,
+ const rgw_user& remote_user,
req_info* info, const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
rgw::sal::Bucket* src_bucket,
@@ -718,10 +698,10 @@ class MotrObject : public StoreObject {
virtual int dump_obj_layout(const DoutPrefixProvider *dpp, optional_yield y, Formatter* f) override;
/* Swift versioning */
- virtual int swift_versioning_restore(bool& restored,
- const DoutPrefixProvider* dpp) override;
- virtual int swift_versioning_copy(const DoutPrefixProvider* dpp,
- optional_yield y) override;
+ virtual int swift_versioning_restore(const ACLOwner& owner, const rgw_user& remote_user,
+ bool& restored, const DoutPrefixProvider* dpp, optional_yield y) override;
+ virtual int swift_versioning_copy(const ACLOwner& owner, const rgw_user& remote_user,
+ const DoutPrefixProvider* dpp, optional_yield y) override;
/* OPs */
virtual std::unique_ptr<ReadOp> get_read_op() override;
@@ -774,7 +754,7 @@ class MPMotrSerializer : public StoreMPSerializer {
class MotrAtomicWriter : public StoreWriter {
protected:
rgw::sal::MotrStore* store;
- const rgw_user& owner;
+ const ACLOwner& owner;
const rgw_placement_rule *ptail_placement_rule;
uint64_t olh_epoch;
const std::string& unique_tag;
@@ -793,7 +773,7 @@ class MotrAtomicWriter : public StoreWriter {
optional_yield y,
rgw::sal::Object* obj,
MotrStore* _store,
- const rgw_user& _owner,
+ const ACLOwner& _owner,
const rgw_placement_rule *_ptail_placement_rule,
uint64_t _olh_epoch,
const std::string& _unique_tag);
@@ -840,7 +820,7 @@ public:
optional_yield y, MultipartUpload* upload,
rgw::sal::Object* obj,
MotrStore* _store,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t _part_num, const std::string& part_num_str) :
StoreWriter(dpp, y), store(_store), head_obj(obj),
@@ -953,7 +933,7 @@ public:
virtual std::unique_ptr<Writer> get_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num,
const std::string& part_num_str) override;
@@ -998,6 +978,10 @@ class MotrStore : public StoreDriver {
std::unique_ptr<Bucket> get_bucket(User* u, const RGWBucketInfo& i) override;
int load_bucket(const DoutPrefixProvider *dpp, User* u, const rgw_bucket& b,
std::unique_ptr<Bucket>* bucket, optional_yield y) override;
+ int list_buckets(const DoutPrefixProvider *dpp,
+ const rgw_owner& owner, const std::string& tenant,
+ const std::string& marker, const std::string& end_marker,
+ uint64_t max, bool need_stats, BucketList& buckets, optional_yield y) override;
virtual bool is_meta_master() override;
virtual Zone* get_zone() { return &zone; }
virtual std::string zone_unique_id(uint64_t unique_num) override;
@@ -1055,25 +1039,42 @@ class MotrStore : public StoreDriver {
std::unique_ptr<LuaManager> get_lua_manager(const DoutPrefixProvider *dpp = nullptr, const std::string& luarocks_path = "") override;
virtual std::unique_ptr<RGWRole> get_role(std::string name,
std::string tenant,
+ rgw_account_id account_id,
std::string path="",
std::string trust_policy="",
+ std::string description="",
std::string max_session_duration_str="",
std::multimap<std::string, std::string> tags={}) override;
virtual std::unique_ptr<RGWRole> get_role(const RGWRoleInfo& info) override;
virtual std::unique_ptr<RGWRole> get_role(std::string id) override;
- virtual int get_roles(const DoutPrefixProvider *dpp,
- optional_yield y,
- const std::string& path_prefix,
- const std::string& tenant,
- std::vector<std::unique_ptr<RGWRole>>& roles) override;
- virtual std::unique_ptr<RGWOIDCProvider> get_oidc_provider() override;
- virtual int get_oidc_providers(const DoutPrefixProvider *dpp,
- const std::string& tenant,
- std::vector<std::unique_ptr<RGWOIDCProvider>>& providers) override;
+ int list_roles(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const std::string& tenant,
+ const std::string& path_prefix,
+ const std::string& marker,
+ uint32_t max_items,
+ RoleList& listing) override;
+ int store_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const RGWOIDCProviderInfo& info,
+ bool exclusive) override;
+ int load_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url,
+ RGWOIDCProviderInfo& info) override;
+ int delete_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url) override;
+ int get_oidc_providers(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::vector<RGWOIDCProviderInfo>& providers) override;
virtual std::unique_ptr<Writer> get_append_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
const std::string& unique_tag,
uint64_t position,
@@ -1081,7 +1082,7 @@ class MotrStore : public StoreDriver {
virtual std::unique_ptr<Writer> get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag) override;
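
The per-driver RGWOIDCProvider subclass and its encode/decode stubs are gone; providers are now plain RGWOIDCProviderInfo records handled through four flat driver entry points (store/load/delete/get). A hedged sketch of the new call shape, using only the signatures declared above; the tenant and URL values are placeholders, and stores that stub the API (as in the .cc hunk earlier) return -ENOTSUP:

    RGWOIDCProviderInfo info;
    int r = driver->load_oidc_provider(dpp, y, "tenant",
                                       "accounts.example.com", info);
    if (r == 0) {
      constexpr bool exclusive = false;   // overwrite the existing record
      r = driver->store_oidc_provider(dpp, y, info, exclusive);
    }
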
diff --git a/src/rgw/driver/posix/rgw_sal_posix.cc b/src/rgw/driver/posix/rgw_sal_posix.cc
index a8b3fe2f3e3..9b1b34fa9e4 100644
--- a/src/rgw/driver/posix/rgw_sal_posix.cc
+++ b/src/rgw/driver/posix/rgw_sal_posix.cc
@@ -373,7 +373,7 @@ std::string POSIXDriver::zone_unique_trans_id(const uint64_t unique_num)
std::unique_ptr<Writer> POSIXDriver::get_append_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* _head_obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
const std::string& unique_tag,
uint64_t position,
@@ -390,7 +390,7 @@ std::unique_ptr<Writer> POSIXDriver::get_append_writer(const DoutPrefixProvider
std::unique_ptr<Writer> POSIXDriver::get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* _head_obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag)
@@ -444,7 +444,8 @@ int POSIXDriver::close()
}
// TODO: marker and other params
-int POSIXUser::list_buckets(const DoutPrefixProvider* dpp, const std::string& marker,
+int POSIXDriver::list_buckets(const DoutPrefixProvider* dpp, const rgw_owner& owner,
+ const std::string& tenant, const std::string& marker,
const std::string& end_marker, uint64_t max,
bool need_stats, BucketList &result, optional_yield y)
{
@@ -457,7 +458,7 @@ int POSIXUser::list_buckets(const DoutPrefixProvider* dpp, const std::string& ma
/* it's not sufficient to dup(root_fd), as the new fd would share
* the file position of root_fd */
- dfd = copy_dir_fd(driver->get_root_fd());
+ dfd = copy_dir_fd(get_root_fd());
if (dfd == -1) {
ret = errno;
ldpp_dout(dpp, 0) << "ERROR: could not open root to list buckets: "
@@ -470,7 +471,7 @@ int POSIXUser::list_buckets(const DoutPrefixProvider* dpp, const std::string& ma
ret = errno;
ldpp_dout(dpp, 0) << "ERROR: could not open root to list buckets: "
<< cpp_strerror(ret) << dendl;
- close(dfd);
+ ::close(dfd);
return -ret;
}
@@ -486,7 +487,7 @@ int POSIXUser::list_buckets(const DoutPrefixProvider* dpp, const std::string& ma
while ((entry = readdir(dir)) != NULL) {
struct statx stx;
- ret = statx(driver->get_root_fd(), entry->d_name, AT_SYMLINK_NOFOLLOW, STATX_ALL, &stx);
+ ret = statx(get_root_fd(), entry->d_name, AT_SYMLINK_NOFOLLOW, STATX_ALL, &stx);
if (ret < 0) {
ret = errno;
ldpp_dout(dpp, 0) << "ERROR: could not stat object " << entry->d_name << ": "
@@ -516,7 +517,7 @@ int POSIXUser::list_buckets(const DoutPrefixProvider* dpp, const std::string& ma
}
ret = errno;
if (ret != 0) {
- ldpp_dout(dpp, 0) << "ERROR: could not list buckets for " << get_display_name() << ": "
+ ldpp_dout(dpp, 0) << "ERROR: could not list buckets for " << owner << ": "
<< cpp_strerror(ret) << dendl;
return -ret;
}
@@ -967,8 +968,8 @@ int POSIXBucket::read_stats_async(const DoutPrefixProvider *dpp,
return 0;
}
-int POSIXBucket::sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y,
- RGWBucketEnt* ent)
+int POSIXBucket::sync_owner_stats(const DoutPrefixProvider *dpp, optional_yield y,
+ RGWBucketEnt* ent)
{
return 0;
}
@@ -979,7 +980,7 @@ int POSIXBucket::check_bucket_shards(const DoutPrefixProvider* dpp,
return 0;
}
-int POSIXBucket::chown(const DoutPrefixProvider* dpp, const rgw_user& new_owner, optional_yield y)
+int POSIXBucket::chown(const DoutPrefixProvider* dpp, const rgw_owner& new_owner, optional_yield y)
{
/* TODO map user to UID/GID, and change it */
return 0;
@@ -1487,7 +1488,8 @@ int POSIXObject::delete_object(const DoutPrefixProvider* dpp,
return 0;
}
-int POSIXObject::copy_object(User* user,
+int POSIXObject::copy_object(const ACLOwner& owner,
+ const rgw_user& remote_user,
req_info* info,
const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object,
@@ -1690,14 +1692,14 @@ int POSIXObject::dump_obj_layout(const DoutPrefixProvider *dpp, optional_yield y
return 0;
}
-int POSIXObject::swift_versioning_restore(bool& restored,
+int POSIXObject::swift_versioning_restore(const ACLOwner& owner, const rgw_user& remote_user, bool& restored,
const DoutPrefixProvider* dpp, optional_yield y)
{
return 0;
}
-int POSIXObject::swift_versioning_copy(const DoutPrefixProvider* dpp,
- optional_yield y)
+int POSIXObject::swift_versioning_copy(const ACLOwner& owner, const rgw_user& remote_user,
+ const DoutPrefixProvider* dpp, optional_yield y)
{
return 0;
}
@@ -2783,7 +2785,7 @@ std::unique_ptr<Writer> POSIXMultipartUpload::get_writer(
const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* _head_obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num,
const std::string& part_num_str)
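
One subtle fix above: list_buckets() moved from POSIXUser into POSIXDriver, and POSIXDriver defines its own close() (visible in the int POSIXDriver::close() hunk), so an unqualified close(dfd) would now resolve to the member function instead of the syscall wrapper. The ::close(dfd) form pins the call to the global namespace. An illustrative reduction with hypothetical names:

    #include <unistd.h>

    struct Driver {
      int close() { return 0; }   // driver shutdown, takes no fd
      void drop(int fd) {
        // close(fd);             // would pick Driver::close and fail to compile
        ::close(fd);              // global namespace: the POSIX close(2) wrapper
      }
    };
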
diff --git a/src/rgw/driver/posix/rgw_sal_posix.h b/src/rgw/driver/posix/rgw_sal_posix.h
index ed7630a7d65..587bf783e90 100644
--- a/src/rgw/driver/posix/rgw_sal_posix.h
+++ b/src/rgw/driver/posix/rgw_sal_posix.h
@@ -55,12 +55,17 @@ public:
virtual std::unique_ptr<Bucket> get_bucket(const RGWBucketInfo& i) override;
virtual int load_bucket(const DoutPrefixProvider* dpp, const rgw_bucket& b,
std::unique_ptr<Bucket>* bucket, optional_yield y) override;
+ virtual int list_buckets(const DoutPrefixProvider* dpp,
+ const rgw_owner& owner, const std::string& tenant,
+ const std::string& marker, const std::string& end_marker,
+ uint64_t max, bool need_stats, BucketList& buckets,
+ optional_yield y) override;
virtual std::string zone_unique_trans_id(const uint64_t unique_num) override;
virtual std::unique_ptr<Writer> get_append_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* _head_obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
const std::string& unique_tag,
uint64_t position,
@@ -68,7 +73,7 @@ public:
virtual std::unique_ptr<Writer> get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* _head_obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag) override;
@@ -116,10 +121,6 @@ public:
driver(_driver) {}
virtual ~POSIXUser() = default;
- virtual int list_buckets(const DoutPrefixProvider* dpp,
- const std::string& marker, const std::string& end_marker,
- uint64_t max, bool need_stats, BucketList& buckets,
- optional_yield y) override;
virtual Attrs& get_attrs() override { return next->get_attrs(); }
virtual void set_attrs(Attrs& _attrs) override { next->set_attrs(_attrs); }
virtual int read_attrs(const DoutPrefixProvider* dpp, optional_yield y) override;
@@ -196,11 +197,11 @@ public:
virtual int read_stats_async(const DoutPrefixProvider *dpp,
const bucket_index_layout_generation& idx_layout,
int shard_id, boost::intrusive_ptr<ReadStatsCB> ctx) override;
- virtual int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y,
- RGWBucketEnt* ent) override;
+ virtual int sync_owner_stats(const DoutPrefixProvider *dpp, optional_yield y,
+ RGWBucketEnt* ent) override;
virtual int check_bucket_shards(const DoutPrefixProvider* dpp,
uint64_t num_objs, optional_yield y) override;
- virtual int chown(const DoutPrefixProvider* dpp, const rgw_user& new_owner, optional_yield y) override;
+ virtual int chown(const DoutPrefixProvider* dpp, const rgw_owner& new_owner, optional_yield y) override;
virtual int put_info(const DoutPrefixProvider* dpp, bool exclusive,
ceph::real_time mtime, optional_yield y) override;
virtual int check_empty(const DoutPrefixProvider* dpp, optional_yield y) override;
@@ -320,7 +321,8 @@ public:
virtual int delete_object(const DoutPrefixProvider* dpp,
optional_yield y,
uint32_t flags) override;
- virtual int copy_object(User* user,
+ virtual int copy_object(const ACLOwner& owner,
+ const rgw_user& remote_user,
req_info* info, const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
rgw::sal::Bucket* src_bucket,
@@ -367,10 +369,10 @@ public:
optional_yield y) override;
virtual bool placement_rules_match(rgw_placement_rule& r1, rgw_placement_rule& r2) override;
virtual int dump_obj_layout(const DoutPrefixProvider *dpp, optional_yield y, Formatter* f) override;
- virtual int swift_versioning_restore(bool& restored,
+ virtual int swift_versioning_restore(const ACLOwner& owner, const rgw_user& remote_user, bool& restored,
const DoutPrefixProvider* dpp, optional_yield y) override;
- virtual int swift_versioning_copy(const DoutPrefixProvider* dpp,
- optional_yield y) override;
+ virtual int swift_versioning_copy(const ACLOwner& owner, const rgw_user& remote_user,
+ const DoutPrefixProvider* dpp, optional_yield y) override;
virtual std::unique_ptr<ReadOp> get_read_op() override;
virtual std::unique_ptr<DeleteOp> get_delete_op() override;
virtual int omap_get_vals_by_keys(const DoutPrefixProvider *dpp, const std::string& oid,
@@ -561,7 +563,7 @@ public:
virtual std::unique_ptr<Writer> get_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* _head_obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num,
const std::string& part_num_str) override;
@@ -574,7 +576,7 @@ private:
class POSIXAtomicWriter : public StoreWriter {
private:
POSIXDriver* driver;
- const rgw_user& owner;
+ const ACLOwner& owner;
const rgw_placement_rule *ptail_placement_rule;
uint64_t olh_epoch;
const std::string& unique_tag;
@@ -585,7 +587,7 @@ public:
optional_yield y,
rgw::sal::Object* _head_obj,
POSIXDriver* _driver,
- const rgw_user& _owner,
+ const ACLOwner& _owner,
const rgw_placement_rule *_ptail_placement_rule,
uint64_t _olh_epoch,
const std::string& _unique_tag) :
@@ -614,7 +616,7 @@ public:
class POSIXMultipartWriter : public StoreWriter {
private:
POSIXDriver* driver;
- const rgw_user& owner;
+ const ACLOwner& owner;
const rgw_placement_rule *ptail_placement_rule;
uint64_t part_num;
std::unique_ptr<Bucket> shadow_bucket;
@@ -626,7 +628,7 @@ public:
std::unique_ptr<Bucket> _shadow_bucket,
rgw_obj_key& _key,
POSIXDriver* _driver,
- const rgw_user& _owner,
+ const ACLOwner& _owner,
const rgw_placement_rule *_ptail_placement_rule,
uint64_t _part_num) :
StoreWriter(dpp, y),
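
With bucket listing lifted off the User object, callers go through the driver and identify the owner explicitly, which lets the same entry point serve user- and account-owned buckets. A listing sketch against the new driver-level signature; the marker and tenant values here are placeholders:

    rgw::sal::BucketList listing;
    rgw_owner owner = rgw_user{"tenant", "uid"};   // accounts pass their rgw_account_id
    int r = driver->list_buckets(dpp, owner, "tenant", /*marker=*/"",
                                 /*end_marker=*/"", /*max=*/1000,
                                 /*need_stats=*/false, listing, y);
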
diff --git a/src/rgw/driver/rados/account.cc b/src/rgw/driver/rados/account.cc
new file mode 100644
index 00000000000..fc881d07804
--- /dev/null
+++ b/src/rgw/driver/rados/account.cc
@@ -0,0 +1,674 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "account.h"
+
+#include <boost/algorithm/string.hpp>
+#include "include/rados/librados.hpp"
+#include "cls/user/cls_user_types.h"
+#include "common/errno.h"
+#include "rgw_account.h"
+#include "rgw_common.h"
+#include "rgw_metadata.h"
+#include "rgw_metadata_lister.h"
+#include "rgw_obj_types.h"
+#include "rgw_string.h"
+#include "rgw_tools.h"
+#include "rgw_user.h"
+#include "rgw_zone.h"
+#include "services/svc_sys_obj.h"
+
+namespace rgwrados::account {
+
+static constexpr std::string_view buckets_oid_prefix = "buckets.";
+static constexpr std::string_view users_oid_prefix = "users.";
+static constexpr std::string_view groups_oid_prefix = "groups.";
+static constexpr std::string_view roles_oid_prefix = "roles.";
+static constexpr std::string_view topics_oid_prefix = "topics.";
+static const std::string account_oid_prefix = "account.";
+static constexpr std::string_view name_oid_prefix = "name.";
+
+// metadata keys/objects
+static std::string get_buckets_key(std::string_view account_id) {
+ return string_cat_reserve(buckets_oid_prefix, account_id);
+}
+rgw_raw_obj get_buckets_obj(const RGWZoneParams& zone,
+ std::string_view account_id) {
+ return {zone.account_pool, get_buckets_key(account_id)};
+}
+
+static std::string get_users_key(std::string_view account_id) {
+ return string_cat_reserve(users_oid_prefix, account_id);
+}
+rgw_raw_obj get_users_obj(const RGWZoneParams& zone,
+ std::string_view account_id) {
+ return {zone.account_pool, get_users_key(account_id)};
+}
+
+static std::string get_groups_key(std::string_view account_id) {
+ return string_cat_reserve(groups_oid_prefix, account_id);
+}
+rgw_raw_obj get_groups_obj(const RGWZoneParams& zone,
+ std::string_view account_id) {
+ return {zone.account_pool, get_groups_key(account_id)};
+}
+
+static std::string get_roles_key(std::string_view account_id) {
+ return string_cat_reserve(roles_oid_prefix, account_id);
+}
+rgw_raw_obj get_roles_obj(const RGWZoneParams& zone,
+ std::string_view account_id) {
+ return {zone.account_pool, get_roles_key(account_id)};
+}
+
+static std::string get_topics_key(std::string_view account_id) {
+ return string_cat_reserve(topics_oid_prefix, account_id);
+}
+rgw_raw_obj get_topics_obj(const RGWZoneParams& zone,
+ std::string_view account_id) {
+ return {zone.account_pool, get_topics_key(account_id)};
+}
+
+static std::string get_account_key(std::string_view account_id) {
+ return string_cat_reserve(account_oid_prefix, account_id);
+}
+static rgw_raw_obj get_account_obj(const RGWZoneParams& zone,
+ std::string_view account_id) {
+ return {zone.account_pool, get_account_key(account_id)};
+}
+
+static std::string get_name_key(std::string_view tenant,
+ std::string_view name) {
+ return string_cat_reserve(name_oid_prefix, tenant, "$", name);
+}
+static rgw_raw_obj get_name_obj(const RGWZoneParams& zone,
+ std::string_view tenant,
+ std::string_view name) {
+ return {zone.account_pool, get_name_key(tenant, name)};
+}
+
+// store in lower case for case-insensitive matching
+static std::string get_email_key(std::string_view email) {
+ auto lower = std::string{email};
+ boost::to_lower(lower);
+ return lower;
+}
+// note that account email oids conflict with user email oids. this ensures
+// that all emails are globally unique. we rely on rgw::account::validate_id()
+// to distinguish between user and account ids
+static rgw_raw_obj get_email_obj(const RGWZoneParams& zone,
+ std::string_view email) {
+ return {zone.user_email_pool, get_email_key(email)};
+}
+
+
+struct RedirectObj {
+ rgw_raw_obj obj;
+ RGWUID data;
+ RGWObjVersionTracker objv;
+};
+
+static int read_redirect(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ RedirectObj& redirect)
+{
+ bufferlist bl;
+ int r = rgw_get_system_obj(&sysobj, redirect.obj.pool, redirect.obj.oid,
+ bl, &redirect.objv, nullptr, y, dpp);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "failed to read " << redirect.obj.oid
+ << " with: " << cpp_strerror(r) << dendl;
+ return r;
+ }
+
+ try {
+ auto p = bl.cbegin();
+ decode(redirect.data, p);
+ } catch (const buffer::error& e) {
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode account redirect: "
+ << e.what() << dendl;
+ return -EIO;
+ }
+ return 0;
+}
+
+static int write_redirect(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ RedirectObj& redirect)
+{
+ bufferlist bl;
+ encode(redirect.data, bl);
+
+ constexpr bool exclusive = true;
+ return rgw_put_system_obj(dpp, &sysobj, redirect.obj.pool,
+ redirect.obj.oid, bl, exclusive,
+ &redirect.objv, ceph::real_time{}, y);
+}
+
+
+int read(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ const RGWZoneParams& zone,
+ std::string_view account_id,
+ RGWAccountInfo& info,
+ std::map<std::string, ceph::buffer::list>& attrs,
+ ceph::real_time& mtime,
+ RGWObjVersionTracker& objv)
+{
+ const rgw_raw_obj obj = get_account_obj(zone, account_id);
+
+ bufferlist bl;
+ int r = rgw_get_system_obj(&sysobj, obj.pool, obj.oid, bl,
+ &objv, &mtime, y, dpp, &attrs);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "account lookup with id=" << account_id
+ << " failed: " << cpp_strerror(r) << dendl;
+ return r;
+ }
+
+ try {
+ auto p = bl.cbegin();
+ decode(info, p);
+ } catch (const buffer::error& e) {
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode account info: "
+ << e.what() << dendl;
+ return -EIO;
+ }
+ if (info.id != account_id) {
+ ldpp_dout(dpp, 0) << "ERROR: read account id mismatch "
+ << info.id << " != " << account_id << dendl;
+ return -EIO;
+ }
+ return 0;
+}
+
+int read_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ const RGWZoneParams& zone,
+ std::string_view tenant,
+ std::string_view name,
+ RGWAccountInfo& info,
+ std::map<std::string, ceph::buffer::list>& attrs,
+ RGWObjVersionTracker& objv)
+{
+ auto redirect = RedirectObj{.obj = get_name_obj(zone, tenant, name)};
+ int r = read_redirect(dpp, y, sysobj, redirect);
+ if (r < 0) {
+ return r;
+ }
+ ceph::real_time mtime; // ignored
+ return read(dpp, y, sysobj, zone, redirect.data.id, info, attrs, mtime, objv);
+}
+
+int read_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ const RGWZoneParams& zone,
+ std::string_view email,
+ RGWAccountInfo& info,
+ std::map<std::string, ceph::buffer::list>& attrs,
+ RGWObjVersionTracker& objv)
+{
+ auto redirect = RedirectObj{.obj = get_email_obj(zone, email)};
+ int r = read_redirect(dpp, y, sysobj, redirect);
+ if (r < 0) {
+ return r;
+ }
+ if (!rgw::account::validate_id(redirect.data.id)) {
+ // this index is used for a user, not an account
+ return -ENOENT;
+ }
+ ceph::real_time mtime; // ignored
+ return read(dpp, y, sysobj, zone, redirect.data.id, info, attrs, mtime, objv);
+}
+
+
+int write(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ const RGWZoneParams& zone,
+ const RGWAccountInfo& info,
+ const RGWAccountInfo* old_info,
+ const std::map<std::string, ceph::buffer::list>& attrs,
+ ceph::real_time mtime,
+ bool exclusive,
+ RGWObjVersionTracker& objv)
+{
+ const rgw_raw_obj obj = get_account_obj(zone, info.id);
+
+ const bool same_name = old_info
+ && old_info->tenant == info.tenant
+ && old_info->name == info.name;
+ const bool same_email = old_info
+ && boost::iequals(old_info->email, info.email);
+
+ std::optional<RedirectObj> remove_name;
+ std::optional<RedirectObj> remove_email;
+ if (old_info) {
+ if (old_info->id != info.id) {
+ ldpp_dout(dpp, 1) << "ERROR: can't modify account id" << dendl;
+ return -EINVAL;
+ }
+ if (!same_name && !old_info->name.empty()) {
+ // read old account name object
+ RedirectObj redirect;
+ redirect.obj = get_name_obj(zone, old_info->tenant, old_info->name);
+ int r = read_redirect(dpp, y, sysobj, redirect);
+ if (r == -ENOENT) {
+ // leave remove_name empty
+ } else if (r < 0) {
+ return r;
+ } else if (redirect.data.id == info.id) {
+ remove_name = std::move(redirect);
+ }
+ }
+ if (!same_email && !old_info->email.empty()) {
+ // read old account email object
+ RedirectObj redirect;
+ redirect.obj = get_email_obj(zone, old_info->email);
+ int r = read_redirect(dpp, y, sysobj, redirect);
+ if (r == -ENOENT) {
+ // leave remove_email empty
+ } else if (r < 0) {
+ return r;
+ } else if (redirect.data.id == info.id) {
+ remove_email = std::move(redirect);
+ }
+ }
+ } // old_info
+
+ if (!same_name && !info.name.empty()) {
+ // read new account name object
+ RedirectObj redirect;
+ redirect.obj = get_name_obj(zone, info.tenant, info.name);
+ int r = read_redirect(dpp, y, sysobj, redirect);
+ if (r == -ENOENT) {
+ // write the new name object below
+ } else if (r == 0) {
+ ldpp_dout(dpp, 1) << "ERROR: account name obj " << redirect.obj
+ << " already taken for account id " << redirect.data.id << dendl;
+ return -EEXIST;
+ } else if (r < 0) {
+ return r;
+ }
+ }
+
+ if (!same_email && !info.email.empty()) {
+ // read new account email object
+ RedirectObj redirect;
+ redirect.obj = get_email_obj(zone, info.email);
+ int r = read_redirect(dpp, y, sysobj, redirect);
+ if (r == -ENOENT) {
+ // write the new email object below
+ } else if (r == 0) {
+ ldpp_dout(dpp, 1) << "ERROR: account email obj " << redirect.obj
+ << " already taken for " << redirect.data.id << dendl;
+ return -EEXIST;
+ } else if (r < 0) {
+ return r;
+ }
+ }
+
+ // encode/write the account info
+ {
+ bufferlist bl;
+ encode(info, bl);
+
+ const rgw_raw_obj obj = get_account_obj(zone, info.id);
+ int r = rgw_put_system_obj(dpp, &sysobj, obj.pool, obj.oid, bl,
+ exclusive, &objv, mtime, y, &attrs);
+ if (r < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to write account obj " << obj
+ << " with: " << cpp_strerror(r) << dendl;
+ return r;
+ }
+ }
+
+ if (remove_name) {
+ // remove the old name object, ignoring errors
+ auto& redirect = *remove_name;
+ int r = rgw_delete_system_obj(dpp, &sysobj, redirect.obj.pool,
+ redirect.obj.oid, &redirect.objv, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "WARNING: failed to remove old name obj "
+ << redirect.obj.oid << ": " << cpp_strerror(r) << dendl;
+ } // not fatal
+ }
+ if (!same_name && !info.name.empty()) {
+ // write the new name object
+ RedirectObj redirect;
+ redirect.obj = get_name_obj(zone, info.tenant, info.name);
+ redirect.data.id = info.id;
+ redirect.objv.generate_new_write_ver(dpp->get_cct());
+
+ int r = write_redirect(dpp, y, sysobj, redirect);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "WARNING: failed to write name obj "
+ << redirect.obj << " with: " << cpp_strerror(r) << dendl;
+ } // not fatal
+ }
+
+ if (remove_email) {
+ // remove the old email object, ignoring errors
+ auto& redirect = *remove_email;
+ int r = rgw_delete_system_obj(dpp, &sysobj, redirect.obj.pool,
+ redirect.obj.oid, &redirect.objv, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "WARNING: failed to remove old email obj "
+ << redirect.obj.oid << ": " << cpp_strerror(r) << dendl;
+ } // not fatal
+ }
+ if (!same_email && !info.email.empty()) {
+ // write the new email object
+ RedirectObj redirect;
+ redirect.obj = get_email_obj(zone, info.email);
+ redirect.data.id = info.id;
+ redirect.objv.generate_new_write_ver(dpp->get_cct());
+
+ int r = write_redirect(dpp, y, sysobj, redirect);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "WARNING: failed to write email obj "
+ << redirect.obj << " with: " << cpp_strerror(r) << dendl;
+ } // not fatal
+ }
+
+ return 0;
+}
+
+int remove(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ const RGWZoneParams& zone,
+ const RGWAccountInfo& info,
+ RGWObjVersionTracker& objv)
+{
+ const rgw_raw_obj obj = get_account_obj(zone, info.id);
+ int r = rgw_delete_system_obj(dpp, &sysobj, obj.pool, obj.oid, &objv, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to remove account obj "
+ << obj << " with: " << cpp_strerror(r) << dendl;
+ return r;
+ }
+
+ if (!info.name.empty()) {
+ // remove the name object
+ const rgw_raw_obj obj = get_name_obj(zone, info.tenant, info.name);
+ r = rgw_delete_system_obj(dpp, &sysobj, obj.pool, obj.oid, nullptr, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "WARNING: failed to remove name obj "
+ << obj << " with: " << cpp_strerror(r) << dendl;
+ } // not fatal
+ }
+ if (!info.email.empty()) {
+ // remove the email object
+ const rgw_raw_obj obj = get_email_obj(zone, info.email);
+ r = rgw_delete_system_obj(dpp, &sysobj, obj.pool, obj.oid, nullptr, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "WARNING: failed to remove email obj "
+ << obj << " with: " << cpp_strerror(r) << dendl;
+ } // not fatal
+ }
+ {
+ // remove the users object
+ const rgw_raw_obj obj = get_users_obj(zone, info.id);
+ r = rgw_delete_system_obj(dpp, &sysobj, obj.pool, obj.oid, nullptr, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "WARNING: failed to remove users obj "
+ << obj << " with: " << cpp_strerror(r) << dendl;
+ } // not fatal
+ }
+
+ return 0;
+}
+
+// read the resource count from cls_user_account_header
+int resource_count(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ uint32_t& count)
+{
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectReadOperation op;
+ bufferlist bl;
+ int ret = 0;
+ op.omap_get_header(&bl, &ret);
+
+ r = ref.operate(dpp, &op, nullptr, y);
+ if (r == -ENOENT) { // doesn't exist yet
+ count = 0;
+ return 0;
+ }
+ if (r < 0) {
+ return r;
+ }
+
+ if (!bl.length()) { // exists but no header yet
+ count = 0;
+ return 0;
+ }
+
+ cls_user_account_header header;
+ try {
+ auto p = bl.cbegin();
+ decode(header, p);
+ } catch (const buffer::error&) {
+ return -EIO;
+ }
+
+ count = header.count;
+ return 0;
+}
+
+
+// metadata abstraction
+
+struct CompleteInfo {
+ RGWAccountInfo info;
+ std::map<std::string, bufferlist> attrs;
+ bool has_attrs = false;
+
+ void dump(Formatter* f) const {
+ info.dump(f);
+ encode_json("attrs", attrs, f);
+ }
+
+ void decode_json(JSONObj* obj) {
+ decode_json_obj(info, obj);
+ has_attrs = JSONDecoder::decode_json("attrs", attrs, obj);
+ }
+};
+
+class MetadataObject : public RGWMetadataObject {
+ CompleteInfo aci;
+ public:
+ MetadataObject(const CompleteInfo& aci, const obj_version& v,
+ ceph::real_time m)
+ : RGWMetadataObject(v, m), aci(aci) {}
+
+ void dump(Formatter *f) const override {
+ aci.dump(f);
+ }
+
+ CompleteInfo& get() { return aci; }
+};
+
+
+class MetadataLister : public RGWMetadataLister {
+ public:
+ using RGWMetadataLister::RGWMetadataLister;
+
+ void filter_transform(std::vector<std::string>& oids,
+ std::list<std::string>& keys) override
+ {
+ // remove the oid prefix from keys
+ constexpr auto trim = [] (const std::string& oid) {
+ return oid.substr(account_oid_prefix.size());
+ };
+ std::transform(oids.begin(), oids.end(),
+ std::back_inserter(keys),
+ trim);
+ }
+};
+
+class MetadataHandler : public RGWMetadataHandler {
+ RGWSI_SysObj& sysobj;
+ const RGWZoneParams& zone;
+ public:
+ MetadataHandler(RGWSI_SysObj& sysobj, const RGWZoneParams& zone)
+ : sysobj(sysobj), zone(zone) {}
+
+ std::string get_type() override { return "account"; }
+
+ RGWMetadataObject* get_meta_obj(JSONObj* obj,
+ const obj_version& objv,
+ const ceph::real_time& mtime) override
+ {
+ CompleteInfo aci;
+ try {
+ decode_json_obj(aci, obj);
+ } catch (const JSONDecoder::err&) {
+ return nullptr;
+ }
+ return new MetadataObject(aci, objv, mtime);
+ }
+
+ int get(std::string& entry, RGWMetadataObject** obj,
+ optional_yield y, const DoutPrefixProvider* dpp) override
+ {
+ const std::string& account_id = entry;
+ CompleteInfo aci;
+ RGWObjVersionTracker objv;
+ ceph::real_time mtime;
+
+ int r = read(dpp, y, sysobj, zone, account_id,
+ aci.info, aci.attrs, mtime, objv);
+ if (r < 0) {
+ return r;
+ }
+
+ *obj = new MetadataObject(aci, objv.read_version, mtime);
+ return 0;
+ }
+
+ int put(std::string& entry, RGWMetadataObject* obj,
+ RGWObjVersionTracker& objv, optional_yield y,
+ const DoutPrefixProvider* dpp,
+ RGWMDLogSyncType type, bool from_remote_zone) override
+ {
+ const std::string& account_id = entry;
+ auto account_obj = static_cast<MetadataObject*>(obj);
+ const auto& new_info = account_obj->get().info;
+
+ // account id must match metadata key
+ if (new_info.id != account_id) {
+ return -EINVAL;
+ }
+
+ // read existing metadata
+ RGWAccountInfo old_info;
+ std::map<std::string, ceph::buffer::list> old_attrs;
+ ceph::real_time old_mtime;
+ int r = read(dpp, y, sysobj, zone, account_id,
+ old_info, old_attrs, old_mtime, objv);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+ const RGWAccountInfo* pold_info = (r == -ENOENT ? nullptr : &old_info);
+
+ // write/overwrite metadata
+ constexpr bool exclusive = false;
+ return write(dpp, y, sysobj, zone, new_info, pold_info,
+ account_obj->get().attrs, obj->get_mtime(),
+ exclusive, objv);
+ }
+
+ int remove(std::string& entry, RGWObjVersionTracker& objv,
+ optional_yield y, const DoutPrefixProvider* dpp) override
+ {
+ const std::string& account_id = entry;
+
+ // read existing metadata
+ RGWAccountInfo info;
+ std::map<std::string, ceph::buffer::list> attrs;
+ ceph::real_time mtime;
+ int r = read(dpp, y, sysobj, zone, account_id,
+ info, attrs, mtime, objv);
+ if (r < 0) {
+ return r;
+ }
+
+ return account::remove(dpp, y, sysobj, zone, info, objv);
+ }
+
+ int mutate(const std::string& entry,
+ const ceph::real_time& mtime,
+ RGWObjVersionTracker* objv,
+ optional_yield y,
+ const DoutPrefixProvider* dpp,
+ RGWMDLogStatus op_type,
+ std::function<int()> f) override
+ {
+ return -ENOTSUP; // unused
+ }
+
+ int list_keys_init(const DoutPrefixProvider* dpp,
+ const std::string& marker, void** phandle) override
+ {
+ auto lister = std::make_unique<MetadataLister>(
+ sysobj.get_pool(zone.account_pool));
+ int r = lister->init(dpp, marker, account_oid_prefix);
+ if (r < 0) {
+ return r;
+ }
+ *phandle = lister.release();
+ return 0;
+ }
+
+ int list_keys_next(const DoutPrefixProvider* dpp, void* handle, int max,
+ std::list<std::string>& keys, bool* truncated) override
+ {
+ auto lister = static_cast<MetadataLister*>(handle);
+ return lister->get_next(dpp, max, keys, truncated);
+ }
+
+ void list_keys_complete(void* handle) override
+ {
+ delete static_cast<MetadataLister*>(handle);
+ }
+
+ std::string get_marker(void* handle) override
+ {
+ auto lister = static_cast<MetadataLister*>(handle);
+ return lister->get_marker();
+ }
+};
+
+auto create_metadata_handler(RGWSI_SysObj& sysobj, const RGWZoneParams& zone)
+ -> std::unique_ptr<RGWMetadataHandler>
+{
+ return std::make_unique<MetadataHandler>(sysobj, zone);
+}
+
+} // namespace rgwrados::account
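
The layout above keeps one primary record per account (account.<id>) plus lightweight redirect objects for the name and email indices, each holding an RGWUID that points back at the canonical id; read_by_email() additionally calls rgw::account::validate_id() because email redirects share the user email pool. A lookup sketch using the functions defined in this file, with placeholder tenant and name values:

    RGWAccountInfo info;
    std::map<std::string, ceph::buffer::list> attrs;
    RGWObjVersionTracker objv;
    int r = rgwrados::account::read_by_name(dpp, y, sysobj, zone,
                                            "tenant", "myaccount",
                                            info, attrs, objv);
    // On success, info.id is the canonical account id; the same id keys
    // the account.<id> object and the per-resource index objects above.
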
diff --git a/src/rgw/driver/rados/account.h b/src/rgw/driver/rados/account.h
new file mode 100644
index 00000000000..36d1d10ffd7
--- /dev/null
+++ b/src/rgw/driver/rados/account.h
@@ -0,0 +1,130 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#pragma once
+
+#include <list>
+#include <map>
+#include <memory>
+#include <string>
+#include "include/encoding.h"
+#include "include/rados/librados_fwd.hpp"
+#include "common/async/yield_context.h"
+
+namespace ceph { class Formatter; }
+class DoutPrefixProvider;
+class JSONObj;
+struct rgw_raw_obj;
+class RGWAccountInfo;
+struct RGWBucketInfo;
+class RGWMetadataHandler;
+class RGWObjVersionTracker;
+class RGWSI_SysObj;
+class RGWStorageStats;
+class RGWZoneParams;
+
+namespace rgwrados::account {
+
+/// Account metadata handler factory
+auto create_metadata_handler(RGWSI_SysObj& sysobj, const RGWZoneParams& zone)
+ -> std::unique_ptr<RGWMetadataHandler>;
+
+/// Return the rados object that tracks the given account's buckets. This
+/// can be used with the cls_user interface in namespace rgwrados::buckets.
+rgw_raw_obj get_buckets_obj(const RGWZoneParams& zone,
+ std::string_view account_id);
+
+/// Return the rados object that tracks the given account's users. This
+/// can be used with the cls_user interface in namespace rgwrados::users.
+rgw_raw_obj get_users_obj(const RGWZoneParams& zone,
+ std::string_view account_id);
+
+/// Return the rados object that tracks the given account's groups. This
+/// can be used with the cls_user interface in namespace rgwrados::groups.
+rgw_raw_obj get_groups_obj(const RGWZoneParams& zone,
+ std::string_view account_id);
+
+/// Return the rados object that tracks the given account's roles. This
+/// can be used with the cls_user interface in namespace rgwrados::roles.
+rgw_raw_obj get_roles_obj(const RGWZoneParams& zone,
+ std::string_view account_id);
+
+/// Return the rados object that tracks the given account's topics. This
+/// can be used with the cls_user interface in namespace rgwrados::topics.
+rgw_raw_obj get_topics_obj(const RGWZoneParams& zone,
+ std::string_view account_id);
+
+
+/// Read account info by id
+int read(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ const RGWZoneParams& zone,
+ std::string_view account_id,
+ RGWAccountInfo& info,
+ std::map<std::string, ceph::buffer::list>& attrs,
+ ceph::real_time& mtime,
+ RGWObjVersionTracker& objv);
+
+/// Read account info by name
+int read_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ const RGWZoneParams& zone,
+ std::string_view tenant,
+ std::string_view name,
+ RGWAccountInfo& info,
+ std::map<std::string, ceph::buffer::list>& attrs,
+ RGWObjVersionTracker& objv);
+
+/// Read account info by email
+int read_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ const RGWZoneParams& zone,
+ std::string_view email,
+ RGWAccountInfo& info,
+ std::map<std::string, ceph::buffer::list>& attrs,
+ RGWObjVersionTracker& objv);
+
+/// Write account info and update name/email indices
+int write(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ const RGWZoneParams& zone,
+ const RGWAccountInfo& info,
+ const RGWAccountInfo* old_info,
+ const std::map<std::string, ceph::buffer::list>& attrs,
+ ceph::real_time mtime,
+ bool exclusive,
+ RGWObjVersionTracker& objv);
+
+/// Remove account info and name/email indices
+int remove(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ const RGWZoneParams& zone,
+ const RGWAccountInfo& info,
+ RGWObjVersionTracker& objv);
+
+
+/// Read the resource count from an account index object.
+int resource_count(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ uint32_t& count);
+
+} // namespace rgwrados::account
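
resource_count() reads the entry count straight out of the cls_user omap header of one of the per-resource index objects, which makes cheap quota-style checks possible. A sketch under stated assumptions: max_users is a hypothetical limit, not part of this API, and rados/zone/info are assumed in scope:

    uint32_t count = 0;
    const rgw_raw_obj users = rgwrados::account::get_users_obj(zone, info.id);
    int r = rgwrados::account::resource_count(dpp, y, rados, users, count);
    if (r == 0 && count >= max_users) {
      r = -EUSERS;   // hypothetical limit handling
    }
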
diff --git a/src/rgw/driver/rados/buckets.cc b/src/rgw/driver/rados/buckets.cc
new file mode 100644
index 00000000000..8c35028d093
--- /dev/null
+++ b/src/rgw/driver/rados/buckets.cc
@@ -0,0 +1,275 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "buckets.h"
+#include "include/rados/librados.hpp"
+#include "common/async/yield_context.h"
+#include "common/dout.h"
+#include "cls/user/cls_user_client.h"
+#include "rgw_common.h"
+#include "rgw_sal.h"
+#include "rgw_tools.h"
+
+namespace rgwrados::buckets {
+
+static int set(const DoutPrefixProvider* dpp, optional_yield y,
+ librados::Rados& rados, const rgw_raw_obj& obj,
+ cls_user_bucket_entry&& entry, bool add)
+{
+ std::list<cls_user_bucket_entry> entries;
+ entries.push_back(std::move(entry));
+
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectWriteOperation op;
+ ::cls_user_set_buckets(op, entries, add);
+ return ref.operate(dpp, &op, y);
+}
+
+int add(const DoutPrefixProvider* dpp, optional_yield y,
+ librados::Rados& rados, const rgw_raw_obj& obj,
+ const rgw_bucket& bucket, ceph::real_time creation_time)
+{
+ cls_user_bucket_entry entry;
+ bucket.convert(&entry.bucket);
+
+ if (ceph::real_clock::is_zero(creation_time)) {
+ entry.creation_time = ceph::real_clock::now();
+ } else {
+ entry.creation_time = creation_time;
+ }
+
+ constexpr bool add = true; // create/update entry
+ return set(dpp, y, rados, obj, std::move(entry), add);
+}
+
+int remove(const DoutPrefixProvider* dpp, optional_yield y,
+ librados::Rados& rados, const rgw_raw_obj& obj,
+ const rgw_bucket& bucket)
+{
+ cls_user_bucket clsbucket;
+ bucket.convert(&clsbucket);
+
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectWriteOperation op;
+ ::cls_user_remove_bucket(op, clsbucket);
+ return ref.operate(dpp, &op, y);
+}
+
+int list(const DoutPrefixProvider* dpp, optional_yield y,
+ librados::Rados& rados, const rgw_raw_obj& obj,
+ const std::string& tenant, const std::string& start_marker,
+ const std::string& end_marker, uint64_t max,
+ rgw::sal::BucketList& listing)
+{
+ listing.buckets.clear();
+
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ std::string marker = start_marker;
+ bool truncated = false;
+
+ do {
+ const uint64_t count = max - listing.buckets.size();
+ std::list<cls_user_bucket_entry> entries;
+
+ librados::ObjectReadOperation op;
+ int rc = 0;
+ ::cls_user_bucket_list(op, marker, end_marker, count,
+ entries, &marker, &truncated, &rc);
+
+ bufferlist bl;
+ int r = ref.operate(dpp, &op, &bl, y);
+ if (r == -ENOENT) {
+ listing.next_marker.clear();
+ return 0;
+ }
+ if (r < 0) {
+ return r;
+ }
+ if (rc < 0) {
+ return rc;
+ }
+
+ for (auto& entry : entries) {
+ RGWBucketEnt ent;
+ ent.bucket.tenant = tenant;
+ ent.bucket.name = std::move(entry.bucket.name);
+ ent.bucket.marker = std::move(entry.bucket.marker);
+ ent.bucket.bucket_id = std::move(entry.bucket.bucket_id);
+ ent.size = entry.size;
+ ent.size_rounded = entry.size_rounded;
+ ent.creation_time = entry.creation_time;
+ ent.count = entry.count;
+
+ listing.buckets.push_back(std::move(ent));
+ }
+ } while (truncated && listing.buckets.size() < max);
+
+ if (truncated) {
+ listing.next_marker = std::move(marker);
+ } else {
+ listing.next_marker.clear();
+ }
+ return 0;
+}
+
+int write_stats(const DoutPrefixProvider* dpp, optional_yield y,
+ librados::Rados& rados, const rgw_raw_obj& obj,
+ const RGWBucketEnt& ent)
+{
+ cls_user_bucket_entry entry;
+ ent.convert(&entry);
+
+ constexpr bool add = false; // bucket entry must exist
+ return set(dpp, y, rados, obj, std::move(entry), add);
+}
+
+int read_stats(const DoutPrefixProvider* dpp, optional_yield y,
+ librados::Rados& rados, const rgw_raw_obj& obj,
+ RGWStorageStats& stats, ceph::real_time* last_synced,
+ ceph::real_time* last_updated)
+{
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectReadOperation op;
+ cls_user_header header;
+ ::cls_user_get_header(op, &header, nullptr);
+
+ bufferlist bl;
+ r = ref.operate(dpp, &op, &bl, y);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+
+ stats.size = header.stats.total_bytes;
+ stats.size_rounded = header.stats.total_bytes_rounded;
+ stats.num_objects = header.stats.total_entries;
+ if (last_synced) {
+ *last_synced = header.last_stats_sync;
+ }
+ if (last_updated) {
+ *last_updated = header.last_stats_update;
+ }
+ return 0;
+}
+
+// callback wrapper for cls_user_get_header_async()
+class AsyncHeaderCB : public RGWGetUserHeader_CB {
+ boost::intrusive_ptr<rgw::sal::ReadStatsCB> cb;
+ public:
+ explicit AsyncHeaderCB(boost::intrusive_ptr<rgw::sal::ReadStatsCB> cb)
+ : cb(std::move(cb)) {}
+
+ void handle_response(int r, cls_user_header& header) override {
+ const cls_user_stats& hs = header.stats;
+ RGWStorageStats stats;
+ stats.size = hs.total_bytes;
+ stats.size_rounded = hs.total_bytes_rounded;
+ stats.num_objects = hs.total_entries;
+ cb->handle_response(r, stats);
+ cb.reset();
+ }
+};
+
+int read_stats_async(const DoutPrefixProvider* dpp,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ boost::intrusive_ptr<rgw::sal::ReadStatsCB> cb)
+{
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ auto headercb = std::make_unique<AsyncHeaderCB>(std::move(cb));
+ r = ::cls_user_get_header_async(ref.ioctx, ref.obj.oid, headercb.get());
+ if (r >= 0) {
+ headercb.release(); // release ownership, handle_response() will free
+ }
+ return r;
+}
+
+int reset_stats(const DoutPrefixProvider* dpp, optional_yield y,
+ librados::Rados& rados, const rgw_raw_obj& obj)
+{
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ int rval;
+
+ cls_user_reset_stats2_op call;
+ cls_user_reset_stats2_ret ret;
+
+ do {
+ buffer::list in, out;
+ librados::ObjectWriteOperation op;
+
+ call.time = ceph::real_clock::now();
+ ret.update_call(call);
+
+ encode(call, in);
+ op.exec("user", "reset_user_stats2", in, &out, &rval);
+ r = ref.operate(dpp, &op, y, librados::OPERATION_RETURNVEC);
+ if (r < 0) {
+ return r;
+ }
+ try {
+ auto bliter = out.cbegin();
+ decode(ret, bliter);
+ } catch (ceph::buffer::error& err) {
+ return -EINVAL;
+ }
+ } while (ret.truncated);
+
+ return rval;
+}
+
+int complete_flush_stats(const DoutPrefixProvider* dpp, optional_yield y,
+ librados::Rados& rados, const rgw_raw_obj& obj)
+{
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectWriteOperation op;
+ ::cls_user_complete_stats_sync(op);
+ return ref.operate(dpp, &op, y);
+}
+
+} // namespace rgwrados::buckets
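
list() pages through cls_user_bucket_list internally and sets next_marker only when the result was truncated, so an empty marker ends an outer walk. A caller-side pagination sketch over the function above (obj and tenant assumed in scope):

    std::string marker;   // empty: start from the beginning
    do {
      rgw::sal::BucketList listing;
      int r = rgwrados::buckets::list(dpp, y, rados, obj, tenant, marker,
                                      /*end_marker=*/"", /*max=*/100, listing);
      if (r < 0) {
        break;
      }
      for (const auto& ent : listing.buckets) {
        // consume ent.bucket, ent.size, ent.creation_time, ...
      }
      marker = listing.next_marker;
    } while (!marker.empty());
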
diff --git a/src/rgw/driver/rados/buckets.h b/src/rgw/driver/rados/buckets.h
new file mode 100644
index 00000000000..3ac29f8de39
--- /dev/null
+++ b/src/rgw/driver/rados/buckets.h
@@ -0,0 +1,96 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#pragma once
+
+#include <cstdint>
+#include <string>
+#include <boost/intrusive_ptr.hpp>
+#include "include/rados/librados_fwd.hpp"
+#include "common/ceph_time.h"
+#include "rgw_sal_fwd.h"
+
+class DoutPrefixProvider;
+class optional_yield;
+struct rgw_bucket;
+struct rgw_raw_obj;
+struct RGWBucketEnt;
+struct RGWStorageStats;
+
+/// Interface for bucket owners (users or accounts) to manage
+/// their list of buckets and storage stats with cls_user.
+namespace rgwrados::buckets {
+
+/// Add the given bucket to the list.
+int add(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ const rgw_bucket& bucket,
+ ceph::real_time creation_time);
+
+/// Remove the given bucket from the list.
+int remove(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ const rgw_bucket& bucket);
+
+/// Return a paginated list of buckets.
+int list(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ const std::string& tenant,
+ const std::string& marker,
+ const std::string& end_marker,
+ uint64_t max,
+ rgw::sal::BucketList& buckets);
+
+/// Update usage stats for the given bucket.
+int write_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ const RGWBucketEnt& bucket);
+
+/// Read the total usage stats of all buckets.
+int read_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ RGWStorageStats& stats,
+ ceph::real_time* last_synced,
+ ceph::real_time* last_updated);
+
+/// Read the total usage stats of all buckets asynchronously.
+int read_stats_async(const DoutPrefixProvider* dpp,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ boost::intrusive_ptr<rgw::sal::ReadStatsCB> cb);
+
+/// Recalculate the sum of bucket usage.
+int reset_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj);
+
+/// Update the last_synced timestamp.
+int complete_flush_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj);
+
+} // namespace rgwrados::buckets
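
The stats half of this interface mirrors the cls_user header: write_stats() updates one bucket's entry, read_stats() returns the totals, and complete_flush_stats()/reset_stats() maintain the sync bookkeeping. A read sketch showing the two timestamps exposed above:

    RGWStorageStats stats;
    ceph::real_time last_synced, last_updated;
    int r = rgwrados::buckets::read_stats(dpp, y, rados, obj, stats,
                                          &last_synced, &last_updated);
    if (r == 0) {
      // stats.size / stats.size_rounded / stats.num_objects are sums
      // across every bucket tracked by this owner object.
    }
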
diff --git a/src/rgw/driver/rados/cls_fifo_legacy.h b/src/rgw/driver/rados/cls_fifo_legacy.h
index c345c728512..ed23129eb30 100644
--- a/src/rgw/driver/rados/cls_fifo_legacy.h
+++ b/src/rgw/driver/rados/cls_fifo_legacy.h
@@ -41,7 +41,7 @@
namespace rgw::cls::fifo {
namespace cb = ceph::buffer;
-namespace fifo = rados::cls::fifo;
+namespace fifo = ::rados::cls::fifo;
namespace lr = librados;
inline constexpr std::uint64_t default_max_part_size = 4 * 1024 * 1024;
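
The only change here anchors the alias at the global namespace. Unqualified lookup of rados starts in the innermost enclosing scopes, so any nearer namespace named rados would capture the name and break the alias; the leading :: sidesteps that. A self-contained illustration with hypothetical names:

    namespace rados::cls::fifo { struct info {}; }

    namespace rgw {
      namespace rados {}                        // a nearer 'rados' now exists
      namespace cls::fifo {
        // namespace fifo = rados::cls::fifo;   // error: binds to rgw::rados first
        namespace fifo = ::rados::cls::fifo;    // OK: the global rados
      }
    }
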
diff --git a/src/rgw/driver/rados/group.cc b/src/rgw/driver/rados/group.cc
new file mode 100644
index 00000000000..7cdd9487156
--- /dev/null
+++ b/src/rgw/driver/rados/group.cc
@@ -0,0 +1,522 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "group.h"
+
+#include <boost/algorithm/string.hpp>
+#include "common/errno.h"
+#include "account.h"
+#include "groups.h"
+#include "rgw_common.h"
+#include "rgw_metadata.h"
+#include "rgw_metadata_lister.h"
+#include "rgw_obj_types.h"
+#include "rgw_string.h"
+#include "rgw_tools.h"
+#include "rgw_user.h"
+#include "rgw_zone.h"
+#include "services/svc_sys_obj.h"
+
+namespace rgwrados::group {
+
+static constexpr std::string_view info_oid_prefix = "info.";
+static constexpr std::string_view name_oid_prefix = "name.";
+static constexpr std::string_view users_oid_prefix = "users.";
+
+// metadata keys/objects
+std::string get_users_key(std::string_view group_id) {
+ return string_cat_reserve(users_oid_prefix, group_id);
+}
+rgw_raw_obj get_users_obj(const RGWZoneParams& zone,
+ std::string_view group_id) {
+ return {zone.group_pool, get_users_key(group_id)};
+}
+
+static std::string get_group_key(std::string_view group_id) {
+ return string_cat_reserve(info_oid_prefix, group_id);
+}
+static rgw_raw_obj get_group_obj(const RGWZoneParams& zone,
+ std::string_view group_id) {
+ return {zone.group_pool, get_group_key(group_id)};
+}
+
+static std::string get_name_key(std::string_view account,
+ std::string_view name) {
+ // names are case-insensitive, so store them in lower case
+ std::string lower_name{name};
+ boost::algorithm::to_lower(lower_name);
+ return string_cat_reserve(name_oid_prefix, account, "$", lower_name);
+}
+static rgw_raw_obj get_name_obj(const RGWZoneParams& zone,
+ std::string_view account,
+ std::string_view name) {
+ return {zone.group_pool, get_name_key(account, name)};
+}
+
+
+struct NameObj {
+ rgw_raw_obj obj;
+ RGWUID data;
+ RGWObjVersionTracker objv;
+};
+
+static int read_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ NameObj& name)
+{
+ bufferlist bl;
+ int r = rgw_get_system_obj(&sysobj, name.obj.pool, name.obj.oid,
+ bl, &name.objv, nullptr, y, dpp);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "failed to read " << name.obj.oid
+ << " with: " << cpp_strerror(r) << dendl;
+ return r;
+ }
+
+ try {
+ auto p = bl.cbegin();
+ decode(name.data, p);
+ } catch (const buffer::error& e) {
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode group name: "
+ << e.what() << dendl;
+ return -EIO;
+ }
+ return 0;
+}
+
+static int write_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ NameObj& name)
+{
+ bufferlist bl;
+ encode(name.data, bl);
+
+ constexpr bool exclusive = true;
+ return rgw_put_system_obj(dpp, &sysobj, name.obj.pool,
+ name.obj.oid, bl, exclusive,
+ &name.objv, ceph::real_time{}, y);
+}
+
+
+int read(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ const RGWZoneParams& zone,
+ std::string_view id,
+ RGWGroupInfo& info,
+ std::map<std::string, ceph::buffer::list>& attrs,
+ ceph::real_time& mtime,
+ RGWObjVersionTracker& objv)
+{
+ const rgw_raw_obj obj = get_group_obj(zone, id);
+
+ bufferlist bl;
+ int r = rgw_get_system_obj(&sysobj, obj.pool, obj.oid, bl,
+ &objv, &mtime, y, dpp, &attrs);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "group lookup with id=" << id
+ << " failed: " << cpp_strerror(r) << dendl;
+ return r;
+ }
+
+ try {
+ auto p = bl.cbegin();
+ decode(info, p);
+ } catch (const buffer::error& e) {
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode group info: "
+ << e.what() << dendl;
+ return -EIO;
+ }
+ if (info.id != id) {
+ ldpp_dout(dpp, 0) << "ERROR: read group id mismatch "
+ << info.id << " != " << id << dendl;
+ return -EIO;
+ }
+ return 0;
+}
+
+int read_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ const RGWZoneParams& zone,
+ std::string_view tenant,
+ std::string_view name,
+ RGWGroupInfo& info,
+ std::map<std::string, ceph::buffer::list>& attrs,
+ RGWObjVersionTracker& objv)
+{
+ auto nameobj = NameObj{.obj = get_name_obj(zone, tenant, name)};
+ int r = read_name(dpp, y, sysobj, nameobj);
+ if (r < 0) {
+ return r;
+ }
+ ceph::real_time mtime; // ignored
+ return read(dpp, y, sysobj, zone, nameobj.data.id, info, attrs, mtime, objv);
+}
+
+
+int write(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ librados::Rados& rados,
+ const RGWZoneParams& zone,
+ const RGWGroupInfo& info,
+ const RGWGroupInfo* old_info,
+ const std::map<std::string, ceph::buffer::list>& attrs,
+ ceph::real_time mtime,
+ bool exclusive,
+ RGWObjVersionTracker& objv)
+{
+ const rgw_raw_obj obj = get_group_obj(zone, info.id);
+
+ const bool same_name = old_info
+ && old_info->account_id == info.account_id
+ && old_info->name == info.name;
+
+ std::optional<NameObj> remove_name;
+ if (old_info) {
+ if (old_info->id != info.id) {
+ ldpp_dout(dpp, 1) << "ERROR: can't modify group id" << dendl;
+ return -EINVAL;
+ }
+ if (!same_name && !old_info->name.empty()) {
+ // read old group name object
+ NameObj nameobj;
+ nameobj.obj = get_name_obj(zone, old_info->account_id, old_info->name);
+ int r = read_name(dpp, y, sysobj, nameobj);
+ if (r == -ENOENT) {
+ // leave remove_name empty
+ } else if (r < 0) {
+ return r;
+ } else if (nameobj.data.id == info.id) {
+ remove_name = std::move(nameobj);
+ }
+ }
+ } // old_info
+
+ if (!same_name && !info.name.empty()) {
+ // read new group name object
+ NameObj nameobj;
+ nameobj.obj = get_name_obj(zone, info.account_id, info.name);
+ int r = read_name(dpp, y, sysobj, nameobj);
+ if (r == -ENOENT) {
+ // write the new name object below
+ } else if (r == 0) {
+ ldpp_dout(dpp, 1) << "ERROR: group name obj " << nameobj.obj
+ << " already taken for group id " << nameobj.data.id << dendl;
+ return -EEXIST;
+ } else if (r < 0) {
+ return r;
+ }
+ }
+
+ // encode/write the group info
+ {
+ bufferlist bl;
+ encode(info, bl);
+
+ const rgw_raw_obj obj = get_group_obj(zone, info.id);
+ int r = rgw_put_system_obj(dpp, &sysobj, obj.pool, obj.oid, bl,
+ exclusive, &objv, mtime, y, &attrs);
+ if (r < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to write group obj " << obj
+ << " with: " << cpp_strerror(r) << dendl;
+ return r;
+ }
+ }
+
+ if (remove_name) {
+ // remove the old name object, ignoring errors
+ auto& nameobj = *remove_name;
+ int r = rgw_delete_system_obj(dpp, &sysobj, nameobj.obj.pool,
+ nameobj.obj.oid, &nameobj.objv, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "WARNING: failed to remove old name obj "
+ << nameobj.obj.oid << ": " << cpp_strerror(r) << dendl;
+ } // not fatal
+ // unlink the old name from its account
+ const auto& groups_obj = account::get_groups_obj(zone, old_info->account_id);
+ r = groups::remove(dpp, y, rados, groups_obj, old_info->name);
+ if (r < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: could not unlink from account "
+ << old_info->account_id << ": " << cpp_strerror(r) << dendl;
+ } // not fatal
+ }
+ if (!same_name && !info.name.empty()) {
+ // write the new name object
+ NameObj nameobj;
+ nameobj.obj = get_name_obj(zone, info.account_id, info.name);
+ nameobj.data.id = info.id;
+ nameobj.objv.generate_new_write_ver(dpp->get_cct());
+
+ int r = write_name(dpp, y, sysobj, nameobj);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "WARNING: failed to write name obj "
+ << nameobj.obj << " with: " << cpp_strerror(r) << dendl;
+ } // not fatal
+ // link the new name to its account
+ const auto& groups_obj = account::get_groups_obj(zone, info.account_id);
+ r = groups::add(dpp, y, rados, groups_obj, info, false,
+ std::numeric_limits<uint32_t>::max());
+ if (r < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: could not link to account "
+ << info.account_id << ": " << cpp_strerror(r) << dendl;
+ } // not fatal
+ }
+
+ return 0;
+}
+
+int remove(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ librados::Rados& rados,
+ const RGWZoneParams& zone,
+ const RGWGroupInfo& info,
+ RGWObjVersionTracker& objv)
+{
+ const rgw_raw_obj obj = get_group_obj(zone, info.id);
+ int r = rgw_delete_system_obj(dpp, &sysobj, obj.pool, obj.oid, &objv, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to remove account obj "
+ << obj << " with: " << cpp_strerror(r) << dendl;
+ return r;
+ }
+
+ {
+ // remove the name object
+ const rgw_raw_obj obj = get_name_obj(zone, info.account_id, info.name);
+ r = rgw_delete_system_obj(dpp, &sysobj, obj.pool, obj.oid, nullptr, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "WARNING: failed to remove name obj "
+ << obj << " with: " << cpp_strerror(r) << dendl;
+ } // not fatal
+ }
+ {
+ // remove the users object
+ const rgw_raw_obj obj = get_users_obj(zone, info.id);
+ r = rgw_delete_system_obj(dpp, &sysobj, obj.pool, obj.oid, nullptr, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "WARNING: failed to remove users obj "
+ << obj << " with: " << cpp_strerror(r) << dendl;
+ } // not fatal
+ }
+ {
+ // unlink the name from its account
+ const auto& groups_obj = account::get_groups_obj(zone, info.account_id);
+ r = groups::remove(dpp, y, rados, groups_obj, info.name);
+ if (r < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: could not unlink from account "
+ << info.account_id << ": " << cpp_strerror(r) << dendl;
+ } // not fatal
+ }
+
+ return 0;
+}
+
+
+// metadata abstraction
+
+struct CompleteInfo {
+ RGWGroupInfo info;
+ std::map<std::string, bufferlist> attrs;
+ bool has_attrs = false;
+
+ void dump(Formatter* f) const {
+ info.dump(f);
+ encode_json("attrs", attrs, f);
+ }
+
+ void decode_json(JSONObj* obj) {
+ decode_json_obj(info, obj);
+ has_attrs = JSONDecoder::decode_json("attrs", attrs, obj);
+ }
+};
+
+class MetadataObject : public RGWMetadataObject {
+ CompleteInfo aci;
+ public:
+ MetadataObject(CompleteInfo& aci, const obj_version& v, ceph::real_time m)
+ : RGWMetadataObject(v, m), aci(std::move(aci)) {}
+
+ void dump(Formatter *f) const override {
+ aci.dump(f);
+ }
+
+ CompleteInfo& get() { return aci; }
+};
+
+class MetadataLister : public RGWMetadataLister {
+ public:
+ using RGWMetadataLister::RGWMetadataLister;
+
+ void filter_transform(std::vector<std::string>& oids,
+ std::list<std::string>& keys) override
+ {
+ // remove the oid prefix from keys
+ constexpr auto trim = [] (const std::string& oid) {
+ return oid.substr(info_oid_prefix.size());
+ };
+ std::transform(oids.begin(), oids.end(),
+ std::back_inserter(keys),
+ trim);
+ }
+};
+
+class MetadataHandler : public RGWMetadataHandler {
+ RGWSI_SysObj& sysobj;
+ librados::Rados& rados;
+ const RGWZoneParams& zone;
+ public:
+ MetadataHandler(RGWSI_SysObj& sysobj, librados::Rados& rados,
+ const RGWZoneParams& zone)
+ : sysobj(sysobj), rados(rados), zone(zone) {}
+
+ std::string get_type() override { return "group"; }
+
+ RGWMetadataObject* get_meta_obj(JSONObj* obj,
+ const obj_version& objv,
+ const ceph::real_time& mtime) override
+ {
+ CompleteInfo aci;
+ try {
+ decode_json_obj(aci, obj);
+ } catch (const JSONDecoder::err&) {
+ return nullptr;
+ }
+ return new MetadataObject(aci, objv, mtime);
+ }
+
+ int get(std::string& entry, RGWMetadataObject** obj,
+ optional_yield y, const DoutPrefixProvider* dpp) override
+ {
+ const std::string& group_id = entry;
+ CompleteInfo aci;
+ RGWObjVersionTracker objv;
+ ceph::real_time mtime;
+
+ int r = read(dpp, y, sysobj, zone, group_id,
+ aci.info, aci.attrs, mtime, objv);
+ if (r < 0) {
+ return r;
+ }
+
+ *obj = new MetadataObject(aci, objv.read_version, mtime);
+ return 0;
+ }
+
+ int put(std::string& entry, RGWMetadataObject* obj,
+ RGWObjVersionTracker& objv, optional_yield y,
+ const DoutPrefixProvider* dpp,
+ RGWMDLogSyncType type, bool from_remote_zone) override
+ {
+ const std::string& group_id = entry;
+ auto group_obj = static_cast<MetadataObject*>(obj);
+ const auto& new_info = group_obj->get().info;
+
+ // group id must match metadata key
+ if (new_info.id != group_id) {
+ return -EINVAL;
+ }
+
+ // read existing metadata
+ RGWGroupInfo old_info;
+ std::map<std::string, ceph::buffer::list> old_attrs;
+ ceph::real_time old_mtime;
+ int r = read(dpp, y, sysobj, zone, group_id,
+ old_info, old_attrs, old_mtime, objv);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+ const RGWGroupInfo* pold_info = (r == -ENOENT ? nullptr : &old_info);
+
+ // write/overwrite metadata
+ constexpr bool exclusive = false;
+ return write(dpp, y, sysobj, rados, zone, new_info, pold_info,
+ group_obj->get().attrs, obj->get_mtime(),
+ exclusive, objv);
+ }
+
+ int remove(std::string& entry, RGWObjVersionTracker& objv,
+ optional_yield y, const DoutPrefixProvider* dpp) override
+ {
+ const std::string& group_id = entry;
+
+ // read existing metadata
+ RGWGroupInfo info;
+ std::map<std::string, ceph::buffer::list> attrs;
+ ceph::real_time mtime;
+ int r = read(dpp, y, sysobj, zone, group_id,
+ info, attrs, mtime, objv);
+ if (r < 0) {
+ return r;
+ }
+
+ return group::remove(dpp, y, sysobj, rados, zone, info, objv);
+ }
+
+ int mutate(const std::string& entry,
+ const ceph::real_time& mtime,
+ RGWObjVersionTracker* objv,
+ optional_yield y,
+ const DoutPrefixProvider* dpp,
+ RGWMDLogStatus op_type,
+ std::function<int()> f) override
+ {
+ return -ENOTSUP; // unused
+ }
+
+ int list_keys_init(const DoutPrefixProvider* dpp,
+ const std::string& marker, void** phandle) override
+ {
+ auto lister = std::make_unique<MetadataLister>(
+ sysobj.get_pool(zone.group_pool));
+ int r = lister->init(dpp, marker, std::string{info_oid_prefix});
+ if (r < 0) {
+ return r;
+ }
+ *phandle = lister.release();
+ return 0;
+ }
+
+ int list_keys_next(const DoutPrefixProvider* dpp, void* handle, int max,
+ std::list<std::string>& keys, bool* truncated) override
+ {
+ auto lister = static_cast<MetadataLister*>(handle);
+ return lister->get_next(dpp, max, keys, truncated);
+ }
+
+ void list_keys_complete(void* handle) override
+ {
+ delete static_cast<MetadataLister*>(handle);
+ }
+
+ std::string get_marker(void* handle) override
+ {
+ auto lister = static_cast<MetadataLister*>(handle);
+ return lister->get_marker();
+ }
+};
+
+auto create_metadata_handler(RGWSI_SysObj& sysobj, librados::Rados& rados,
+ const RGWZoneParams& zone)
+ -> std::unique_ptr<RGWMetadataHandler>
+{
+ return std::make_unique<MetadataHandler>(sysobj, rados, zone);
+}
+
+} // namespace rgwrados::group
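
For orientation, here is a minimal caller-side sketch (not part of the patch) of how read() and write() compose for a group rename. write() sees the name change through old_info and moves the name index: it removes the old name object and links the new name to the account. The dpp/y/sysobj/rados/zone/group_id/new_name names are assumptions standing in for the surrounding driver code.

    RGWGroupInfo info;
    std::map<std::string, ceph::buffer::list> attrs;
    ceph::real_time mtime;
    RGWObjVersionTracker objv;

    int r = rgwrados::group::read(dpp, y, sysobj, zone, group_id,
                                  info, attrs, mtime, objv);
    if (r < 0) {
      return r;
    }

    RGWGroupInfo old_info = info;  // keep the old name for index cleanup
    info.name = new_name;

    constexpr bool exclusive = false;  // overwrite the existing info object
    return rgwrados::group::write(dpp, y, sysobj, rados, zone, info, &old_info,
                                  attrs, ceph::real_clock::now(), exclusive, objv);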
diff --git a/src/rgw/driver/rados/group.h b/src/rgw/driver/rados/group.h
new file mode 100644
index 00000000000..b96d1cc0259
--- /dev/null
+++ b/src/rgw/driver/rados/group.h
@@ -0,0 +1,90 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <string>
+#include "include/buffer_fwd.h"
+#include "include/rados/librados_fwd.hpp"
+#include "common/async/yield_context.h"
+#include "common/ceph_time.h"
+
+class DoutPrefixProvider;
+struct rgw_raw_obj;
+class RGWGroupInfo;
+class RGWMetadataHandler;
+class RGWObjVersionTracker;
+class RGWSI_SysObj;
+class RGWZoneParams;
+
+namespace rgwrados::group {
+
+/// Group metadata handler factory
+auto create_metadata_handler(RGWSI_SysObj& sysobj, librados::Rados& rados,
+ const RGWZoneParams& zone)
+ -> std::unique_ptr<RGWMetadataHandler>;
+
+/// Return the rados object that tracks the given group's users
+rgw_raw_obj get_users_obj(const RGWZoneParams& zone,
+ std::string_view group_id);
+
+
+/// Read group info by id
+int read(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ const RGWZoneParams& zone,
+ std::string_view id,
+ RGWGroupInfo& info,
+ std::map<std::string, ceph::buffer::list>& attrs,
+ ceph::real_time& mtime,
+ RGWObjVersionTracker& objv);
+
+/// Read group info by name
+int read_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ const RGWZoneParams& zone,
+ std::string_view account_id,
+ std::string_view name,
+ RGWGroupInfo& info,
+ std::map<std::string, ceph::buffer::list>& attrs,
+ RGWObjVersionTracker& objv);
+
+/// Write group info and update name index
+int write(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ librados::Rados& rados,
+ const RGWZoneParams& zone,
+ const RGWGroupInfo& info,
+ const RGWGroupInfo* old_info,
+ const std::map<std::string, ceph::buffer::list>& attrs,
+ ceph::real_time mtime,
+ bool exclusive,
+ RGWObjVersionTracker& objv);
+
+/// Remove group info and name index
+int remove(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ RGWSI_SysObj& sysobj,
+ librados::Rados& rados,
+ const RGWZoneParams& zone,
+ const RGWGroupInfo& info,
+ RGWObjVersionTracker& objv);
+
+} // namespace rgwrados::group
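
A minimal usage sketch for this header (caller-side, not part of the patch): resolve a group by name, then delete it. remove() also cleans up the name index, the group's users object, and the account's group listing. The account_id/name values and service handles are assumed to come from the caller.

    RGWGroupInfo info;
    std::map<std::string, ceph::buffer::list> attrs;
    RGWObjVersionTracker objv;

    int r = rgwrados::group::read_by_name(dpp, y, sysobj, zone, account_id,
                                          name, info, attrs, objv);
    if (r < 0) {
      return r;
    }
    return rgwrados::group::remove(dpp, y, sysobj, rados, zone, info, objv);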
diff --git a/src/rgw/driver/rados/groups.cc b/src/rgw/driver/rados/groups.cc
new file mode 100644
index 00000000000..21f66e7b7d5
--- /dev/null
+++ b/src/rgw/driver/rados/groups.cc
@@ -0,0 +1,135 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "groups.h"
+
+#include "include/rados/librados.hpp"
+#include "common/ceph_json.h"
+#include "common/dout.h"
+#include "cls/user/cls_user_client.h"
+#include "rgw_sal.h"
+
+namespace rgwrados::groups {
+
+int add(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ const RGWGroupInfo& group,
+ bool exclusive, uint32_t limit)
+{
+ resource_metadata meta;
+ meta.group_id = group.id;
+
+ cls_user_account_resource resource;
+ resource.name = group.name;
+ resource.path = group.path;
+ encode(meta, resource.metadata);
+
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectWriteOperation op;
+ ::cls_user_account_resource_add(op, resource, exclusive, limit);
+ return ref.operate(dpp, &op, y);
+}
+
+int remove(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view name)
+{
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectWriteOperation op;
+ ::cls_user_account_resource_rm(op, name);
+ return ref.operate(dpp, &op, y);
+}
+
+int list(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view marker,
+ std::string_view path_prefix,
+ uint32_t max_items,
+ std::vector<std::string>& ids,
+ std::string& next_marker)
+{
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectReadOperation op;
+ std::vector<cls_user_account_resource> entries;
+ bool truncated = false;
+ int ret = 0;
+ ::cls_user_account_resource_list(op, marker, path_prefix, max_items,
+ entries, &truncated, &next_marker, &ret);
+
+ r = ref.operate(dpp, &op, nullptr, y);
+ if (r == -ENOENT) {
+ next_marker.clear();
+ return 0;
+ }
+ if (r < 0) {
+ return r;
+ }
+ if (ret < 0) {
+ return ret;
+ }
+
+ for (auto& resource : entries) {
+ resource_metadata meta;
+ try {
+ auto p = resource.metadata.cbegin();
+ decode(meta, p);
+ } catch (const buffer::error&) {
+ return -EIO;
+ }
+ ids.push_back(std::move(meta.group_id));
+ }
+
+ if (!truncated) {
+ next_marker.clear();
+ }
+ return 0;
+}
+
+
+void resource_metadata::dump(ceph::Formatter* f) const
+{
+ encode_json("group_id", group_id, f);
+}
+
+void resource_metadata::generate_test_instances(std::list<resource_metadata*>& o)
+{
+ o.push_back(new resource_metadata);
+ auto m = new resource_metadata;
+ m->group_id = "id";
+ o.push_back(m);
+}
+
+} // namespace rgwrados::groups
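
Since list() clears next_marker on the final page, a caller can drain the listing with a plain marker loop. A sketch, assuming dpp/y/rados and the account's groups object (obj) from the caller; the page size of 1000 is illustrative:

    std::string marker;  // empty: start from the beginning
    std::string next_marker;
    std::vector<std::string> ids;
    do {
      ids.clear();
      int r = rgwrados::groups::list(dpp, y, rados, obj, marker,
                                     "" /* path_prefix */, 1000 /* max_items */,
                                     ids, next_marker);
      if (r < 0) {
        return r;
      }
      for (const auto& id : ids) {
        // process one group id per entry
      }
      marker = next_marker;
    } while (!next_marker.empty());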
diff --git a/src/rgw/driver/rados/groups.h b/src/rgw/driver/rados/groups.h
new file mode 100644
index 00000000000..50ebcad2d62
--- /dev/null
+++ b/src/rgw/driver/rados/groups.h
@@ -0,0 +1,79 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#pragma once
+
+#include <list>
+#include <string>
+#include "include/rados/librados_fwd.hpp"
+#include "include/encoding.h"
+#include "rgw_sal_fwd.h"
+
+namespace ceph { class Formatter; }
+class DoutPrefixProvider;
+class optional_yield;
+struct rgw_raw_obj;
+struct RGWGroupInfo;
+
+
+namespace rgwrados::groups {
+
+/// Add the given group to the list.
+int add(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ const RGWGroupInfo& info,
+ bool exclusive, uint32_t limit);
+
+/// Remove the given group from the list.
+int remove(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view name);
+
+/// Return a paginated listing of group ids.
+int list(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view marker,
+ std::string_view path_prefix,
+ uint32_t max_items,
+ std::vector<std::string>& ids,
+ std::string& next_marker);
+
+// group-specific metadata for cls_user_account_resource
+struct resource_metadata {
+ std::string group_id;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(group_id, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(group_id, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter* f) const;
+ static void generate_test_instances(std::list<resource_metadata*>& o);
+};
+WRITE_CLASS_ENCODER(resource_metadata);
+
+} // namespace rgwrados::groups
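
The versioned encoder means resource_metadata round-trips through a bufferlist like any other cls payload. A sketch (the id value is a placeholder):

    rgwrados::groups::resource_metadata in;
    in.group_id = "example-group-id";

    ceph::buffer::list bl;
    encode(in, bl);  // free function generated by WRITE_CLASS_ENCODER

    rgwrados::groups::resource_metadata out;
    auto p = bl.cbegin();
    decode(out, p);  // throws ceph::buffer::error on malformed input
    // out.group_id now equals "example-group-id"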
diff --git a/src/rgw/driver/rados/rgw_bucket.cc b/src/rgw/driver/rados/rgw_bucket.cc
index ce869f399d6..c51e61a2755 100644
--- a/src/rgw/driver/rados/rgw_bucket.cc
+++ b/src/rgw/driver/rados/rgw_bucket.cc
@@ -1,6 +1,7 @@
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
+#include "include/function2.hpp"
#include "rgw_acl_s3.h"
#include "rgw_tag_s3.h"
@@ -12,6 +13,8 @@
#include "services/svc_bucket.h"
#include "services/svc_user.h"
+#include "account.h"
+#include "buckets.h"
#include "rgw_reshard.h"
#include "rgw_pubsub.h"
@@ -90,17 +93,18 @@ static void dump_mulipart_index_results(list<rgw_obj_index_key>& objs_to_unlink,
}
}
-void check_bad_user_bucket_mapping(rgw::sal::Driver* driver, rgw::sal::User& user,
- bool fix,
- optional_yield y,
- const DoutPrefixProvider *dpp)
+void check_bad_owner_bucket_mapping(rgw::sal::Driver* driver,
+ const rgw_owner& owner,
+ const std::string& tenant,
+ bool fix, optional_yield y,
+ const DoutPrefixProvider *dpp)
{
size_t max_entries = dpp->get_cct()->_conf->rgw_list_buckets_max_chunk;
rgw::sal::BucketList listing;
do {
- int ret = user.list_buckets(dpp, listing.next_marker, string(),
- max_entries, false, listing, y);
+ int ret = driver->list_buckets(dpp, owner, tenant, listing.next_marker,
+ string(), max_entries, false, listing, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "failed to read user buckets: "
<< cpp_strerror(-ret) << dendl;
@@ -109,7 +113,7 @@ void check_bad_user_bucket_mapping(rgw::sal::Driver* driver, rgw::sal::User& use
for (const auto& ent : listing.buckets) {
std::unique_ptr<rgw::sal::Bucket> bucket;
- int r = driver->load_bucket(dpp, rgw_bucket(user.get_tenant(), ent.bucket.name),
+ int r = driver->load_bucket(dpp, rgw_bucket(tenant, ent.bucket.name),
&bucket, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "could not get bucket info for bucket=" << bucket << dendl;
@@ -121,7 +125,7 @@ void check_bad_user_bucket_mapping(rgw::sal::Driver* driver, rgw::sal::User& use
<< " got " << bucket << std::endl;
if (fix) {
cout << "fixing" << std::endl;
- r = bucket->chown(dpp, user.get_id(), y);
+ r = bucket->chown(dpp, owner, y);
if (r < 0) {
cerr << "failed to fix bucket: " << cpp_strerror(-r) << std::endl;
}
@@ -983,15 +987,21 @@ int RGWBucketAdminOp::dump_s3_policy(rgw::sal::Driver* driver, RGWBucketAdminOpS
return 0;
}
-int RGWBucketAdminOp::unlink(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y)
+int RGWBucketAdminOp::unlink(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y, string *err)
{
- RGWBucket bucket;
+ auto radosdriver = dynamic_cast<rgw::sal::RadosStore*>(driver);
+ if (!radosdriver) {
+ set_err_msg(err, "rados store only");
+ return -ENOTSUP;
+ }
+ RGWBucket bucket;
int ret = bucket.init(driver, op_state, y, dpp);
if (ret < 0)
return ret;
- return static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->unlink_bucket(op_state.get_user_id(), op_state.get_bucket()->get_info().bucket, y, dpp, true);
+ auto* rados = radosdriver->getRados()->get_rados_handle();
+ return radosdriver->ctl()->bucket->unlink_bucket(*rados, op_state.get_user_id(), op_state.get_bucket()->get_info().bucket, y, dpp, true);
}
int RGWBucketAdminOp::link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y, string *err)
@@ -1000,6 +1010,11 @@ int RGWBucketAdminOp::link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_s
set_err_msg(err, "empty user id");
return -EINVAL;
}
+ auto radosdriver = dynamic_cast<rgw::sal::RadosStore*>(driver);
+ if (!radosdriver) {
+ set_err_msg(err, "rados store only");
+ return -ENOTSUP;
+ }
RGWBucket bucket;
int ret = bucket.init(driver, op_state, y, dpp, err);
@@ -1057,9 +1072,10 @@ int RGWBucketAdminOp::link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_s
return -EIO;
}
- int r = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->unlink_bucket(owner.id, old_bucket->get_info().bucket, y, dpp, false);
+ auto* rados = radosdriver->getRados()->get_rados_handle();
+ int r = radosdriver->ctl()->bucket->unlink_bucket(*rados, owner.id, old_bucket->get_info().bucket, y, dpp, false);
if (r < 0) {
- set_err_msg(err, "could not unlink policy from user " + owner.id.to_str());
+ set_err_msg(err, "could not unlink bucket from owner " + to_string(owner.id));
return r;
}
@@ -1098,7 +1114,7 @@ int RGWBucketAdminOp::link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_s
rgw::sal::Attrs ep_attrs;
rgw_ep_info ep_data{ep, ep_attrs};
- r = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->link_bucket(op_state.get_user_id(), loc_bucket->get_info().bucket, loc_bucket->get_info().creation_time, y, dpp, true, &ep_data);
+ r = radosdriver->ctl()->bucket->link_bucket(*rados, op_state.get_user_id(), loc_bucket->get_info().bucket, loc_bucket->get_info().creation_time, y, dpp, true, &ep_data);
if (r < 0) {
set_err_msg(err, "failed to relink bucket");
return r;
@@ -1106,7 +1122,7 @@ int RGWBucketAdminOp::link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_s
if (*loc_bucket != *old_bucket) {
// like RGWRados::delete_bucket -- excepting no bucket_index work.
- r = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->remove_bucket_entrypoint_info(
+ r = radosdriver->ctl()->bucket->remove_bucket_entrypoint_info(
old_bucket->get_key(), y, dpp,
RGWBucketCtl::Bucket::RemoveParams()
.set_objv_tracker(&ep_data.ep_objv));
@@ -1114,7 +1130,7 @@ int RGWBucketAdminOp::link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_s
set_err_msg(err, "failed to unlink old bucket " + old_bucket->get_tenant() + "/" + old_bucket->get_name());
return r;
}
- r = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->remove_bucket_instance_info(
+ r = radosdriver->ctl()->bucket->remove_bucket_instance_info(
old_bucket->get_key(), old_bucket->get_info(),
y, dpp,
RGWBucketCtl::BucketInstance::RemoveParams()
@@ -1368,7 +1384,7 @@ int RGWBucketAdminOp::limit_check(rgw::sal::Driver* driver,
formatter->open_array_section("users");
for (const auto& user_id : user_ids) {
- std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(user_id));
+ const auto user = rgw_user{user_id};
formatter->open_object_section("user");
formatter->dump_string("user_id", user_id);
@@ -1376,8 +1392,8 @@ int RGWBucketAdminOp::limit_check(rgw::sal::Driver* driver,
rgw::sal::BucketList listing;
do {
- ret = user->list_buckets(dpp, listing.next_marker, string(),
- max_entries, false, listing, y);
+ ret = driver->list_buckets(dpp, user, user.tenant, listing.next_marker,
+ string(), max_entries, false, listing, y);
if (ret < 0)
return ret;
@@ -1451,6 +1467,47 @@ int RGWBucketAdminOp::limit_check(rgw::sal::Driver* driver,
return ret;
} /* RGWBucketAdminOp::limit_check */
+static int list_owner_bucket_info(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ rgw::sal::Driver* driver,
+ const rgw_owner& owner,
+ const std::string& tenant,
+ const std::string& marker,
+ bool show_stats,
+ RGWFormatterFlusher& flusher)
+{
+ Formatter* formatter = flusher.get_formatter();
+ formatter->open_array_section("buckets");
+
+ const std::string empty_end_marker;
+ const size_t max_entries = dpp->get_cct()->_conf->rgw_list_buckets_max_chunk;
+ constexpr bool no_need_stats = false; // set need_stats to false
+
+ rgw::sal::BucketList listing;
+ listing.next_marker = marker;
+ do {
+ int ret = driver->list_buckets(dpp, owner, tenant, listing.next_marker,
+ empty_end_marker, max_entries, no_need_stats,
+ listing, y);
+ if (ret < 0) {
+ return ret;
+ }
+
+ for (const auto& ent : listing.buckets) {
+ if (show_stats) {
+ bucket_stats(driver, tenant, ent.bucket.name, formatter, dpp, y);
+ } else {
+ formatter->dump_string("bucket", ent.bucket.name);
+ }
+ } // for loop
+
+ flusher.flush();
+ } while (!listing.next_marker.empty());
+
+ formatter->close_section();
+ return 0;
+}
+
int RGWBucketAdminOp::info(rgw::sal::Driver* driver,
RGWBucketAdminOpState& op_state,
RGWFormatterFlusher& flusher,
@@ -1479,34 +1536,43 @@ int RGWBucketAdminOp::info(rgw::sal::Driver* driver,
return ret;
}
} else if (op_state.is_user_op()) {
- formatter->open_array_section("buckets");
-
- std::unique_ptr<rgw::sal::User> user = driver->get_user(op_state.get_user_id());
- const std::string empty_end_marker;
- const size_t max_entries = dpp->get_cct()->_conf->rgw_list_buckets_max_chunk;
- constexpr bool no_need_stats = false; // set need_stats to false
-
- rgw::sal::BucketList listing;
- listing.next_marker = op_state.marker;
- do {
- ret = user->list_buckets(dpp, listing.next_marker, empty_end_marker,
- max_entries, no_need_stats, listing, y);
- if (ret < 0) {
- return ret;
- }
-
- for (const auto& ent : listing.buckets) {
- if (show_stats) {
- bucket_stats(driver, user_id.tenant, ent.bucket.name, formatter, dpp, y);
- } else {
- formatter->dump_string("bucket", ent.bucket.name);
- }
- } // for loop
-
- flusher.flush();
- } while (!listing.next_marker.empty());
+ const rgw_user& uid = op_state.get_user_id();
+ auto user = driver->get_user(uid);
+ ret = user->load_user(dpp, y);
+ if (ret < 0) {
+ return ret;
+ }
+ const RGWUserInfo& info = user->get_info();
+ if (!info.account_id.empty()) {
+ ldpp_dout(dpp, 1) << "Listing buckets in user account "
+ << info.account_id << dendl;
+ ret = list_owner_bucket_info(dpp, y, driver, info.account_id, uid.tenant,
+ op_state.marker, show_stats, flusher);
+ } else {
+ ret = list_owner_bucket_info(dpp, y, driver, uid, uid.tenant,
+ op_state.marker, show_stats, flusher);
+ }
+ if (ret < 0) {
+ return ret;
+ }
+ } else if (op_state.is_account_op()) {
+ // look up the account's tenant
+ const rgw_account_id& account_id = op_state.get_account_id();
+ RGWAccountInfo info;
+ rgw::sal::Attrs attrs; // ignored
+ RGWObjVersionTracker objv; // ignored
+ int ret = driver->load_account_by_id(dpp, y, account_id, info, attrs, objv);
+ if (ret < 0) {
+ ldpp_dout(dpp, 1) << "failed to load account " << account_id
+ << ": " << cpp_strerror(ret) << dendl;
+ return ret;
+ }
- formatter->close_section();
+ ret = list_owner_bucket_info(dpp, y, driver, account_id, info.tenant,
+ op_state.marker, show_stats, flusher);
+ if (ret < 0) {
+ return ret;
+ }
} else {
void *handle = nullptr;
bool truncated = true;
@@ -1934,6 +2000,7 @@ void RGWBucketCompleteInfo::decode_json(JSONObj *obj) {
class RGWBucketMetadataHandler : public RGWBucketMetadataHandlerBase {
public:
+ librados::Rados& rados;
struct Svc {
RGWSI_Bucket *bucket{nullptr};
} svc;
@@ -1942,7 +2009,8 @@ public:
RGWBucketCtl *bucket{nullptr};
} ctl;
- RGWBucketMetadataHandler() {}
+ explicit RGWBucketMetadataHandler(librados::Rados& rados)
+ : rados(rados) {}
void init(RGWSI_Bucket *bucket_svc,
RGWBucketCtl *bucket_ctl) override {
@@ -2010,7 +2078,7 @@ public:
* it immediately and don't want to invalidate our cached objv_version or the bucket obj removal
* will incorrectly fail.
*/
- ret = ctl.bucket->unlink_bucket(be.owner, be.bucket, y, dpp, false);
+ ret = ctl.bucket->unlink_bucket(rados, be.owner, be.bucket, y, dpp, false);
if (ret < 0) {
ldpp_dout(dpp, -1) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl;
}
@@ -2039,17 +2107,19 @@ public:
class RGWMetadataHandlerPut_Bucket : public RGWMetadataHandlerPut_SObj
{
RGWBucketMetadataHandler *bhandler;
+ librados::Rados& rados;
RGWBucketEntryMetadataObject *obj;
public:
- RGWMetadataHandlerPut_Bucket(RGWBucketMetadataHandler *_handler,
+ RGWMetadataHandlerPut_Bucket(RGWBucketMetadataHandler *_handler, librados::Rados& rados,
RGWSI_MetaBackend_Handler::Op *op, string& entry,
RGWMetadataObject *_obj, RGWObjVersionTracker& objv_tracker,
optional_yield y,
- RGWMDLogSyncType type, bool from_remote_zone) : RGWMetadataHandlerPut_SObj(_handler, op, entry, _obj, objv_tracker, y, type, from_remote_zone),
- bhandler(_handler) {
- obj = static_cast<RGWBucketEntryMetadataObject *>(_obj);
- }
- ~RGWMetadataHandlerPut_Bucket() {}
+ RGWMDLogSyncType type, bool from_remote_zone)
+ : RGWMetadataHandlerPut_SObj(_handler, op, entry, _obj, objv_tracker, y, type, from_remote_zone),
+ bhandler(_handler),
+ rados(rados),
+ obj(static_cast<RGWBucketEntryMetadataObject *>(_obj))
+ {}
void encode_obj(bufferlist *bl) override {
obj->get_ep().encode(*bl);
@@ -2066,7 +2136,8 @@ int RGWBucketMetadataHandler::do_put(RGWSI_MetaBackend_Handler::Op *op, string&
const DoutPrefixProvider *dpp,
RGWMDLogSyncType type, bool from_remote_zone)
{
- RGWMetadataHandlerPut_Bucket put_op(this, op, entry, obj, objv_tracker, y, type, from_remote_zone);
+ RGWMetadataHandlerPut_Bucket put_op(this, rados, op, entry, obj, objv_tracker,
+ y, type, from_remote_zone);
return do_put_operate(&put_op, dpp);
}
@@ -2096,18 +2167,32 @@ int RGWMetadataHandlerPut_Bucket::put_checked(const DoutPrefixProvider *dpp)
int RGWMetadataHandlerPut_Bucket::put_post(const DoutPrefixProvider *dpp)
{
- auto& be = obj->get_ep();
+ auto* orig_obj = static_cast<RGWBucketEntryMetadataObject *>(old_obj);
+ auto* old_be = orig_obj ? &orig_obj->get_ep() : nullptr;
+ auto& new_be = obj->get_ep();
- int ret;
+ RGWBucketCtl& ctl = *bhandler->ctl.bucket;
+ constexpr bool update_entrypoint = false;
- /* link bucket */
- if (be.linked) {
- ret = bhandler->ctl.bucket->link_bucket(be.owner, be.bucket, be.creation_time, y, dpp, false);
- } else {
- ret = bhandler->ctl.bucket->unlink_bucket(be.owner, be.bucket, y, dpp, false);
+ if (old_be && (old_be->owner != new_be.owner || // owner changed
+ (old_be->linked && !new_be.linked))) { // linked -> false
+ int ret = ctl.unlink_bucket(rados, old_be->owner, old_be->bucket,
+ y, dpp, update_entrypoint);
+ if (ret < 0) {
+ return ret;
+ }
}
- return ret;
+ if (new_be.linked && (!old_be || !old_be->linked || // linked -> true
+ old_be->owner != new_be.owner)) { // owner changed
+ int ret = ctl.link_bucket(rados, new_be.owner, new_be.bucket,
+ new_be.creation_time, y, dpp, update_entrypoint);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return 0;
}
int update_bucket_topic_mappings(const DoutPrefixProvider* dpp,
@@ -2261,7 +2346,8 @@ WRITE_CLASS_ENCODER(archive_meta_info)
class RGWArchiveBucketMetadataHandler : public RGWBucketMetadataHandler {
public:
- RGWArchiveBucketMetadataHandler() {}
+ explicit RGWArchiveBucketMetadataHandler(librados::Rados& rados)
+ : RGWBucketMetadataHandler(rados) {}
int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) override {
@@ -2356,7 +2442,7 @@ public:
/* link new bucket */
- ret = ctl.bucket->link_bucket(new_be.owner, new_be.bucket, new_be.creation_time, y, dpp, false);
+ ret = ctl.bucket->link_bucket(rados, new_be.owner, new_be.bucket, new_be.creation_time, y, dpp, false);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to link new bucket for bucket=" << new_be.bucket << " ret=" << ret << dendl;
return ret;
@@ -2364,7 +2450,7 @@ public:
/* clean up old stuff */
- ret = ctl.bucket->unlink_bucket(be.owner, entry_bucket, y, dpp, false);
+ ret = ctl.bucket->unlink_bucket(rados, be.owner, entry_bucket, y, dpp, false);
if (ret < 0) {
ldpp_dout(dpp, -1) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl;
}
@@ -3096,7 +3182,8 @@ int RGWBucketCtl::set_bucket_instance_attrs(RGWBucketInfo& bucket_info,
}
-int RGWBucketCtl::link_bucket(const rgw_user& user_id,
+int RGWBucketCtl::link_bucket(librados::Rados& rados,
+ const rgw_owner& owner,
const rgw_bucket& bucket,
ceph::real_time creation_time,
optional_yield y,
@@ -3105,13 +3192,28 @@ int RGWBucketCtl::link_bucket(const rgw_user& user_id,
rgw_ep_info *pinfo)
{
return bm_handler->call([&](RGWSI_Bucket_EP_Ctx& ctx) {
- return do_link_bucket(ctx, user_id, bucket, creation_time,
+ return do_link_bucket(ctx, rados, owner, bucket, creation_time,
update_entrypoint, pinfo, y, dpp);
});
}
+static rgw_raw_obj get_owner_buckets_obj(RGWSI_User* svc_user,
+ RGWSI_Zone* svc_zone,
+ const rgw_owner& owner)
+{
+ return std::visit(fu2::overload(
+ [&] (const rgw_user& uid) {
+ return svc_user->get_buckets_obj(uid);
+ },
+ [&] (const rgw_account_id& account_id) {
+ const RGWZoneParams& zone = svc_zone->get_zone_params();
+ return rgwrados::account::get_buckets_obj(zone, account_id);
+ }), owner);
+}
+
int RGWBucketCtl::do_link_bucket(RGWSI_Bucket_EP_Ctx& ctx,
- const rgw_user& user_id,
+ librados::Rados& rados,
+ const rgw_owner& owner,
const rgw_bucket& bucket,
ceph::real_time creation_time,
bool update_entrypoint,
@@ -3146,10 +3248,12 @@ int RGWBucketCtl::do_link_bucket(RGWSI_Bucket_EP_Ctx& ctx,
}
}
- ret = svc.user->add_bucket(dpp, user_id, bucket, creation_time, y);
+ const auto& buckets_obj = get_owner_buckets_obj(svc.user, svc.zone, owner);
+ ret = rgwrados::buckets::add(dpp, y, rados, buckets_obj,
+ bucket, creation_time);
if (ret < 0) {
- ldpp_dout(dpp, 0) << "ERROR: error adding bucket to user directory:"
- << " user=" << user_id
+ ldpp_dout(dpp, 0) << "ERROR: error adding bucket to owner directory:"
+ << " owner=" << owner
<< " bucket=" << bucket
<< " err=" << cpp_strerror(-ret)
<< dendl;
@@ -3160,7 +3264,7 @@ int RGWBucketCtl::do_link_bucket(RGWSI_Bucket_EP_Ctx& ctx,
return 0;
ep.linked = true;
- ep.owner = user_id;
+ ep.owner = owner;
ep.bucket = bucket;
ret = svc.bucket->store_bucket_entrypoint_info(
ctx, meta_key, ep, false, real_time(), pattrs, &rot, y, dpp);
@@ -3170,7 +3274,7 @@ int RGWBucketCtl::do_link_bucket(RGWSI_Bucket_EP_Ctx& ctx,
return 0;
done_err:
- int r = do_unlink_bucket(ctx, user_id, bucket, true, y, dpp);
+ int r = do_unlink_bucket(ctx, rados, owner, bucket, true, y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed unlinking bucket on error cleanup: "
<< cpp_strerror(-r) << dendl;
@@ -3178,21 +3282,25 @@ done_err:
return ret;
}
-int RGWBucketCtl::unlink_bucket(const rgw_user& user_id, const rgw_bucket& bucket, optional_yield y, const DoutPrefixProvider *dpp, bool update_entrypoint)
+int RGWBucketCtl::unlink_bucket(librados::Rados& rados, const rgw_owner& owner,
+ const rgw_bucket& bucket, optional_yield y,
+ const DoutPrefixProvider *dpp, bool update_entrypoint)
{
return bm_handler->call([&](RGWSI_Bucket_EP_Ctx& ctx) {
- return do_unlink_bucket(ctx, user_id, bucket, update_entrypoint, y, dpp);
+ return do_unlink_bucket(ctx, rados, owner, bucket, update_entrypoint, y, dpp);
});
}
int RGWBucketCtl::do_unlink_bucket(RGWSI_Bucket_EP_Ctx& ctx,
- const rgw_user& user_id,
+ librados::Rados& rados,
+ const rgw_owner& owner,
const rgw_bucket& bucket,
bool update_entrypoint,
optional_yield y,
const DoutPrefixProvider *dpp)
{
- int ret = svc.user->remove_bucket(dpp, user_id, bucket, y);
+ const auto& buckets_obj = get_owner_buckets_obj(svc.user, svc.zone, owner);
+ int ret = rgwrados::buckets::remove(dpp, y, rados, buckets_obj, bucket);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: error removing bucket from directory: "
<< cpp_strerror(-ret)<< dendl;
@@ -3214,8 +3322,8 @@ int RGWBucketCtl::do_unlink_bucket(RGWSI_Bucket_EP_Ctx& ctx,
if (!ep.linked)
return 0;
- if (ep.owner != user_id) {
- ldpp_dout(dpp, 0) << "bucket entry point user mismatch, can't unlink bucket: " << ep.owner << " != " << user_id << dendl;
+ if (ep.owner != owner) {
+ ldpp_dout(dpp, 0) << "bucket entry point owner mismatch, can't unlink bucket: " << ep.owner << " != " << owner << dendl;
return -EINVAL;
}
@@ -3233,19 +3341,20 @@ int RGWBucketCtl::read_bucket_stats(const rgw_bucket& bucket,
});
}
-int RGWBucketCtl::read_buckets_stats(map<string, RGWBucketEnt>& m,
+int RGWBucketCtl::read_buckets_stats(std::vector<RGWBucketEnt>& buckets,
optional_yield y, const DoutPrefixProvider *dpp)
{
return call([&](RGWSI_Bucket_X_Ctx& ctx) {
- return svc.bucket->read_buckets_stats(ctx, m, y, dpp);
+ return svc.bucket->read_buckets_stats(ctx, buckets, y, dpp);
});
}
-int RGWBucketCtl::sync_user_stats(const DoutPrefixProvider *dpp,
- const rgw_user& user_id,
- const RGWBucketInfo& bucket_info,
- optional_yield y,
- RGWBucketEnt* pent)
+int RGWBucketCtl::sync_owner_stats(const DoutPrefixProvider *dpp,
+ librados::Rados& rados,
+ const rgw_owner& owner,
+ const RGWBucketInfo& bucket_info,
+ optional_yield y,
+ RGWBucketEnt* pent)
{
RGWBucketEnt ent;
if (!pent) {
@@ -3257,7 +3366,16 @@ int RGWBucketCtl::sync_user_stats(const DoutPrefixProvider *dpp,
return r;
}
- return svc.user->flush_bucket_stats(dpp, user_id, *pent, y);
+ // flush stats to the user/account owner object
+ const rgw_raw_obj& obj = std::visit(fu2::overload(
+ [&] (const rgw_user& user) {
+ return svc.user->get_buckets_obj(user);
+ },
+ [&] (const rgw_account_id& id) {
+ const RGWZoneParams& zone = svc.zone->get_zone_params();
+ return rgwrados::account::get_buckets_obj(zone, id);
+ }), owner);
+ return rgwrados::buckets::write_stats(dpp, y, rados, obj, *pent);
}
int RGWBucketCtl::get_sync_policy_handler(std::optional<rgw_zone_id> zone,
@@ -3305,9 +3423,9 @@ int RGWBucketCtl::bucket_imports_data(const rgw_bucket& bucket,
return handler->bucket_imports_data();
}
-RGWBucketMetadataHandlerBase* RGWBucketMetaHandlerAllocator::alloc()
+RGWBucketMetadataHandlerBase* RGWBucketMetaHandlerAllocator::alloc(librados::Rados& rados)
{
- return new RGWBucketMetadataHandler();
+ return new RGWBucketMetadataHandler(rados);
}
RGWBucketInstanceMetadataHandlerBase* RGWBucketInstanceMetaHandlerAllocator::alloc(rgw::sal::Driver* driver)
@@ -3315,9 +3433,9 @@ RGWBucketInstanceMetadataHandlerBase* RGWBucketInstanceMetaHandlerAllocator::all
return new RGWBucketInstanceMetadataHandler(driver);
}
-RGWBucketMetadataHandlerBase* RGWArchiveBucketMetaHandlerAllocator::alloc()
+RGWBucketMetadataHandlerBase* RGWArchiveBucketMetaHandlerAllocator::alloc(librados::Rados& rados)
{
- return new RGWArchiveBucketMetadataHandler();
+ return new RGWArchiveBucketMetadataHandler(rados);
}
RGWBucketInstanceMetadataHandlerBase* RGWArchiveBucketInstanceMetaHandlerAllocator::alloc(rgw::sal::Driver* driver)
diff --git a/src/rgw/driver/rados/rgw_bucket.h b/src/rgw/driver/rados/rgw_bucket.h
index 5af10b524de..e91c0d7e139 100644
--- a/src/rgw/driver/rados/rgw_bucket.h
+++ b/src/rgw/driver/rados/rgw_bucket.h
@@ -10,6 +10,7 @@
#include <boost/container/flat_map.hpp>
#include <boost/container/flat_set.hpp>
+#include "include/rados/librados_fwd.hpp"
#include "include/types.h"
#include "rgw_common.h"
#include "rgw_tools.h"
@@ -188,7 +189,7 @@ public:
class RGWBucketMetaHandlerAllocator {
public:
- static RGWBucketMetadataHandlerBase *alloc();
+ static RGWBucketMetadataHandlerBase *alloc(librados::Rados& rados);
};
class RGWBucketInstanceMetaHandlerAllocator {
@@ -198,7 +199,7 @@ public:
class RGWArchiveBucketMetaHandlerAllocator {
public:
- static RGWBucketMetadataHandlerBase *alloc();
+ static RGWBucketMetadataHandlerBase *alloc(librados::Rados& rados);
};
class RGWArchiveBucketInstanceMetaHandlerAllocator {
@@ -212,10 +213,15 @@ extern int rgw_object_get_attr(rgw::sal::Driver* driver, rgw::sal::Object* obj,
const char* attr_name, bufferlist& out_bl,
optional_yield y);
-extern void check_bad_user_bucket_mapping(rgw::sal::Driver* driver, rgw::sal::User& user, bool fix, optional_yield y, const DoutPrefixProvider *dpp);
+void check_bad_owner_bucket_mapping(rgw::sal::Driver* driver,
+ const rgw_owner& owner,
+ const std::string& tenant,
+ bool fix, optional_yield y,
+ const DoutPrefixProvider *dpp);
struct RGWBucketAdminOpState {
rgw_user uid;
+ rgw_account_id account_id;
std::string display_name;
std::string bucket_name;
std::string bucket_id;
@@ -277,6 +283,7 @@ struct RGWBucketAdminOpState {
void set_sync_bucket(bool value) { sync_bucket = value; }
rgw_user& get_user_id() { return uid; }
+ rgw_account_id& get_account_id() { return account_id; }
std::string& get_user_display_name() { return display_name; }
std::string& get_bucket_name() { return bucket_name; }
std::string& get_object_name() { return object_name; }
@@ -298,6 +305,7 @@ struct RGWBucketAdminOpState {
bool will_delete_children() { return delete_child_objects; }
bool will_check_objects() { return check_objects; }
bool is_user_op() { return !uid.empty(); }
+ bool is_account_op() { return !account_id.empty(); }
bool is_system_op() { return uid.empty(); }
bool has_bucket_stored() { return bucket_stored; }
int get_max_aio() { return max_aio; }
@@ -372,7 +380,7 @@ public:
static int dump_s3_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
std::ostream& os, const DoutPrefixProvider *dpp, optional_yield y);
- static int unlink(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y);
+ static int unlink(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y, std::string *err_msg = nullptr);
static int link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y, std::string *err_msg = NULL);
static int chown(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const std::string& marker, const DoutPrefixProvider *dpp, optional_yield y, std::string *err_msg = NULL);
@@ -678,7 +686,8 @@ public:
const DoutPrefixProvider *dpp);
/* user/bucket */
- int link_bucket(const rgw_user& user_id,
+ int link_bucket(librados::Rados& rados,
+ const rgw_owner& owner,
const rgw_bucket& bucket,
ceph::real_time creation_time,
optional_yield y,
@@ -686,13 +695,14 @@ public:
bool update_entrypoint = true,
rgw_ep_info *pinfo = nullptr);
- int unlink_bucket(const rgw_user& user_id,
+ int unlink_bucket(librados::Rados& rados,
+ const rgw_owner& owner,
const rgw_bucket& bucket,
optional_yield y,
const DoutPrefixProvider *dpp,
bool update_entrypoint = true);
- int read_buckets_stats(std::map<std::string, RGWBucketEnt>& m,
+ int read_buckets_stats(std::vector<RGWBucketEnt>& buckets,
optional_yield y,
const DoutPrefixProvider *dpp);
@@ -702,10 +712,12 @@ public:
const DoutPrefixProvider *dpp);
/* quota related */
- int sync_user_stats(const DoutPrefixProvider *dpp,
- const rgw_user& user_id, const RGWBucketInfo& bucket_info,
- optional_yield y,
- RGWBucketEnt* pent);
+ int sync_owner_stats(const DoutPrefixProvider *dpp,
+ librados::Rados& rados,
+ const rgw_owner& owner,
+ const RGWBucketInfo& bucket_info,
+ optional_yield y,
+ RGWBucketEnt* pent);
/* bucket sync */
int get_sync_policy_handler(std::optional<rgw_zone_id> zone,
@@ -744,7 +756,8 @@ private:
const DoutPrefixProvider *dpp);
int do_link_bucket(RGWSI_Bucket_EP_Ctx& ctx,
- const rgw_user& user,
+ librados::Rados& rados,
+ const rgw_owner& owner,
const rgw_bucket& bucket,
ceph::real_time creation_time,
bool update_entrypoint,
@@ -753,7 +766,8 @@ private:
const DoutPrefixProvider *dpp);
int do_unlink_bucket(RGWSI_Bucket_EP_Ctx& ctx,
- const rgw_user& user_id,
+ librados::Rados& rados,
+ const rgw_owner& owner,
const rgw_bucket& bucket,
bool update_entrypoint,
optional_yield y,
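
The rgw_owner parameters threaded through these signatures are a variant over rgw_user and rgw_account_id, dispatched with std::visit plus fu2::overload as in get_owner_buckets_obj() above. A minimal sketch of the pattern, assuming only that rgw_account_id converts to std::string:

    #include "include/function2.hpp"

    std::string describe_owner(const rgw_owner& owner) {
      return std::visit(fu2::overload(
          [] (const rgw_user& u) { return "user:" + u.to_str(); },
          [] (const rgw_account_id& a) { return "account:" + std::string{a}; }
          ), owner);
    }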
diff --git a/src/rgw/driver/rados/rgw_bucket_sync.cc b/src/rgw/driver/rados/rgw_bucket_sync.cc
index 6ff76c16a90..dafbb6df46f 100644
--- a/src/rgw/driver/rados/rgw_bucket_sync.cc
+++ b/src/rgw/driver/rados/rgw_bucket_sync.cc
@@ -768,15 +768,6 @@ RGWBucketSyncPolicyHandler::RGWBucketSyncPolicyHandler(const RGWBucketSyncPolicy
bucket_attrs(std::move(_bucket_attrs)) {
if (_bucket_info.sync_policy) {
sync_policy = *_bucket_info.sync_policy;
-
- for (auto& entry : sync_policy.groups) {
- for (auto& pipe : entry.second.pipes) {
- if (pipe.params.mode == rgw_sync_pipe_params::MODE_USER &&
- pipe.params.user.empty()) {
- pipe.params.user = _bucket_info.owner;
- }
- }
- }
}
legacy_config = parent->legacy_config;
bucket = _bucket_info.bucket;
diff --git a/src/rgw/driver/rados/rgw_cr_rados.cc b/src/rgw/driver/rados/rgw_cr_rados.cc
index 51ebf95197e..396c556926f 100644
--- a/src/rgw/driver/rados/rgw_cr_rados.cc
+++ b/src/rgw/driver/rados/rgw_cr_rados.cc
@@ -932,7 +932,7 @@ int RGWAsyncRemoveObj::_send_request(const DoutPrefixProvider *dpp)
std::unique_ptr<rgw::sal::Object::DeleteOp> del_op = obj->get_delete_op();
- del_op->params.bucket_owner.id = bucket->get_info().owner;
+ del_op->params.bucket_owner = bucket->get_info().owner;
del_op->params.obj_owner = policy.get_owner();
if (del_if_older) {
del_op->params.unmod_since = timestamp;
diff --git a/src/rgw/driver/rados/rgw_data_sync.cc b/src/rgw/driver/rados/rgw_data_sync.cc
index 0a7d21a7277..dbea56d4de7 100644
--- a/src/rgw/driver/rados/rgw_data_sync.cc
+++ b/src/rgw/driver/rados/rgw_data_sync.cc
@@ -2614,7 +2614,6 @@ class RGWUserPermHandler {
rgw_user uid;
struct _info {
- RGWUserInfo user_info;
rgw::IAM::Environment env;
std::unique_ptr<rgw::auth::Identity> identity;
RGWAccessControlPolicy user_acl;
@@ -2638,27 +2637,23 @@ class RGWUserPermHandler {
uid(handler->uid),
info(handler->info) {}
int operate() override {
- auto user_ctl = sync_env->driver->getRados()->ctl.user;
-
- ret = user_ctl->get_info_by_uid(sync_env->dpp, uid, &info->user_info, null_yield);
+ auto user = sync_env->driver->get_user(uid);
+ ret = user->load_user(sync_env->dpp, null_yield);
if (ret < 0) {
return ret;
}
- info->identity = rgw::auth::transform_old_authinfo(sync_env->cct,
- uid,
- RGW_PERM_FULL_CONTROL,
- false, /* system_request? */
- TYPE_RGW);
-
- map<string, bufferlist> uattrs;
-
- ret = user_ctl->get_attrs_by_uid(sync_env->dpp, uid, &uattrs, null_yield);
- if (ret == 0) {
- ret = RGWUserPermHandler::policy_from_attrs(sync_env->cct, uattrs, &info->user_acl);
+ auto result = rgw::auth::transform_old_authinfo(
+ sync_env->dpp, null_yield, sync_env->driver, user.get());
+ if (!result) {
+ return result.error();
}
+ info->identity = std::move(result).value();
+
+ ret = RGWUserPermHandler::policy_from_attrs(
+ sync_env->cct, user->get_attrs(), &info->user_acl);
if (ret == -ENOENT) {
- info->user_acl.create_default(uid, info->user_info.display_name);
+ info->user_acl.create_default(uid, user->get_display_name());
}
return 0;
@@ -2849,7 +2844,7 @@ int RGWFetchObjFilter_Sync::filter(CephContext *cct,
rgw_user& acl_translation_owner = params.dest.acl_translation->owner;
if (!acl_translation_owner.empty()) {
if (params.mode == rgw_sync_pipe_params::MODE_USER &&
- acl_translation_owner != dest_bucket_info.owner) {
+ rgw_owner{acl_translation_owner} != dest_bucket_info.owner) {
ldout(cct, 0) << "ERROR: " << __func__ << ": acl translation was requested, but user (" << acl_translation_owner
<< ") is not dest bucket owner (" << dest_bucket_info.owner << ")" << dendl;
return -EPERM;
@@ -3125,8 +3120,8 @@ public:
RGWDataSyncModule *get_data_handler() override {
return &data_handler;
}
- RGWMetadataHandler *alloc_bucket_meta_handler() override {
- return RGWArchiveBucketMetaHandlerAllocator::alloc();
+ RGWMetadataHandler *alloc_bucket_meta_handler(librados::Rados& rados) override {
+ return RGWArchiveBucketMetaHandlerAllocator::alloc(rados);
}
RGWBucketInstanceMetadataHandlerBase *alloc_bucket_instance_meta_handler(rgw::sal::Driver* driver) override {
return RGWArchiveBucketInstanceMetaHandlerAllocator::alloc(driver);
diff --git a/src/rgw/driver/rados/rgw_notify.cc b/src/rgw/driver/rados/rgw_notify.cc
index 5a1786ef7ad..d612d5452cd 100644
--- a/src/rgw/driver/rados/rgw_notify.cc
+++ b/src/rgw/driver/rados/rgw_notify.cc
@@ -11,6 +11,7 @@
#include <boost/asio/io_context.hpp>
#include <boost/context/protected_fixedsize_stack.hpp>
#include <spawn/spawn.hpp>
+#include "include/function2.hpp"
#include "rgw_sal_rados.h"
#include "rgw_pubsub.h"
#include "rgw_pubsub_push.h"
@@ -730,36 +731,36 @@ public:
ldpp_dout(this, 10) << "Started notification manager with: " << worker_count << " workers" << dendl;
}
- int add_persistent_topic(const std::string& topic_name, optional_yield y) {
- if (topic_name == Q_LIST_OBJECT_NAME) {
+ int add_persistent_topic(const std::string& topic_queue, optional_yield y) {
+ if (topic_queue == Q_LIST_OBJECT_NAME) {
ldpp_dout(this, 1) << "ERROR: topic name cannot be: " << Q_LIST_OBJECT_NAME << " (conflict with queue list object name)" << dendl;
return -EINVAL;
}
librados::ObjectWriteOperation op;
op.create(true);
- cls_2pc_queue_init(op, topic_name, max_queue_size);
+ cls_2pc_queue_init(op, topic_queue, max_queue_size);
auto& rados_ioctx = rados_store.getRados()->get_notif_pool_ctx();
- auto ret = rgw_rados_operate(this, rados_ioctx, topic_name, &op, y);
+ auto ret = rgw_rados_operate(this, rados_ioctx, topic_queue, &op, y);
if (ret == -EEXIST) {
// queue already exists - nothing to do
- ldpp_dout(this, 20) << "INFO: queue for topic: " << topic_name << " already exists. nothing to do" << dendl;
+ ldpp_dout(this, 20) << "INFO: queue for topic: " << topic_queue << " already exists. nothing to do" << dendl;
return 0;
}
if (ret < 0) {
// failed to create queue
- ldpp_dout(this, 1) << "ERROR: failed to create queue for topic: " << topic_name << ". error: " << ret << dendl;
+ ldpp_dout(this, 1) << "ERROR: failed to create queue for topic: " << topic_queue << ". error: " << ret << dendl;
return ret;
}
bufferlist empty_bl;
- std::map<std::string, bufferlist> new_topic{{topic_name, empty_bl}};
+ std::map<std::string, bufferlist> new_topic{{topic_queue, empty_bl}};
op.omap_set(new_topic);
ret = rgw_rados_operate(this, rados_ioctx, Q_LIST_OBJECT_NAME, &op, y);
if (ret < 0) {
- ldpp_dout(this, 1) << "ERROR: failed to add queue: " << topic_name << " to queue list. error: " << ret << dendl;
+ ldpp_dout(this, 1) << "ERROR: failed to add queue: " << topic_queue << " to queue list. error: " << ret << dendl;
return ret;
}
- ldpp_dout(this, 20) << "INFO: queue: " << topic_name << " added to queue list" << dendl;
+ ldpp_dout(this, 20) << "INFO: queue: " << topic_queue << " added to queue list" << dendl;
return 0;
}
};
@@ -805,37 +806,37 @@ int add_persistent_topic(const std::string& topic_name, optional_yield y) {
return s_manager->add_persistent_topic(topic_name, y);
}
-int remove_persistent_topic(const DoutPrefixProvider* dpp, librados::IoCtx& rados_ioctx, const std::string& topic_name, optional_yield y) {
+int remove_persistent_topic(const DoutPrefixProvider* dpp, librados::IoCtx& rados_ioctx, const std::string& topic_queue, optional_yield y) {
librados::ObjectWriteOperation op;
op.remove();
- auto ret = rgw_rados_operate(dpp, rados_ioctx, topic_name, &op, y);
+ auto ret = rgw_rados_operate(dpp, rados_ioctx, topic_queue, &op, y);
if (ret == -ENOENT) {
// queue already removed - nothing to do
- ldpp_dout(dpp, 20) << "INFO: queue for topic: " << topic_name << " already removed. nothing to do" << dendl;
+ ldpp_dout(dpp, 20) << "INFO: queue for topic: " << topic_queue << " already removed. nothing to do" << dendl;
return 0;
}
if (ret < 0) {
// failed to remove queue
- ldpp_dout(dpp, 1) << "ERROR: failed to remove queue for topic: " << topic_name << ". error: " << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to remove queue for topic: " << topic_queue << ". error: " << ret << dendl;
return ret;
}
- std::set<std::string> topic_to_remove{{topic_name}};
+ std::set<std::string> topic_to_remove{{topic_queue}};
op.omap_rm_keys(topic_to_remove);
ret = rgw_rados_operate(dpp, rados_ioctx, Q_LIST_OBJECT_NAME, &op, y);
if (ret < 0) {
- ldpp_dout(dpp, 1) << "ERROR: failed to remove queue: " << topic_name << " from queue list. error: " << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to remove queue: " << topic_queue << " from queue list. error: " << ret << dendl;
return ret;
}
- ldpp_dout(dpp, 20) << "INFO: queue: " << topic_name << " removed from queue list" << dendl;
+ ldpp_dout(dpp, 20) << "INFO: queue: " << topic_queue << " removed from queue list" << dendl;
return 0;
}
-int remove_persistent_topic(const std::string& topic_name, optional_yield y) {
+int remove_persistent_topic(const std::string& topic_queue, optional_yield y) {
if (!s_manager) {
return -EAGAIN;
}
- return remove_persistent_topic(s_manager, s_manager->rados_store.getRados()->get_notif_pool_ctx(), topic_name, y);
+ return remove_persistent_topic(s_manager, s_manager->rados_store.getRados()->get_notif_pool_ctx(), topic_queue, y);
}
rgw::sal::Object* get_object_with_attributes(
@@ -921,7 +922,7 @@ static inline void populate_event(reservation_t& res,
event.x_amz_id_2 = res.store->getRados()->host_id; // RGW on which the change was made
// configurationId is filled from notification configuration
event.bucket_name = res.bucket->get_name();
- event.bucket_ownerIdentity = res.bucket->get_owner().id;
+ event.bucket_ownerIdentity = to_string(res.bucket->get_owner());
const auto region = res.store->get_zone()->get_zonegroup().get_api_name();
rgw::ARN bucket_arn(res.bucket->get_key());
bucket_arn.region = region;
@@ -1024,9 +1025,9 @@ int publish_reserve(const DoutPrefixProvider* dpp,
return rc;
}
}
- for (const auto& bucket_topic : bucket_topics.topics) {
- const rgw_pubsub_topic_filter& topic_filter = bucket_topic.second;
- const rgw_pubsub_topic& topic_cfg = topic_filter.topic;
+ for (auto& bucket_topic : bucket_topics.topics) {
+ rgw_pubsub_topic_filter& topic_filter = bucket_topic.second;
+ rgw_pubsub_topic& topic_cfg = topic_filter.topic;
for (auto& event_type : event_types) {
if (!notification_match(res, topic_filter, event_type, req_tags)) {
// notification does not apply to req_state
@@ -1039,6 +1040,30 @@ int publish_reserve(const DoutPrefixProvider* dpp,
<< "') apply to event of type: '" << to_string(event_type) << "'"
<< dendl;
+ // reload the topic in case it changed since the notification was added
+ const std::string& topic_tenant = std::visit(fu2::overload(
+ [] (const rgw_user& u) -> const std::string& { return u.tenant; },
+ [] (const rgw_account_id& a) -> const std::string& { return a; }
+ ), topic_cfg.owner);
+ const RGWPubSub ps(res.store, topic_tenant, site);
+ int ret = ps.get_topic(res.dpp, topic_cfg.dest.arn_topic,
+ topic_cfg, res.yield, nullptr);
+ if (ret < 0) {
+ ldpp_dout(res.dpp, 1)
+ << "INFO: failed to load topic: " << topic_cfg.dest.arn_topic
+ << ". error: " << ret
+ << " while reserving persistent notification event" << dendl;
+ if (ret == -ENOENT) {
+ // either the topic is deleted but the corresponding notification
+ // still exists, or in v2 mode the notification could have synced
+ // first while the topic is not synced yet.
+ return 0;
+ }
+ ldpp_dout(res.dpp, 1)
+ << "WARN: Using the stored topic from bucket notification struct."
+ << dendl;
+ }
+
cls_2pc_reservation::id_t res_id = cls_2pc_reservation::NO_ID;
if (topic_cfg.dest.persistent) {
// TODO: take default reservation size from conf
@@ -1047,7 +1072,7 @@ int publish_reserve(const DoutPrefixProvider* dpp,
librados::ObjectWriteOperation op;
bufferlist obl;
int rval;
- const auto& queue_name = topic_cfg.dest.arn_topic;
+ const auto& queue_name = topic_cfg.dest.persistent_queue;
cls_2pc_queue_reserve(op, res.size, 1, &obl, &rval);
auto ret = rgw_rados_operate(
res.dpp, res.store->getRados()->get_notif_pool_ctx(), queue_name,
@@ -1067,31 +1092,8 @@ int publish_reserve(const DoutPrefixProvider* dpp,
return ret;
}
}
- // load the topic,if there is change in topic config while it's stored in
- // notification.
- rgw_pubsub_topic result;
- const RGWPubSub ps(res.store, res.user_tenant, site);
- auto ret =
- ps.get_topic(res.dpp, topic_cfg.dest.arn_topic, result, res.yield, nullptr);
- if (ret < 0) {
- ldpp_dout(res.dpp, 1)
- << "INFO: failed to load topic: " << topic_cfg.name
- << ". error: " << ret
- << " while reserving persistent notification event" << dendl;
- if (ret == -ENOENT) {
- // either the topic is deleted but the corresponding notification
- // still exist or in v2 mode the notification could have synced first
- // but topic is not synced yet.
- return 0;
- }
- ldpp_dout(res.dpp, 1)
- << "WARN: Using the stored topic from bucket notification struct."
- << dendl;
- res.topics.emplace_back(topic_filter.s3_id, topic_cfg, res_id,
- event_type);
- } else {
- res.topics.emplace_back(topic_filter.s3_id, result, res_id, event_type);
- }
+
+ res.topics.emplace_back(topic_filter.s3_id, topic_cfg, res_id, event_type);
}
}
return 0;
@@ -1127,7 +1129,7 @@ int publish_commit(rgw::sal::Object* obj,
event_entry.retry_sleep_duration = topic.cfg.dest.retry_sleep_duration;
bufferlist bl;
encode(event_entry, bl);
- const auto& queue_name = topic.cfg.dest.arn_topic;
+ const auto& queue_name = topic.cfg.dest.persistent_queue;
if (bl.length() > res.size) {
// try to make a larger reservation, fail only if this is not possible
ldpp_dout(dpp, 5) << "WARNING: committed size: " << bl.length()
@@ -1140,7 +1142,7 @@ int publish_commit(rgw::sal::Object* obj,
cls_2pc_queue_abort(op, topic.res_id);
auto ret = rgw_rados_operate(
dpp, res.store->getRados()->get_notif_pool_ctx(),
- topic.cfg.dest.arn_topic, &op,
+ queue_name, &op,
res.yield);
if (ret < 0) {
ldpp_dout(dpp, 1) << "ERROR: failed to abort reservation: "
@@ -1224,7 +1226,7 @@ int publish_abort(reservation_t& res) {
// nothing to abort or already committed/aborted
continue;
}
- const auto& queue_name = topic.cfg.dest.arn_topic;
+ const auto& queue_name = topic.cfg.dest.persistent_queue;
librados::ObjectWriteOperation op;
cls_2pc_queue_abort(op, topic.res_id);
const auto ret = rgw_rados_operate(
@@ -1241,18 +1243,19 @@ int publish_abort(reservation_t& res) {
return 0;
}
-int get_persistent_queue_stats_by_topic_name(const DoutPrefixProvider *dpp, librados::IoCtx &rados_ioctx,
- const std::string &topic_name, rgw_topic_stats &stats, optional_yield y)
+int get_persistent_queue_stats(const DoutPrefixProvider *dpp, librados::IoCtx &rados_ioctx,
+ const std::string &queue_name, rgw_topic_stats &stats, optional_yield y)
{
+ // TODO: use optional_yield instead of calling rados_ioctx.operate() synchronously
cls_2pc_reservations reservations;
- auto ret = cls_2pc_queue_list_reservations(rados_ioctx, topic_name, reservations);
+ auto ret = cls_2pc_queue_list_reservations(rados_ioctx, queue_name, reservations);
if (ret < 0) {
ldpp_dout(dpp, 1) << "ERROR: failed to read queue list reservation: " << ret << dendl;
return ret;
}
stats.queue_reservations = reservations.size();
- ret = cls_2pc_queue_get_topic_stats(rados_ioctx, topic_name, stats.queue_entries, stats.queue_size);
+ ret = cls_2pc_queue_get_topic_stats(rados_ioctx, queue_name, stats.queue_entries, stats.queue_size);
if (ret < 0) {
ldpp_dout(dpp, 1) << "ERROR: failed to get the queue size or the number of entries: " << ret << dendl;
return ret;
@@ -1273,7 +1276,7 @@ reservation_t::reservation_t(const DoutPrefixProvider* _dpp,
object_name(_object_name),
tagset(_s->tagset),
metadata_fetched_from_attributes(false),
- user_id(_s->user->get_id().id),
+ user_id(to_string(_s->owner.id)),
user_tenant(_s->user->get_id().tenant),
req_id(_s->req_id),
yield(y)
diff --git a/src/rgw/driver/rados/rgw_notify.h b/src/rgw/driver/rados/rgw_notify.h
index ec8117c2f57..7014cda3ca3 100644
--- a/src/rgw/driver/rados/rgw_notify.h
+++ b/src/rgw/driver/rados/rgw_notify.h
@@ -33,15 +33,15 @@ bool init(CephContext* cct, rgw::sal::RadosStore* store,
void shutdown();
// create persistent delivery queue for a topic (endpoint)
-// this operation also add a topic name to the common (to all RGWs) list of all topics
-int add_persistent_topic(const std::string& topic_name, optional_yield y);
+// this operation also adds a topic queue to the common (to all RGWs) list of all topics
+int add_persistent_topic(const std::string& topic_queue, optional_yield y);
// remove persistent delivery queue for a topic (endpoint)
-// this operation also remove the topic name from the common (to all RGWs) list of all topics
-int remove_persistent_topic(const std::string& topic_name, optional_yield y);
+// this operation also removes the topic queue from the common (to all RGWs) list of all topics
+int remove_persistent_topic(const std::string& topic_queue, optional_yield y);
// same as the above, except you need to provide the IoCtx; the above uses rgw::notify::Manager::rados_ioctx
-int remove_persistent_topic(const DoutPrefixProvider* dpp, librados::IoCtx& rados_ioctx, const std::string& topic_name, optional_yield y);
+int remove_persistent_topic(const DoutPrefixProvider* dpp, librados::IoCtx& rados_ioctx, const std::string& topic_queue, optional_yield y);
// struct holding reservation information
// populated in the publish_reserve call
@@ -134,8 +134,8 @@ int publish_commit(rgw::sal::Object* obj,
// cancel the reservation
int publish_abort(reservation_t& reservation);
-int get_persistent_queue_stats_by_topic_name(const DoutPrefixProvider *dpp, librados::IoCtx &rados_ioctx,
- const std::string &topic_name, rgw_topic_stats &stats, optional_yield y);
+int get_persistent_queue_stats(const DoutPrefixProvider *dpp, librados::IoCtx &rados_ioctx,
+ const std::string &queue_name, rgw_topic_stats &stats, optional_yield y);
}
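
For illustration, a minimal caller sketch against the renamed stats helper; this is not part of the patch, and it assumes dpp, an IoCtx on the notification pool (ioctx), a loaded rgw_pubsub_topic (topic), and an optional_yield (y) are in scope:

  // stats are now keyed by the queue object name, not the topic name
  rgw_topic_stats stats;
  int r = rgw::notify::get_persistent_queue_stats(
      dpp, ioctx, topic.dest.persistent_queue, stats, y);
  if (r < 0) {
    return r; // queue missing or unreadable
  }
  ldpp_dout(dpp, 20) << "queue entries: " << stats.queue_entries
                     << ", pending reservations: " << stats.queue_reservations
                     << dendl;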
diff --git a/src/rgw/driver/rados/rgw_putobj_processor.cc b/src/rgw/driver/rados/rgw_putobj_processor.cc
index d1a4a53688b..d41678cdb06 100644
--- a/src/rgw/driver/rados/rgw_putobj_processor.cc
+++ b/src/rgw/driver/rados/rgw_putobj_processor.cc
@@ -378,6 +378,7 @@ int AtomicObjectProcessor::complete(size_t accounted_size,
obj_op.meta.mtime = mtime;
obj_op.meta.set_mtime = set_mtime;
obj_op.meta.owner = owner;
+ obj_op.meta.bucket_owner = bucket_info.owner;
obj_op.meta.flags = PUT_OBJ_CREATE;
obj_op.meta.olh_epoch = olh_epoch;
obj_op.meta.delete_at = delete_at;
@@ -517,6 +518,7 @@ int MultipartObjectProcessor::complete(size_t accounted_size,
obj_op.meta.set_mtime = set_mtime;
obj_op.meta.mtime = mtime;
obj_op.meta.owner = owner;
+ obj_op.meta.bucket_owner = bucket_info.owner;
obj_op.meta.delete_at = delete_at;
obj_op.meta.zones_trace = zones_trace;
obj_op.meta.modify_tail = true;
@@ -728,6 +730,7 @@ int AppendObjectProcessor::complete(size_t accounted_size, const string &etag, c
obj_op.meta.mtime = mtime;
obj_op.meta.set_mtime = set_mtime;
obj_op.meta.owner = owner;
+ obj_op.meta.bucket_owner = bucket_info.owner;
obj_op.meta.flags = PUT_OBJ_CREATE;
obj_op.meta.delete_at = delete_at;
obj_op.meta.user_data = user_data;
diff --git a/src/rgw/driver/rados/rgw_putobj_processor.h b/src/rgw/driver/rados/rgw_putobj_processor.h
index 35fc8c55105..655428f83e9 100644
--- a/src/rgw/driver/rados/rgw_putobj_processor.h
+++ b/src/rgw/driver/rados/rgw_putobj_processor.h
@@ -117,7 +117,7 @@ class ManifestObjectProcessor : public HeadObjectProcessor,
RGWRados* const store;
RGWBucketInfo& bucket_info;
rgw_placement_rule tail_placement_rule;
- rgw_user owner;
+ ACLOwner owner;
RGWObjectCtx& obj_ctx;
rgw_obj head_obj;
@@ -135,7 +135,7 @@ class ManifestObjectProcessor : public HeadObjectProcessor,
ManifestObjectProcessor(Aio *aio, RGWRados* store,
RGWBucketInfo& bucket_info,
const rgw_placement_rule *ptail_placement_rule,
- const rgw_user& owner, RGWObjectCtx& _obj_ctx,
+ const ACLOwner& owner, RGWObjectCtx& _obj_ctx,
const rgw_obj& _head_obj,
const DoutPrefixProvider* dpp,
optional_yield y,
@@ -151,7 +151,7 @@ class ManifestObjectProcessor : public HeadObjectProcessor,
}
}
- void set_owner(const rgw_user& _owner) {
+ void set_owner(const ACLOwner& _owner) {
owner = _owner;
}
@@ -177,7 +177,7 @@ class AtomicObjectProcessor : public ManifestObjectProcessor {
AtomicObjectProcessor(Aio *aio, RGWRados* store,
RGWBucketInfo& bucket_info,
const rgw_placement_rule *ptail_placement_rule,
- const rgw_user& owner,
+ const ACLOwner& owner,
RGWObjectCtx& obj_ctx, const rgw_obj& _head_obj,
std::optional<uint64_t> olh_epoch,
const std::string& unique_tag,
@@ -222,7 +222,7 @@ class MultipartObjectProcessor : public ManifestObjectProcessor {
MultipartObjectProcessor(Aio *aio, RGWRados* store,
RGWBucketInfo& bucket_info,
const rgw_placement_rule *ptail_placement_rule,
- const rgw_user& owner, RGWObjectCtx& obj_ctx,
+ const ACLOwner& owner, RGWObjectCtx& obj_ctx,
const rgw_obj& _head_obj,
const std::string& upload_id, uint64_t part_num,
const std::string& part_num_str,
@@ -266,7 +266,7 @@ class MultipartObjectProcessor : public ManifestObjectProcessor {
AppendObjectProcessor(Aio *aio, RGWRados* store,
RGWBucketInfo& bucket_info,
const rgw_placement_rule *ptail_placement_rule,
- const rgw_user& owner, RGWObjectCtx& obj_ctx,
+ const ACLOwner& owner, RGWObjectCtx& obj_ctx,
const rgw_obj& _head_obj,
const std::string& unique_tag, uint64_t position,
uint64_t *cur_accounted_size,
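
To make the signature change concrete, a hedged construction sketch (illustrative only; it assumes an Aio throttle aio, an RGWRados* store, plus bucket_info, obj_ctx, head_obj, tag, dpp, y and no_trace already in scope):

  ACLOwner owner;
  owner.id = bucket_info.owner;   // rgw_owner: user or account id
  // display_name would normally come from the decoded RGW_ATTR_ACL policy

  rgw::putobj::AtomicObjectProcessor processor(
      &aio, store, bucket_info, nullptr /* tail placement rule */,
      owner, obj_ctx, head_obj, std::nullopt /* olh_epoch */,
      tag, dpp, y, no_trace);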
diff --git a/src/rgw/driver/rados/rgw_rados.cc b/src/rgw/driver/rados/rgw_rados.cc
index c9da60eff9e..95f05f149a0 100644
--- a/src/rgw/driver/rados/rgw_rados.cc
+++ b/src/rgw/driver/rados/rgw_rados.cc
@@ -1411,7 +1411,7 @@ int RGWRados::init_begin(CephContext* _cct, const DoutPrefixProvider *dpp,
return ret;
}
- ret = ctl.init(&svc, driver, dpp);
+ ret = ctl.init(&svc, driver, *get_rados_handle(), dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to init ctls (ret=" << cpp_strerror(-ret) << ")" << dendl;
return ret;
@@ -2300,7 +2300,7 @@ void RGWRados::create_bucket_id(string *bucket_id)
int RGWRados::create_bucket(const DoutPrefixProvider* dpp,
optional_yield y,
const rgw_bucket& bucket,
- const rgw_user& owner,
+ const rgw_owner& owner,
const std::string& zonegroup_id,
const rgw_placement_rule& placement_rule,
const RGWZonePlacementInfo* zone_placement,
@@ -2853,7 +2853,8 @@ bool RGWRados::swift_versioning_enabled(const RGWBucketInfo& bucket_info) const
}
int RGWRados::swift_versioning_copy(RGWObjectCtx& obj_ctx,
- const rgw_user& user,
+ const ACLOwner& owner,
+ const rgw_user& remote_user,
RGWBucketInfo& bucket_info,
const rgw_obj& obj,
const DoutPrefixProvider *dpp,
@@ -2910,7 +2911,8 @@ int RGWRados::swift_versioning_copy(RGWObjectCtx& obj_ctx,
jspan_context no_trace{false, false};
r = copy_obj(obj_ctx,
- user,
+ owner,
+ remote_user,
NULL, /* req_info *info */
no_zone,
dest_obj,
@@ -2949,7 +2951,8 @@ int RGWRados::swift_versioning_copy(RGWObjectCtx& obj_ctx,
}
int RGWRados::swift_versioning_restore(RGWObjectCtx& obj_ctx,
- const rgw_user& user,
+ const ACLOwner& owner,
+ const rgw_user& remote_user,
RGWBucketInfo& bucket_info,
rgw_obj& obj,
bool& restored,
@@ -3007,7 +3010,8 @@ int RGWRados::swift_versioning_restore(RGWObjectCtx& obj_ctx,
jspan_context no_trace{false, false};
int ret = copy_obj(obj_ctx,
- user,
+ owner,
+ remote_user,
nullptr, /* req_info *info */
no_zone,
obj, /* dest obj */
@@ -3147,7 +3151,6 @@ int RGWRados::Object::Write::_do_write_meta(uint64_t size, uint64_t accounted_si
string etag;
string content_type;
- bufferlist acl_bl;
string storage_class;
map<string, bufferlist>::iterator iter;
@@ -3184,8 +3187,6 @@ int RGWRados::Object::Write::_do_write_meta(uint64_t size, uint64_t accounted_si
etag = rgw_bl_str(bl);
} else if (name.compare(RGW_ATTR_CONTENT_TYPE) == 0) {
content_type = rgw_bl_str(bl);
- } else if (name.compare(RGW_ATTR_ACL) == 0) {
- acl_bl = bl;
}
}
if (attrs.find(RGW_ATTR_PG_VER) == attrs.end()) {
@@ -3263,7 +3264,7 @@ int RGWRados::Object::Write::_do_write_meta(uint64_t size, uint64_t accounted_si
tracepoint(rgw_rados, complete_enter, req_id.c_str());
r = index_op->complete(rctx.dpp, poolid, epoch, size, accounted_size,
meta.set_mtime, etag, content_type,
- storage_class, &acl_bl,
+ storage_class, meta.owner,
meta.category, meta.remove_objs, rctx.y,
meta.user_data, meta.appendable, log_op);
tracepoint(rgw_rados, complete_exit, req_id.c_str());
@@ -3301,11 +3302,11 @@ int RGWRados::Object::Write::_do_write_meta(uint64_t size, uint64_t accounted_si
/* update quota cache */
if (meta.completeMultipart){
- store->quota_handler->update_stats(meta.owner, obj.bucket, (orig_exists ? 0 : 1),
+ store->quota_handler->update_stats(meta.bucket_owner, obj.bucket, (orig_exists ? 0 : 1),
0, orig_size);
}
else {
- store->quota_handler->update_stats(meta.owner, obj.bucket, (orig_exists ? 0 : 1),
+ store->quota_handler->update_stats(meta.bucket_owner, obj.bucket, (orig_exists ? 0 : 1),
accounted_size, orig_size);
}
return 0;
@@ -3659,7 +3660,13 @@ int RGWRados::rewrite_obj(RGWBucketInfo& dest_bucket_info, const rgw_obj& obj, c
attrset.erase(RGW_ATTR_TAIL_TAG);
attrset.erase(RGW_ATTR_STORAGE_CLASS);
- return copy_obj_data(octx, dest_bucket_info, dest_bucket_info.placement_rule,
+ ACLOwner owner;
+ if (auto i = attrset.find(RGW_ATTR_ACL); i != attrset.end()) {
+ (void) decode_policy(dpp, i->second, &owner);
+ }
+
+ return copy_obj_data(octx, owner, dest_bucket_info,
+ dest_bucket_info.placement_rule,
read_op, obj_size - 1, obj, NULL, mtime,
attrset, 0, real_time(), NULL, dpp, y);
}
@@ -3810,6 +3817,7 @@ int RGWRados::reindex_obj(rgw::sal::Driver* driver,
std::string etag;
std::string content_type;
std::string storage_class;
+ bool found_acl = false;
bufferlist acl_bl;
bool found_olh_info { false };
bufferlist olh_info_bl;
@@ -3820,7 +3828,7 @@ int RGWRados::reindex_obj(rgw::sal::Driver* driver,
read_attr(attr_set, RGW_ATTR_ETAG, etag);
read_attr(attr_set, RGW_ATTR_CONTENT_TYPE, content_type);
read_attr(attr_set, RGW_ATTR_STORAGE_CLASS, storage_class);
- read_attr(attr_set, RGW_ATTR_ACL, acl_bl);
+ read_attr(attr_set, RGW_ATTR_ACL, acl_bl, &found_acl);
read_attr(attr_set, RGW_ATTR_OLH_INFO, olh_info_bl, &found_olh_info);
read_attr(attr_set, RGW_ATTR_APPEND_PART_NUM, part_num_bl, &appendable);
@@ -3845,6 +3853,11 @@ int RGWRados::reindex_obj(rgw::sal::Driver* driver,
}
}
+ ACLOwner owner;
+ if (found_acl) {
+ (void) decode_policy(dpp, acl_bl, &owner);
+ }
+
Bucket bkt(this, bucket_info);
RGWRados::Bucket::UpdateIndex update_idx(&bkt, head_obj);
@@ -3859,7 +3872,7 @@ int RGWRados::reindex_obj(rgw::sal::Driver* driver,
etag,
content_type,
storage_class,
- &acl_bl,
+ owner,
RGWObjCategory::Main, // RGWObjCategory category,
nullptr, // remove_objs list
y,
@@ -4181,11 +4194,15 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx,
set_mtime_weight.high_precision = high_precision_time;
int ret;
+ // use an empty owner until we decode RGW_ATTR_ACL
+ ACLOwner owner;
+ RGWAccessControlPolicy policy;
+
rgw::BlockingAioThrottle aio(cct->_conf->rgw_put_obj_min_window_size);
using namespace rgw::putobj;
jspan_context no_trace{false, false};
AtomicObjectProcessor processor(&aio, this, dest_bucket_info, nullptr,
- user_id, obj_ctx, dest_obj, olh_epoch,
+ owner, obj_ctx, dest_obj, olh_epoch,
tag, rctx.dpp, rctx.y, no_trace);
RGWRESTConn *conn;
auto& zone_conn_map = svc.zone->get_zone_conn_map();
@@ -4345,44 +4362,33 @@ int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx,
}
}
- if (override_owner) {
- processor.set_owner(*override_owner);
-
- auto& obj_attrs = cb.get_attrs();
+ // decode the ACLOwner from RGW_ATTR_ACL for the bucket index
+ if (auto i = cb.get_attrs().find(RGW_ATTR_ACL); i != cb.get_attrs().end()) {
+ ret = decode_policy(rctx.dpp, i->second, &owner);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ if (override_owner) {
RGWUserInfo owner_info;
if (ctl.user->get_info_by_uid(rctx.dpp, *override_owner, &owner_info, rctx.y) < 0) {
ldpp_dout(rctx.dpp, 10) << "owner info does not exist" << dendl;
return -EINVAL;
}
- RGWAccessControlPolicy acl;
+ owner.id = *override_owner;
+ owner.display_name = owner_info.display_name;
- auto aiter = obj_attrs.find(RGW_ATTR_ACL);
- if (aiter == obj_attrs.end()) {
- ldpp_dout(rctx.dpp, 0) << "WARNING: " << __func__ << "(): object doesn't have ACL attribute, setting default ACLs" << dendl;
- acl.create_default(owner_info.user_id, owner_info.display_name);
- } else {
- auto iter = aiter->second.cbegin();
- try {
- acl.decode(iter);
- } catch (buffer::error& err) {
- ldpp_dout(rctx.dpp, 0) << "ERROR: " << __func__ << "(): could not decode policy, caught buffer::error" << dendl;
- return -EIO;
- }
- }
-
- ACLOwner new_owner;
- new_owner.id = *override_owner;
- new_owner.display_name = owner_info.display_name;
-
- acl.set_owner(new_owner);
+ policy.create_default(owner_info.user_id, owner_info.display_name);
bufferlist bl;
- acl.encode(bl);
- obj_attrs[RGW_ATTR_ACL] = std::move(bl);
+ policy.encode(bl);
+ cb.get_attrs()[RGW_ATTR_ACL] = std::move(bl);
}
+ processor.set_owner(owner);
+
if (source_zone.empty()) { /* need to preserve expiration if copy in the same zonegroup */
cb.get_attrs().erase(RGW_ATTR_DELETE_AT);
} else {
@@ -4599,7 +4605,8 @@ int RGWRados::copy_obj_to_remote_dest(const DoutPrefixProvider *dpp,
* Returns: 0 on success, -ERR# otherwise.
*/
int RGWRados::copy_obj(RGWObjectCtx& obj_ctx,
- const rgw_user& user_id,
+ const ACLOwner& owner,
+ const rgw_user& remote_user,
req_info *info,
const rgw_zone_id& source_zone,
const rgw_obj& dest_obj,
@@ -4658,7 +4665,7 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx,
if (remote_src || !source_zone.empty()) {
rgw_zone_set_entry source_trace_entry{source_zone.id, std::nullopt};
const req_context rctx{dpp, y, nullptr};
- return fetch_remote_obj(obj_ctx, user_id, info, source_zone,
+ return fetch_remote_obj(obj_ctx, remote_user, info, source_zone,
dest_obj, src_obj, dest_bucket_info, &src_bucket_info,
dest_placement, src_mtime, mtime, mod_ptr,
unmod_ptr, high_precision_time,
@@ -4735,7 +4742,7 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx,
if (remote_dest) {
/* dest is in a different zonegroup, copy it there */
- return copy_obj_to_remote_dest(dpp, astate, attrs, read_op, user_id, dest_obj, mtime, y);
+ return copy_obj_to_remote_dest(dpp, astate, attrs, read_op, remote_user, dest_obj, mtime, y);
}
uint64_t max_chunk_size;
@@ -4802,7 +4809,7 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx,
if (copy_data) { /* refcounting tail wouldn't work here, just copy the data */
attrs.erase(RGW_ATTR_TAIL_TAG);
- return copy_obj_data(obj_ctx, dest_bucket_info, dest_placement, read_op, obj_size - 1, dest_obj,
+ return copy_obj_data(obj_ctx, owner, dest_bucket_info, dest_placement, read_op, obj_size - 1, dest_obj,
mtime, real_time(), attrs, olh_epoch, delete_at, petag, dpp, y);
}
@@ -4900,7 +4907,8 @@ int RGWRados::copy_obj(RGWObjectCtx& obj_ctx,
write_op.meta.data = &first_chunk;
write_op.meta.manifest = pmanifest;
write_op.meta.ptag = &tag;
- write_op.meta.owner = dest_bucket_info.owner;
+ write_op.meta.owner = owner;
+ write_op.meta.bucket_owner = dest_bucket_info.owner;
write_op.meta.mtime = mtime;
write_op.meta.flags = PUT_OBJ_CREATE;
write_op.meta.category = category;
@@ -4959,6 +4967,7 @@ done_ret:
int RGWRados::copy_obj_data(RGWObjectCtx& obj_ctx,
+ const ACLOwner& owner,
RGWBucketInfo& dest_bucket_info,
const rgw_placement_rule& dest_placement,
RGWRados::Object::Read& read_op, off_t end,
@@ -4980,7 +4989,7 @@ int RGWRados::copy_obj_data(RGWObjectCtx& obj_ctx,
using namespace rgw::putobj;
jspan_context no_trace{false, false};
AtomicObjectProcessor processor(aio.get(), this, dest_bucket_info,
- &dest_placement, dest_bucket_info.owner,
+ &dest_placement, owner,
obj_ctx, dest_obj, olh_epoch, tag, dpp, y, no_trace);
int ret = processor.prepare(y);
if (ret < 0)
@@ -5076,7 +5085,13 @@ int RGWRados::transition_obj(RGWObjectCtx& obj_ctx,
attrs.erase(RGW_ATTR_ID_TAG);
attrs.erase(RGW_ATTR_TAIL_TAG);
+ ACLOwner owner;
+ if (auto i = attrs.find(RGW_ATTR_ACL); i != attrs.end()) {
+ (void) decode_policy(dpp, i->second, &owner);
+ }
+
ret = copy_obj_data(obj_ctx,
+ owner,
bucket_info,
placement_rule,
read_op,
@@ -5696,7 +5711,7 @@ int RGWRados::Object::Delete::delete_obj(optional_yield y, const DoutPrefixProvi
struct rgw_bucket_dir_entry_meta meta;
- meta.owner = params.obj_owner.id.to_str();
+ meta.owner = to_string(params.obj_owner.id);
meta.owner_display_name = params.obj_owner.display_name;
if (real_clock::is_zero(params.mtime)) {
@@ -6556,9 +6571,9 @@ int RGWRados::set_attrs(const DoutPrefixProvider *dpp, RGWObjectCtx* octx, RGWBu
r = rgw_rados_operate(dpp, ioctx, ref.obj.oid, &op, y);
if (state) {
if (r >= 0) {
- bufferlist acl_bl;
+ ACLOwner owner;
if (iter = attrs.find(RGW_ATTR_ACL); iter != attrs.end()) {
- acl_bl = iter->second;
+ (void) decode_policy(dpp, iter->second, &owner);
}
std::string etag;
if (iter = attrs.find(RGW_ATTR_ETAG); iter != attrs.end()) {
@@ -6575,7 +6590,7 @@ int RGWRados::set_attrs(const DoutPrefixProvider *dpp, RGWObjectCtx* octx, RGWBu
uint64_t epoch = ioctx.get_last_version();
int64_t poolid = ioctx.get_id();
r = index_op.complete(dpp, poolid, epoch, state->size, state->accounted_size,
- mtime, etag, content_type, storage_class, &acl_bl,
+ mtime, etag, content_type, storage_class, owner,
RGWObjCategory::Main, nullptr, y);
} else {
int ret = index_op.cancel(dpp, nullptr, y);
@@ -6942,7 +6957,7 @@ int RGWRados::Bucket::UpdateIndex::complete(const DoutPrefixProvider *dpp, int64
uint64_t size, uint64_t accounted_size,
const ceph::real_time& ut, const string& etag,
const string& content_type, const string& storage_class,
- bufferlist *acl_bl,
+ const ACLOwner& owner,
RGWObjCategory category,
list<rgw_obj_index_key> *remove_objs,
optional_yield y,
@@ -6972,14 +6987,7 @@ int RGWRados::Bucket::UpdateIndex::complete(const DoutPrefixProvider *dpp, int64
if (user_data)
ent.meta.user_data = *user_data;
- ACLOwner owner;
- if (acl_bl && acl_bl->length()) {
- int ret = store->decode_policy(dpp, *acl_bl, &owner);
- if (ret < 0) {
- ldpp_dout(dpp, 0) << "WARNING: could not decode policy ret=" << ret << dendl;
- }
- }
- ent.meta.owner = owner.id.to_str();
+ ent.meta.owner = to_string(owner.id);
ent.meta.owner_display_name = owner.display_name;
ent.meta.content_type = content_type;
ent.meta.appendable = appendable;
@@ -10224,7 +10232,7 @@ int RGWRados::check_disk_state(const DoutPrefixProvider *dpp,
object.meta.etag = etag;
object.meta.content_type = content_type;
object.meta.storage_class = storage_class;
- object.meta.owner = owner.id.to_str();
+ object.meta.owner = to_string(owner.id);
object.meta.owner_display_name = owner.display_name;
object.meta.appendable = appendable;
@@ -10254,7 +10262,7 @@ int RGWRados::check_disk_state(const DoutPrefixProvider *dpp,
list_state.tag = astate->obj_tag.c_str();
}
- list_state.meta.owner = owner.id.to_str();
+ list_state.meta.owner = to_string(owner.id);
list_state.meta.owner_display_name = owner.display_name;
list_state.exists = true;
@@ -10378,7 +10386,7 @@ int RGWRados::add_bucket_to_reshard(const DoutPrefixProvider *dpp, const RGWBuck
cls_rgw_reshard_entry entry;
entry.time = real_clock::now();
- entry.tenant = bucket_info.owner.tenant;
+ entry.tenant = bucket_info.bucket.tenant;
entry.bucket_name = bucket_info.bucket.name;
entry.bucket_id = bucket_info.bucket.bucket_id;
entry.old_num_shards = num_source_shards;
@@ -10387,7 +10395,7 @@ int RGWRados::add_bucket_to_reshard(const DoutPrefixProvider *dpp, const RGWBuck
return reshard.add(dpp, entry, y);
}
-int RGWRados::check_quota(const DoutPrefixProvider *dpp, const rgw_user& bucket_owner, rgw_bucket& bucket,
+int RGWRados::check_quota(const DoutPrefixProvider *dpp, const rgw_owner& bucket_owner, rgw_bucket& bucket,
RGWQuota& quota,
uint64_t obj_size, optional_yield y,
bool check_size_only)
diff --git a/src/rgw/driver/rados/rgw_rados.h b/src/rgw/driver/rados/rgw_rados.h
index f05b661b6fd..481a94a140d 100644
--- a/src/rgw/driver/rados/rgw_rados.h
+++ b/src/rgw/driver/rados/rgw_rados.h
@@ -633,7 +633,7 @@ public:
int create_bucket(const DoutPrefixProvider* dpp,
optional_yield y,
const rgw_bucket& bucket,
- const rgw_user& owner,
+ const rgw_owner& owner,
const std::string& zonegroup_id,
const rgw_placement_rule& placement_rule,
const RGWZonePlacementInfo* zone_placement,
@@ -797,7 +797,8 @@ public:
const std::string *ptag;
std::list<rgw_obj_index_key> *remove_objs;
ceph::real_time set_mtime;
- rgw_user owner;
+ rgw_owner bucket_owner; // for quota stats update
+ ACLOwner owner; // owner/owner_display_name for bucket index
RGWObjCategory category;
int flags;
const char *if_match;
@@ -838,7 +839,7 @@ public:
RGWRados::Object *target;
struct DeleteParams {
- rgw_user bucket_owner;
+ rgw_owner bucket_owner; // for quota stats update
int versioning_status; // versioning flags defined in enum RGWBucketFlags
ACLOwner obj_owner; // needed for creation of deletion marker
uint64_t olh_epoch;
@@ -976,7 +977,7 @@ public:
uint64_t accounted_size, const ceph::real_time& ut,
const std::string& etag, const std::string& content_type,
const std::string& storage_class,
- bufferlist *acl_bl, RGWObjCategory category,
+ const ACLOwner& owner, RGWObjCategory category,
std::list<rgw_obj_index_key> *remove_objs,
optional_yield y,
const std::string *user_data = nullptr,
@@ -1071,13 +1072,15 @@ public:
bool swift_versioning_enabled(const RGWBucketInfo& bucket_info) const;
int swift_versioning_copy(RGWObjectCtx& obj_ctx, /* in/out */
- const rgw_user& user, /* in */
+ const ACLOwner& owner, /* in */
+ const rgw_user& remote_user, /* in */
RGWBucketInfo& bucket_info, /* in */
const rgw_obj& obj, /* in */
const DoutPrefixProvider *dpp, /* in */
optional_yield y); /* in */
int swift_versioning_restore(RGWObjectCtx& obj_ctx, /* in/out */
- const rgw_user& user, /* in */
+ const ACLOwner& owner, /* in */
+ const rgw_user& remote_user, /* in */
RGWBucketInfo& bucket_info, /* in */
rgw_obj& obj, /* in/out */
bool& restored, /* out */
@@ -1173,7 +1176,8 @@ public:
* Returns: 0 on success, -ERR# otherwise.
*/
int copy_obj(RGWObjectCtx& obj_ctx,
- const rgw_user& user_id,
+ const ACLOwner& owner, // owner of destination object
+ const rgw_user& remote_user, // uid for fetch_remote_obj() auth
req_info *info,
const rgw_zone_id& source_zone,
const rgw_obj& dest_obj,
@@ -1204,6 +1208,7 @@ public:
jspan_context& trace);
int copy_obj_data(RGWObjectCtx& obj_ctx,
+ const ACLOwner& owner,
RGWBucketInfo& dest_bucket_info,
const rgw_placement_rule& dest_placement,
RGWRados::Object::Read& read_op, off_t end,
@@ -1577,7 +1582,7 @@ public:
int fix_tail_obj_locator(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info,
rgw_obj_key& key, bool fix, bool *need_fix, optional_yield y);
- int check_quota(const DoutPrefixProvider *dpp, const rgw_user& bucket_owner, rgw_bucket& bucket,
+ int check_quota(const DoutPrefixProvider *dpp, const rgw_owner& bucket_owner, rgw_bucket& bucket,
RGWQuota& quota, uint64_t obj_size,
optional_yield y, bool check_size_only = false);
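
A short sketch of the owner split above (illustrative; assumes a prepared RGWRados::Object::Write named write_op, an ACLOwner owner, and bucket_info in scope):

  // ACLOwner feeds the bucket index entry (owner id + display name)
  write_op.meta.owner = owner;
  // rgw_owner feeds quota/stats accounting; may be a user or an account
  write_op.meta.bucket_owner = bucket_info.owner;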
diff --git a/src/rgw/driver/rados/rgw_reshard.cc b/src/rgw/driver/rados/rgw_reshard.cc
index 7a54da7fe7d..c1011bd60a5 100644
--- a/src/rgw/driver/rados/rgw_reshard.cc
+++ b/src/rgw/driver/rados/rgw_reshard.cc
@@ -1056,7 +1056,7 @@ int RGWReshard::update(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucke
cls_rgw_reshard_entry entry;
entry.bucket_name = bucket_info.bucket.name;
entry.bucket_id = bucket_info.bucket.bucket_id;
- entry.tenant = bucket_info.owner.tenant;
+ entry.tenant = bucket_info.bucket.tenant;
int ret = get(dpp, entry);
if (ret < 0) {
diff --git a/src/rgw/driver/rados/rgw_rest_user.cc b/src/rgw/driver/rados/rgw_rest_user.cc
index 58623cd6468..34112c94727 100644
--- a/src/rgw/driver/rados/rgw_rest_user.cc
+++ b/src/rgw/driver/rados/rgw_rest_user.cc
@@ -22,6 +22,7 @@ using namespace std;
int fetch_access_keys_from_master(const DoutPrefixProvider* dpp, req_state* s,
std::map<std::string, RGWAccessKey>& keys,
+ ceph::real_time& create_date,
optional_yield y)
{
bufferlist data;
@@ -36,6 +37,7 @@ int fetch_access_keys_from_master(const DoutPrefixProvider* dpp, req_state* s,
RGWUserInfo ui;
ui.decode_json(&jp);
keys = std::move(ui.access_keys);
+ create_date = ui.create_date;
return 0;
}
@@ -159,6 +161,7 @@ void RGWOp_User_Create::execute(optional_yield y)
bool gen_key;
bool suspended;
bool system;
+ bool account_root = false;
bool exclusive;
int32_t max_buckets;
@@ -181,10 +184,13 @@ void RGWOp_User_Create::execute(optional_yield y)
RESTArgs::get_bool(s, "suspended", false, &suspended);
RESTArgs::get_int32(s, "max-buckets", default_max_buckets, &max_buckets);
RESTArgs::get_bool(s, "system", false, &system);
+ RESTArgs::get_bool(s, "account-root", false, &account_root);
RESTArgs::get_bool(s, "exclusive", false, &exclusive);
RESTArgs::get_string(s, "op-mask", op_mask_str, &op_mask_str);
RESTArgs::get_string(s, "default-placement", default_placement_str, &default_placement_str);
RESTArgs::get_string(s, "placement-tags", placement_tags_str, &placement_tags_str);
+ RESTArgs::get_string(s, "account-id", "", &op_state.account_id);
+ RESTArgs::get_string(s, "path", "", &op_state.path);
if (!s->user->get_info().system && system) {
ldpp_dout(this, 0) << "cannot set system flag by non-system user" << dendl;
@@ -237,6 +243,9 @@ void RGWOp_User_Create::execute(optional_yield y)
if (s->info.args.exists("system"))
op_state.set_system(system);
+ if (s->info.args.exists("account-root"))
+ op_state.set_account_root(account_root);
+
if (s->info.args.exists("exclusive"))
op_state.set_exclusive(exclusive);
@@ -258,7 +267,9 @@ void RGWOp_User_Create::execute(optional_yield y)
}
if (!s->penv.site->is_meta_master()) {
- op_ret = fetch_access_keys_from_master(this, s, op_state.op_access_keys, y);
+ op_state.create_date.emplace();
+ op_ret = fetch_access_keys_from_master(this, s, op_state.op_access_keys,
+ *op_state.create_date, y);
if (op_ret < 0) {
return;
}
@@ -302,6 +313,7 @@ void RGWOp_User_Modify::execute(optional_yield y)
bool gen_key;
bool suspended;
bool system;
+ bool account_root = false;
bool email_set;
bool quota_set;
int32_t max_buckets;
@@ -321,9 +333,12 @@ void RGWOp_User_Modify::execute(optional_yield y)
RESTArgs::get_string(s, "key-type", key_type_str, &key_type_str);
RESTArgs::get_bool(s, "system", false, &system);
+ RESTArgs::get_bool(s, "account-root", false, &account_root);
RESTArgs::get_string(s, "op-mask", op_mask_str, &op_mask_str);
RESTArgs::get_string(s, "default-placement", default_placement_str, &default_placement_str);
RESTArgs::get_string(s, "placement-tags", placement_tags_str, &placement_tags_str);
+ RESTArgs::get_string(s, "account-id", "", &op_state.account_id);
+ RESTArgs::get_string(s, "path", "", &op_state.path);
if (!s->user->get_info().system && system) {
ldpp_dout(this, 0) << "cannot set system flag by non-system user" << dendl;
@@ -373,6 +388,9 @@ void RGWOp_User_Modify::execute(optional_yield y)
if (s->info.args.exists("system"))
op_state.set_system(system);
+ if (s->info.args.exists("account-root"))
+ op_state.set_account_root(account_root);
+
if (!op_mask_str.empty()) {
uint32_t op_mask;
int ret = rgw_parse_op_type_list(op_mask_str, &op_mask);
@@ -402,7 +420,9 @@ void RGWOp_User_Modify::execute(optional_yield y)
}
if (!s->penv.site->is_meta_master()) {
- op_ret = fetch_access_keys_from_master(this, s, op_state.op_access_keys, y);
+ op_state.create_date.emplace();
+ op_ret = fetch_access_keys_from_master(this, s, op_state.op_access_keys,
+ *op_state.create_date, y);
if (op_ret < 0) {
return;
}
diff --git a/src/rgw/driver/rados/rgw_sal_rados.cc b/src/rgw/driver/rados/rgw_sal_rados.cc
index 7239e289b6e..78f76218e84 100644
--- a/src/rgw/driver/rados/rgw_sal_rados.cc
+++ b/src/rgw/driver/rados/rgw_sal_rados.cc
@@ -23,6 +23,7 @@
#include <boost/process.hpp>
#include "common/async/blocked_completion.h"
+#include "include/function2.hpp"
#include "common/Clock.h"
#include "common/errno.h"
@@ -37,12 +38,14 @@
#include "rgw_aio_throttle.h"
#include "rgw_tools.h"
#include "rgw_tracer.h"
+#include "rgw_oidc_provider.h"
#include "rgw_zone.h"
#include "rgw_rest_conn.h"
#include "rgw_service.h"
#include "rgw_lc.h"
#include "rgw_lc_tier.h"
+#include "rgw_mdlog.h"
#include "rgw_rest_admin.h"
#include "rgw_rest_bucket.h"
#include "rgw_rest_metadata.h"
@@ -52,6 +55,7 @@
#include "rgw_rest_realm.h"
#include "rgw_rest_user.h"
#include "services/svc_sys_obj.h"
+#include "services/svc_mdlog.h"
#include "services/svc_meta.h"
#include "services/svc_meta_be_sobj.h"
#include "services/svc_cls.h"
@@ -65,8 +69,15 @@
#include "services/svc_sys_obj_cache.h"
#include "cls/rgw/cls_rgw_client.h"
+#include "account.h"
+#include "buckets.h"
+#include "group.h"
+#include "groups.h"
+#include "roles.h"
+#include "users.h"
#include "rgw_pubsub.h"
#include "topic.h"
+#include "topics.h"
#define dout_subsys ceph_subsys_rgw
@@ -98,29 +109,47 @@ static int drain_aio(std::list<librados::AioCompletion*>& handles)
return ret;
}
-int RadosUser::list_buckets(const DoutPrefixProvider* dpp, const std::string& marker,
- const std::string& end_marker, uint64_t max, bool need_stats,
- BucketList &result, optional_yield y)
+// return the {user}.buckets or {account}.buckets object
+static rgw_raw_obj get_owner_buckets_obj(RGWSI_User* svc_user,
+ RGWSI_Zone* svc_zone,
+ const rgw_owner& owner)
{
- RGWUserBuckets ulist;
- bool is_truncated = false;
+ struct visitor {
+ RGWSI_User* svc_user;
+ RGWSI_Zone* svc_zone;
- int ret = store->ctl()->user->list_buckets(dpp, get_id(), marker, end_marker,
- max, need_stats, &ulist,
- &is_truncated, y);
- if (ret < 0)
- return ret;
+ rgw_raw_obj operator()(const rgw_user& user) {
+ return svc_user->get_buckets_obj(user);
+ }
+ rgw_raw_obj operator()(const rgw_account_id& id) {
+ const RGWZoneParams& zone = svc_zone->get_zone_params();
+ return rgwrados::account::get_buckets_obj(zone, id);
+ }
+ };
+ return std::visit(visitor{svc_user, svc_zone}, owner);
+}
- result.buckets.clear();
+int RadosStore::list_buckets(const DoutPrefixProvider* dpp,
+ const rgw_owner& owner, const std::string& tenant,
+ const std::string& marker, const std::string& end_marker,
+ uint64_t max, bool need_stats,
+ BucketList& listing, optional_yield y)
+{
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const rgw_raw_obj& obj = get_owner_buckets_obj(svc()->user, svc()->zone, owner);
- for (auto& ent : ulist.get_buckets()) {
- result.buckets.push_back(std::move(ent.second));
+ int ret = rgwrados::buckets::list(dpp, y, rados, obj, tenant,
+ marker, end_marker, max, listing);
+ if (ret < 0) {
+ return ret;
}
- if (is_truncated && !result.buckets.empty()) {
- result.next_marker = result.buckets.back().bucket.name;
- } else {
- result.next_marker.clear();
+ if (need_stats) {
+ ret = ctl()->bucket->read_buckets_stats(listing.buckets, y, dpp);
+ if (ret < 0 && ret != -ENOENT) {
+ ldpp_dout(dpp, 0) << "ERROR: could not get stats for buckets" << dendl;
+ return ret;
+ }
}
return 0;
}
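
Aside, not part of the patch: the visitor above could equally be written with the common C++17 overloaded-lambda idiom; a sketch under that assumption (the helper name is hypothetical):

template <class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template <class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

static rgw_raw_obj owner_buckets_obj(RGWSI_User* svc_user,
                                     RGWSI_Zone* svc_zone,
                                     const rgw_owner& owner)
{
  return std::visit(overloaded{
      [&] (const rgw_user& user) {
        return svc_user->get_buckets_obj(user);
      },
      [&] (const rgw_account_id& id) {
        return rgwrados::account::get_buckets_obj(
            svc_zone->get_zone_params(), id);
      }
    }, owner);
}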
@@ -184,24 +213,6 @@ int RadosUser::merge_and_store_attrs(const DoutPrefixProvider* dpp, Attrs& new_a
return store_user(dpp, y, false);
}
-int RadosUser::read_stats(const DoutPrefixProvider *dpp,
- optional_yield y, RGWStorageStats* stats,
- ceph::real_time* last_stats_sync,
- ceph::real_time* last_stats_update)
-{
- return store->ctl()->user->read_stats(dpp, get_id(), stats, y, last_stats_sync, last_stats_update);
-}
-
-int RadosUser::read_stats_async(const DoutPrefixProvider *dpp, boost::intrusive_ptr<ReadStatsCB> cb)
-{
- return store->svc()->user->read_stats_async(dpp, get_id(), cb);
-}
-
-int RadosUser::complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y)
-{
- return store->svc()->user->complete_flush_stats(dpp, get_id(), y);
-}
-
int RadosUser::read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool* is_truncated,
RGWUsageIter& usage_iter,
@@ -271,6 +282,39 @@ int RadosUser::verify_mfa(const std::string& mfa_str, bool* verified,
return 0;
}
+int RadosUser::list_groups(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view marker, uint32_t max_items,
+ GroupList& listing)
+{
+ RGWSI_SysObj& sysobj = *store->svc()->sysobj;
+ const RGWZoneParams& zone = store->svc()->zone->get_zone_params();
+
+ const auto& ids = info.group_ids;
+ for (auto id = ids.lower_bound(marker); id != ids.end(); ++id) {
+ if (listing.groups.size() >= max_items) {
+ listing.next_marker = *id;
+ return 0;
+ }
+
+ RGWGroupInfo info;
+ Attrs attrs_ignored;
+ ceph::real_time mtime_ignored;
+ RGWObjVersionTracker objv_ignored;
+ int r = rgwrados::group::read(dpp, y, sysobj, zone, *id, info,
+ attrs_ignored, mtime_ignored, objv_ignored);
+ if (r == -ENOENT) {
+ continue;
+ }
+ if (r < 0) {
+ return r;
+ }
+ listing.groups.push_back(std::move(info));
+ }
+
+ listing.next_marker.clear();
+ return 0;
+}
+
RadosBucket::~RadosBucket() {}
int RadosBucket::remove(const DoutPrefixProvider* dpp,
@@ -351,7 +395,8 @@ int RadosBucket::remove(const DoutPrefixProvider* dpp,
}
}
- ret = store->ctl()->bucket->sync_user_stats(dpp, info.owner, info, y, nullptr);
+ librados::Rados& rados = *store->getRados()->get_rados_handle();
+ ret = store->ctl()->bucket->sync_owner_stats(dpp, rados, info.owner, info, y, nullptr);
if (ret < 0) {
ldout(store->ctx(), 1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
}
@@ -369,14 +414,15 @@ int RadosBucket::remove(const DoutPrefixProvider* dpp,
// if bucket has notification definitions associated with it
// they should be removed (note that any pending notifications on the bucket are still going to be sent)
- const RGWPubSub ps(store, info.owner.tenant, *store->svc()->site);
+ const RGWPubSub ps(store, info.bucket.tenant, *store->svc()->site);
const RGWPubSub::Bucket ps_bucket(ps, this);
const auto ps_ret = ps_bucket.remove_notifications(dpp, y);
if (ps_ret < 0 && ps_ret != -ENOENT) {
ldpp_dout(dpp, -1) << "ERROR: unable to remove notifications from bucket. ret=" << ps_ret << dendl;
}
- ret = store->ctl()->bucket->unlink_bucket(info.owner, info.bucket, y, dpp, false);
+ ret = store->ctl()->bucket->unlink_bucket(rados, info.owner,
+ info.bucket, y, dpp, false);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: unable to remove user bucket information" << dendl;
}
@@ -501,7 +547,7 @@ int RadosBucket::remove_bypass_gc(int concurrent_max, bool
return ret;
}
- sync_user_stats(dpp, y, nullptr);
+ ret = sync_owner_stats(dpp, y, nullptr);
if (ret < 0) {
ldpp_dout(dpp, 1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
}
@@ -565,10 +611,11 @@ int RadosBucket::read_stats_async(const DoutPrefixProvider *dpp,
return store->getRados()->get_bucket_stats_async(dpp, get_info(), idx_layout, shard_id, ctx);
}
-int RadosBucket::sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y,
- RGWBucketEnt* ent)
+int RadosBucket::sync_owner_stats(const DoutPrefixProvider *dpp, optional_yield y,
+ RGWBucketEnt* ent)
{
- return store->ctl()->bucket->sync_user_stats(dpp, info.owner, info, y, ent);
+ librados::Rados& rados = *store->getRados()->get_rados_handle();
+ return store->ctl()->bucket->sync_owner_stats(dpp, rados, info.owner, info, y, ent);
}
int RadosBucket::check_bucket_shards(const DoutPrefixProvider* dpp,
@@ -577,17 +624,19 @@ int RadosBucket::check_bucket_shards(const DoutPrefixProvider* dpp,
return store->getRados()->check_bucket_shards(info, num_objs, dpp, y);
}
-int RadosBucket::link(const DoutPrefixProvider* dpp, const rgw_user& new_user, optional_yield y, bool update_entrypoint, RGWObjVersionTracker* objv)
+int RadosBucket::link(const DoutPrefixProvider* dpp, const rgw_owner& new_owner,
+ optional_yield y, bool update_entrypoint, RGWObjVersionTracker* objv)
{
RGWBucketEntryPoint ep;
ep.bucket = info.bucket;
- ep.owner = new_user;
+ ep.owner = new_owner;
ep.creation_time = get_creation_time();
ep.linked = true;
Attrs ep_attrs;
rgw_ep_info ep_data{ep, ep_attrs};
- int r = store->ctl()->bucket->link_bucket(new_user, info.bucket,
+ librados::Rados& rados = *store->getRados()->get_rados_handle();
+ int r = store->ctl()->bucket->link_bucket(rados, new_owner, info.bucket,
get_creation_time(), y, dpp, update_entrypoint,
&ep_data);
if (r < 0)
@@ -599,20 +648,50 @@ int RadosBucket::link(const DoutPrefixProvider* dpp, const rgw_user& new_user, o
return r;
}
-int RadosBucket::unlink(const DoutPrefixProvider* dpp, const rgw_user& owner, optional_yield y, bool update_entrypoint)
+int RadosBucket::unlink(const DoutPrefixProvider* dpp, const rgw_owner& owner, optional_yield y, bool update_entrypoint)
{
- return store->ctl()->bucket->unlink_bucket(owner, info.bucket, y, dpp, update_entrypoint);
+ librados::Rados& rados = *store->getRados()->get_rados_handle();
+ return store->ctl()->bucket->unlink_bucket(rados, owner, info.bucket,
+ y, dpp, update_entrypoint);
}
-int RadosBucket::chown(const DoutPrefixProvider* dpp, const rgw_user& new_owner, optional_yield y)
+int RadosBucket::chown(const DoutPrefixProvider* dpp, const rgw_owner& new_owner, optional_yield y)
{
- std::string obj_marker;
- int r = this->unlink(dpp, info.owner, y);
+ // unlink from the owner, but don't update the entrypoint until link()
+ int r = this->unlink(dpp, info.owner, y, false);
+ if (r < 0) {
+ return r;
+ }
+
+ r = this->link(dpp, new_owner, y);
if (r < 0) {
return r;
}
- return this->link(dpp, new_owner, y);
+ // write updated owner to bucket instance metadata
+ info.owner = new_owner;
+
+ // update ACLOwner
+ if (auto i = attrs.find(RGW_ATTR_ACL); i != attrs.end()) {
+ try {
+ auto p = i->second.cbegin();
+
+ RGWAccessControlPolicy acl;
+ decode(acl, p);
+
+ acl.get_owner().id = new_owner;
+
+ bufferlist bl;
+ encode(acl, bl);
+
+ i->second = std::move(bl);
+ } catch (const buffer::error&) {
+ // not fatal
+ }
+ }
+
+ constexpr bool exclusive = false;
+ return put_info(dpp, exclusive, ceph::real_clock::now(), y);
}
int RadosBucket::put_info(const DoutPrefixProvider* dpp, bool exclusive, ceph::real_time _mtime, optional_yield y)
@@ -652,14 +731,22 @@ int RadosBucket::read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch,
RGWUsageIter& usage_iter,
map<rgw_user_bucket, rgw_usage_log_entry>& usage)
{
- return store->getRados()->read_usage(dpp, info.owner, get_name(), start_epoch,
+ const rgw_user* user = std::get_if<rgw_user>(&info.owner);
+ if (!user) {
+ return -ENOTSUP; // not supported for account owners
+ }
+ return store->getRados()->read_usage(dpp, *user, get_name(), start_epoch,
end_epoch, max_entries, is_truncated,
usage_iter, usage);
}
int RadosBucket::trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, optional_yield y)
{
- return store->getRados()->trim_usage(dpp, info.owner, get_name(), start_epoch, end_epoch, y);
+ const rgw_user* user = std::get_if<rgw_user>(&info.owner);
+ if (!user) {
+ return -ENOTSUP; // not supported for account owners
+ }
+ return store->getRados()->trim_usage(dpp, *user, get_name(), start_epoch, end_epoch, y);
}
int RadosBucket::remove_objs_from_index(const DoutPrefixProvider *dpp, std::list<rgw_obj_index_key>& objs_to_unlink)
@@ -947,8 +1034,12 @@ int RadosStore::get_user_by_access_key(const DoutPrefixProvider* dpp, const std:
RGWUserInfo uinfo;
User* u;
RGWObjVersionTracker objv_tracker;
+ Attrs attrs;
- int r = ctl()->user->get_info_by_access_key(dpp, key, &uinfo, y, RGWUserCtl::GetParams().set_objv_tracker(&objv_tracker));
+ int r = ctl()->user->get_info_by_access_key(
+ dpp, key, &uinfo, y,
+ RGWUserCtl::GetParams().set_objv_tracker(&objv_tracker)
+ .set_attrs(&attrs));
if (r < 0)
return r;
@@ -957,6 +1048,7 @@ int RadosStore::get_user_by_access_key(const DoutPrefixProvider* dpp, const std:
return -ENOMEM;
u->get_version_tracker() = objv_tracker;
+ u->get_attrs() = std::move(attrs);
user->reset(u);
return 0;
@@ -967,8 +1059,12 @@ int RadosStore::get_user_by_email(const DoutPrefixProvider* dpp, const std::stri
RGWUserInfo uinfo;
User* u;
RGWObjVersionTracker objv_tracker;
+ Attrs attrs;
- int r = ctl()->user->get_info_by_email(dpp, email, &uinfo, y, RGWUserCtl::GetParams().set_objv_tracker(&objv_tracker));
+ int r = ctl()->user->get_info_by_email(
+ dpp, email, &uinfo, y,
+ RGWUserCtl::GetParams().set_objv_tracker(&objv_tracker)
+ .set_attrs(&attrs));
if (r < 0)
return r;
@@ -977,6 +1073,7 @@ int RadosStore::get_user_by_email(const DoutPrefixProvider* dpp, const std::stri
return -ENOMEM;
u->get_version_tracker() = objv_tracker;
+ u->get_attrs() = std::move(attrs);
user->reset(u);
return 0;
@@ -987,8 +1084,12 @@ int RadosStore::get_user_by_swift(const DoutPrefixProvider* dpp, const std::stri
RGWUserInfo uinfo;
User* u;
RGWObjVersionTracker objv_tracker;
+ Attrs attrs;
- int r = ctl()->user->get_info_by_swift(dpp, user_str, &uinfo, y, RGWUserCtl::GetParams().set_objv_tracker(&objv_tracker));
+ int r = ctl()->user->get_info_by_swift(
+ dpp, user_str, &uinfo, y,
+ RGWUserCtl::GetParams().set_objv_tracker(&objv_tracker)
+ .set_attrs(&attrs));
if (r < 0)
return r;
@@ -997,11 +1098,433 @@ int RadosStore::get_user_by_swift(const DoutPrefixProvider* dpp, const std::stri
return -ENOMEM;
u->get_version_tracker() = objv_tracker;
+ u->get_attrs() = std::move(attrs);
user->reset(u);
return 0;
}
+int RadosStore::load_account_by_id(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view id,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv)
+{
+ ceph::real_time mtime; // ignored
+ return rgwrados::account::read(
+ dpp, y, *svc()->sysobj,
+ svc()->zone->get_zone_params(),
+ id, info, attrs, mtime, objv);
+}
+
+int RadosStore::load_account_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view name,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv)
+{
+ return rgwrados::account::read_by_name(
+ dpp, y, *svc()->sysobj,
+ svc()->zone->get_zone_params(),
+ tenant, name, info, attrs, objv);
+}
+
+int RadosStore::load_account_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view email,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv)
+{
+ return rgwrados::account::read_by_email(
+ dpp, y, *svc()->sysobj,
+ svc()->zone->get_zone_params(),
+ email, info, attrs, objv);
+}
+
+static int write_mdlog_entry(const DoutPrefixProvider* dpp, optional_yield y,
+ RGWSI_MDLog& mdlog_svc,
+ const std::string& section,
+ const std::string& key,
+ const RGWObjVersionTracker& objv)
+{
+ RGWMetadataLogData entry;
+ entry.read_version = objv.read_version;
+ entry.write_version = objv.write_version;
+ entry.status = MDLOG_STATUS_COMPLETE;
+
+ bufferlist bl;
+ encode(entry, bl);
+
+ const std::string hash_key = fmt::format("{}:{}", section, key);
+ return mdlog_svc.add_entry(dpp, hash_key, section, key, bl, y);
+}
+
+int RadosStore::store_account(const DoutPrefixProvider* dpp,
+ optional_yield y, bool exclusive,
+ const RGWAccountInfo& info,
+ const RGWAccountInfo* old_info,
+ const Attrs& attrs,
+ RGWObjVersionTracker& objv)
+{
+ ceph::real_time mtime = ceph::real_clock::now();
+ int r = rgwrados::account::write(
+ dpp, y, *svc()->sysobj, svc()->zone->get_zone_params(),
+ info, old_info, attrs, mtime, exclusive, objv);
+ if (r < 0) {
+ return r;
+ }
+
+ return write_mdlog_entry(dpp, y, *svc()->mdlog, "account", info.id, objv);
+}
+
+int RadosStore::delete_account(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const RGWAccountInfo& info,
+ RGWObjVersionTracker& objv)
+{
+ int r = rgwrados::account::remove(
+ dpp, y, *svc()->sysobj,
+ svc()->zone->get_zone_params(),
+ info, objv);
+ if (r < 0) {
+ return r;
+ }
+
+ return write_mdlog_entry(dpp, y, *svc()->mdlog, "account", info.id, objv);
+}
+
+int RadosStore::load_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const rgw_owner& owner,
+ RGWStorageStats& stats,
+ ceph::real_time& last_synced,
+ ceph::real_time& last_updated)
+{
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const rgw_raw_obj& obj = get_owner_buckets_obj(svc()->user, svc()->zone, owner);
+ return rgwrados::buckets::read_stats(dpp, y, rados, obj, stats,
+ &last_synced, &last_updated);
+}
+
+int RadosStore::load_stats_async(const DoutPrefixProvider* dpp,
+ const rgw_owner& owner,
+ boost::intrusive_ptr<ReadStatsCB> cb)
+{
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const rgw_raw_obj& obj = get_owner_buckets_obj(svc()->user, svc()->zone, owner);
+ return rgwrados::buckets::read_stats_async(dpp, rados, obj, std::move(cb));
+}
+
+int RadosStore::reset_stats(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const rgw_owner& owner)
+{
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const rgw_raw_obj& obj = get_owner_buckets_obj(svc()->user, svc()->zone, owner);
+ return rgwrados::buckets::reset_stats(dpp, y, rados, obj);
+}
+
+int RadosStore::complete_flush_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const rgw_owner& owner)
+{
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const rgw_raw_obj& obj = get_owner_buckets_obj(svc()->user, svc()->zone, owner);
+ return rgwrados::buckets::complete_flush_stats(dpp, y, rados, obj);
+}
+
+int RadosStore::load_owner_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view email,
+ rgw_owner& owner)
+{
+ // the email index stores ids that can refer to either a user or an account
+ RGWUID uid;
+ int r = svc()->user->read_email_index(dpp, y, email, uid);
+ if (r < 0) {
+ return r;
+ }
+ owner = parse_owner(uid.id);
+ return 0;
+}
+
+int RadosStore::count_account_roles(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count)
+{
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const RGWZoneParams& zone = svc()->zone->get_zone_params();
+ const rgw_raw_obj& obj = rgwrados::account::get_roles_obj(zone, account_id);
+ return rgwrados::account::resource_count(dpp, y, rados, obj, count);
+}
+
+int RadosStore::list_account_roles(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ RoleList& listing)
+{
+ // fetch the list of role ids from cls_role
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const RGWZoneParams& zone = svc()->zone->get_zone_params();
+ const rgw_raw_obj& obj = rgwrados::account::get_roles_obj(zone, account_id);
+ std::vector<std::string> ids;
+ int r = rgwrados::roles::list(dpp, y, rados, obj, marker, path_prefix,
+ max_items, ids, listing.next_marker);
+ if (r < 0) {
+ return r;
+ }
+
+ // load the role metadata for each
+ for (const auto& id : ids) {
+ std::unique_ptr<rgw::sal::RGWRole> role = get_role(id);
+ r = role->read_info(dpp, y);
+ if (r == -ENOENT) {
+ continue;
+ }
+ if (r < 0) {
+ return r;
+ }
+ listing.roles.push_back(std::move(role->get_info()));
+ }
+
+ return 0;
+}
+
+int RadosStore::load_account_user_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view tenant,
+ std::string_view username,
+ std::unique_ptr<User>* user)
+{
+ rgw_user uid;
+ uid.tenant = tenant;
+
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const RGWZoneParams& zone = svc()->zone->get_zone_params();
+ const rgw_raw_obj& obj = rgwrados::account::get_users_obj(zone, account_id);
+ int r = rgwrados::users::get(dpp, y, rados, obj, username, uid.id);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "failed to find account username " << username
+ << ": " << cpp_strerror(r) << dendl;
+ return r;
+ }
+
+ std::unique_ptr<User> u = get_user(uid);
+ r = u->load_user(dpp, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 20) << "failed to load account user " << uid
+ << ": " << cpp_strerror(r) << dendl;
+ return r;
+ }
+ *user = std::move(u);
+ return 0;
+}
+
+int RadosStore::count_account_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count)
+{
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const RGWZoneParams& zone = svc()->zone->get_zone_params();
+ const rgw_raw_obj& obj = rgwrados::account::get_users_obj(zone, account_id);
+ return rgwrados::account::resource_count(dpp, y, rados, obj, count);
+}
+
+int RadosStore::list_account_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view tenant,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ UserList& listing)
+{
+ // fetch the list of user ids from cls_user
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const RGWZoneParams& zone = svc()->zone->get_zone_params();
+ const rgw_raw_obj& obj = rgwrados::account::get_users_obj(zone, account_id);
+ std::vector<std::string> ids;
+ int r = rgwrados::users::list(dpp, y, rados, obj, marker, path_prefix,
+ max_items, ids, listing.next_marker);
+ if (r < 0) {
+ return r;
+ }
+
+ // load the user metadata for each
+ for (auto& id : ids) {
+ rgw_user uid;
+ uid.tenant = tenant;
+ uid.id = std::move(id);
+
+ RGWUserInfo info;
+ r = ctl()->user->get_info_by_uid(dpp, uid, &info, y);
+ if (r == -ENOENT) {
+ continue;
+ }
+ if (r < 0) {
+ return r;
+ }
+ listing.users.push_back(std::move(info));
+ }
+
+ return 0;
+}
+
+int RadosStore::load_group_by_id(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view id,
+ RGWGroupInfo& info, Attrs& attrs,
+ RGWObjVersionTracker& objv)
+{
+ const RGWZoneParams& zone = svc()->zone->get_zone_params();
+ ceph::real_time mtime_ignored;
+ return rgwrados::group::read(dpp, y, *svc()->sysobj, zone, id,
+ info, attrs, mtime_ignored, objv);
+}
+
+int RadosStore::load_group_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view name,
+ RGWGroupInfo& info, Attrs& attrs,
+ RGWObjVersionTracker& objv)
+{
+ const RGWZoneParams& zone = svc()->zone->get_zone_params();
+ return rgwrados::group::read_by_name(dpp, y, *svc()->sysobj, zone, account_id,
+ name, info, attrs, objv);
+}
+
+int RadosStore::store_group(const DoutPrefixProvider* dpp, optional_yield y,
+ const RGWGroupInfo& info, const Attrs& attrs,
+ RGWObjVersionTracker& objv, bool exclusive,
+ const RGWGroupInfo* old_info)
+{
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const RGWZoneParams& zone = svc()->zone->get_zone_params();
+ ceph::real_time mtime = ceph::real_clock::now();
+ int r = rgwrados::group::write(dpp, y, *svc()->sysobj, rados, zone, info,
+ old_info, attrs, mtime, exclusive, objv);
+ if (r < 0) {
+ return r;
+ }
+
+ return write_mdlog_entry(dpp, y, *svc()->mdlog, "group", info.id, objv);
+}
+
+int RadosStore::remove_group(const DoutPrefixProvider* dpp, optional_yield y,
+ const RGWGroupInfo& info,
+ RGWObjVersionTracker& objv)
+{
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const RGWZoneParams& zone = svc()->zone->get_zone_params();
+ int r = rgwrados::group::remove(dpp, y, *svc()->sysobj, rados, zone, info, objv);
+ if (r < 0) {
+ return r;
+ }
+
+ return write_mdlog_entry(dpp, y, *svc()->mdlog, "group", info.id, objv);
+}
+
+int RadosStore::list_group_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view id,
+ std::string_view marker,
+ uint32_t max_items,
+ UserList& listing)
+{
+ // fetch the list of user ids from cls_user
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const RGWZoneParams& zone = svc()->zone->get_zone_params();
+ const rgw_raw_obj& obj = rgwrados::group::get_users_obj(zone, id);
+ const std::string path_prefix; // empty
+ std::vector<std::string> ids;
+ int r = rgwrados::users::list(dpp, y, rados, obj, marker, path_prefix,
+ max_items, ids, listing.next_marker);
+ if (r < 0) {
+ return r;
+ }
+
+ // load the user metadata for each
+ for (auto& id : ids) {
+ rgw_user uid;
+ uid.tenant = tenant;
+ uid.id = std::move(id);
+
+ RGWUserInfo info;
+ r = ctl()->user->get_info_by_uid(dpp, uid, &info, y);
+ if (r == -ENOENT) {
+ continue;
+ }
+ if (r < 0) {
+ return r;
+ }
+ listing.users.push_back(std::move(info));
+ }
+
+ return 0;
+}
+
+int RadosStore::count_account_groups(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count)
+{
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const RGWZoneParams& zone = svc()->zone->get_zone_params();
+ const rgw_raw_obj& obj = rgwrados::account::get_groups_obj(zone, account_id);
+ return rgwrados::account::resource_count(dpp, y, rados, obj, count);
+}
+
+int RadosStore::list_account_groups(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ GroupList& listing)
+{
+ // fetch the list of group ids from cls_user
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const RGWZoneParams& zone = svc()->zone->get_zone_params();
+ const rgw_raw_obj& obj = rgwrados::account::get_groups_obj(zone, account_id);
+ std::vector<std::string> ids;
+ int r = rgwrados::groups::list(dpp, y, rados, obj, marker, path_prefix,
+ max_items, ids, listing.next_marker);
+ if (r < 0) {
+ return r;
+ }
+
+ // load the group metadata for each
+ for (auto& id : ids) {
+ RGWGroupInfo info;
+ Attrs attrs;
+ ceph::real_time mtime_ignored;
+ RGWObjVersionTracker objv;
+ r = rgwrados::group::read(dpp, y, *svc()->sysobj, zone, id,
+ info, attrs, mtime_ignored, objv);
+ if (r == -ENOENT) {
+ continue;
+ }
+ if (r < 0) {
+ return r;
+ }
+ listing.groups.push_back(std::move(info));
+ }
+
+ return 0;
+}
+
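
A pagination sketch for the account listings above (illustrative; assumes store, dpp, y, account_id and tenant in scope, and that next_marker is returned empty on the final page):

  std::string marker;
  do {
    rgw::sal::UserList listing;
    int r = store->list_account_users(dpp, y, account_id, tenant,
                                      "" /* path_prefix */, marker,
                                      100 /* max_items */, listing);
    if (r < 0) {
      return r;
    }
    for (const auto& info : listing.users) {
      // consume each RGWUserInfo ...
    }
    marker = listing.next_marker;
  } while (!marker.empty());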
std::unique_ptr<Object> RadosStore::get_object(const rgw_obj_key& k)
{
return std::make_unique<RadosObject>(this, k);
@@ -1171,9 +1694,10 @@ int RadosStore::write_topic_v2(const rgw_pubsub_topic& topic, bool exclusive,
optional_yield y,
const DoutPrefixProvider* dpp)
{
+ librados::Rados& rados = *getRados()->get_rados_handle();
const RGWZoneParams& zone = svc()->zone->get_zone_params();
- return rgwrados::topic::write(dpp, y, *svc()->sysobj, svc()->mdlog, zone,
- topic, objv_tracker, {}, exclusive);
+ return rgwrados::topic::write(dpp, y, *svc()->sysobj, svc()->mdlog, rados,
+ zone, topic, objv_tracker, {}, exclusive);
}
int RadosStore::remove_topic_v2(const std::string& topic_name,
@@ -1182,10 +1706,24 @@ int RadosStore::remove_topic_v2(const std::string& topic_name,
optional_yield y,
const DoutPrefixProvider* dpp)
{
+ librados::Rados& rados = *getRados()->get_rados_handle();
const RGWZoneParams& zone = svc()->zone->get_zone_params();
- const std::string key = get_topic_metadata_key(tenant, topic_name);
return rgwrados::topic::remove(dpp, y, *svc()->sysobj, svc()->mdlog,
- zone, key, objv_tracker);
+ rados, zone, tenant, topic_name, objv_tracker);
+}
+
+int RadosStore::list_account_topics(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view marker,
+ uint32_t max_items,
+ TopicList& listing)
+{
+ librados::Rados& rados = *getRados()->get_rados_handle();
+ const RGWZoneParams& zone = svc()->zone->get_zone_params();
+ const rgw_raw_obj& obj = rgwrados::account::get_topics_obj(zone, account_id);
+ return rgwrados::topics::list(dpp, y, rados, obj, marker, max_items,
+ listing.topics, listing.next_marker);
}
int RadosStore::remove_bucket_mapping_from_topics(
@@ -1217,7 +1755,7 @@ int RadosStore::update_bucket_topic_mapping(const rgw_pubsub_topic& topic,
const DoutPrefixProvider* dpp) {
librados::Rados& rados = *getRados()->get_rados_handle();
const RGWZoneParams& zone = svc()->zone->get_zone_params();
- const std::string key = get_topic_metadata_key(topic.user.tenant, topic.name);
+ const std::string key = get_topic_metadata_key(topic);
int ret = 0;
if (add_mapping) {
ret = rgwrados::topic::link_bucket(dpp, y, rados, zone, key, bucket_key);
@@ -1243,7 +1781,7 @@ int RadosStore::get_bucket_topic_mapping(const rgw_pubsub_topic& topic,
{
librados::Rados& rados = *getRados()->get_rados_handle();
const RGWZoneParams& zone = svc()->zone->get_zone_params();
- const std::string key = get_topic_metadata_key(topic.user.tenant, topic.name);
+ const std::string key = get_topic_metadata_key(topic);
constexpr int max_chunk = 1024;
std::string marker;
@@ -1432,12 +1970,14 @@ std::unique_ptr<LuaManager> RadosStore::get_lua_manager(const std::string& luaro
std::unique_ptr<RGWRole> RadosStore::get_role(std::string name,
std::string tenant,
+ rgw_account_id account_id,
std::string path,
std::string trust_policy,
+ std::string description,
std::string max_session_duration_str,
std::multimap<std::string,std::string> tags)
{
- return std::make_unique<RadosRole>(this, name, tenant, path, trust_policy, max_session_duration_str, tags);
+ return std::make_unique<RadosRole>(this, name, tenant, std::move(account_id), path, trust_policy, std::move(description), max_session_duration_str, tags);
}
std::unique_ptr<RGWRole> RadosStore::get_role(std::string id)
@@ -1450,13 +1990,17 @@ std::unique_ptr<RGWRole> RadosStore::get_role(const RGWRoleInfo& info)
return std::make_unique<RadosRole>(this, info);
}
-int RadosStore::get_roles(const DoutPrefixProvider *dpp,
- optional_yield y,
- const std::string& path_prefix,
- const std::string& tenant,
- vector<std::unique_ptr<RGWRole>>& roles)
+int RadosStore::list_roles(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const std::string& tenant,
+ const std::string& path_prefix,
+ const std::string& marker,
+ uint32_t max_items,
+ RoleList& listing)
{
- auto pool = svc()->zone->get_zone_params().roles_pool;
+ listing.roles.clear();
+
+ const auto& pool = svc()->zone->get_zone_params().roles_pool;
std::string prefix;
// List all roles if path prefix is empty
@@ -1467,59 +2011,132 @@ int RadosStore::get_roles(const DoutPrefixProvider *dpp,
}
//Get the filtered objects
- list<std::string> result;
- bool is_truncated;
RGWListRawObjsCtx ctx;
- do {
- list<std::string> oids;
- int r = rados->list_raw_objects(dpp, pool, prefix, 1000, ctx, oids, &is_truncated);
- if (r < 0) {
- ldpp_dout(dpp, 0) << "ERROR: listing filtered objects failed: "
- << prefix << ": " << cpp_strerror(-r) << dendl;
- return r;
- }
- for (const auto& iter : oids) {
- result.push_back(iter.substr(RGWRole::role_path_oid_prefix.size()));
- }
- } while (is_truncated);
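+ // resume the raw-object listing from the caller-supplied marker so results can be paged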
+ int r = rados->list_raw_objects_init(dpp, pool, marker, &ctx);
+ if (r < 0) {
+ return r;
+ }
+
+ bool is_truncated = false;
+ list<std::string> oids;
+ r = rados->list_raw_objects(dpp, pool, prefix, max_items,
+ ctx, oids, &is_truncated);
+ if (r == -ENOENT) {
+ r = 0;
+ } else if (r < 0) {
+ return r;
+ }
+
+ for (const auto& oid : oids) {
+ const std::string key = oid.substr(RGWRole::role_path_oid_prefix.size());
- for (const auto& it : result) {
//Find the role oid prefix from the end
- size_t pos = it.rfind(RGWRole::role_oid_prefix);
+ size_t pos = key.rfind(RGWRole::role_oid_prefix);
if (pos == std::string::npos) {
- continue;
+ continue;
}
// Split the result into path and info_oid + id
- std::string path = it.substr(0, pos);
+ std::string path = key.substr(0, pos);
/*Make sure that prefix is part of the path (false results could have been
returned because of the role info oid + id appended to the path)*/
if(path_prefix.empty() || path.find(path_prefix) != std::string::npos) {
//Get id from info oid prefix + id
- std::string id = it.substr(pos + RGWRole::role_oid_prefix.length());
+ std::string id = key.substr(pos + RGWRole::role_oid_prefix.length());
std::unique_ptr<rgw::sal::RGWRole> role = get_role(id);
- int ret = role->read_info(dpp, y);
- if (ret < 0) {
- return ret;
+ r = role->read_info(dpp, y);
+ if (r < 0) {
+ return r;
}
- roles.push_back(std::move(role));
+ listing.roles.push_back(std::move(role->get_info()));
}
}
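+ // hand the raw listing cursor back as the opaque next_marker for continuation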
+ if (is_truncated) {
+ listing.next_marker = rados->list_raw_objs_get_cursor(ctx);
+ } else {
+ listing.next_marker.clear();
+ }
return 0;
}
-std::unique_ptr<RGWOIDCProvider> RadosStore::get_oidc_provider()
+static constexpr std::string_view oidc_url_oid_prefix = "oidc_url.";
+
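+// oidc provider objects are keyed as "<tenant-or-account><prefix><url>" in the oidc pool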
+static std::string oidc_provider_oid(std::string_view account,
+ std::string_view prefix,
+ std::string_view url)
{
- return std::make_unique<RadosOIDCProvider>(this);
+ return string_cat_reserve(account, prefix, url);
}
-int RadosStore::get_oidc_providers(const DoutPrefixProvider *dpp,
- const std::string& tenant,
- vector<std::unique_ptr<RGWOIDCProvider>>& providers, optional_yield y)
+int RadosStore::store_oidc_provider(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const RGWOIDCProviderInfo& info,
+ bool exclusive)
{
- std::string prefix = tenant + RGWOIDCProvider::oidc_url_oid_prefix;
+ auto sysobj = svc()->sysobj;
+ std::string oid = oidc_provider_oid(info.tenant, oidc_url_oid_prefix,
+ url_remove_prefix(info.provider_url));
+
+ // TODO: add support for oidc metadata sync
+ bufferlist bl;
+ using ceph::encode;
+ encode(info, bl);
+ return rgw_put_system_obj(dpp, sysobj, svc()->zone->get_zone_params().oidc_pool, oid, bl, exclusive, nullptr, real_time(), y);
+}
+
+int RadosStore::load_oidc_provider(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ std::string_view account,
+ std::string_view url,
+ RGWOIDCProviderInfo& info)
+{
+ auto sysobj = svc()->sysobj;
+ auto& pool = svc()->zone->get_zone_params().oidc_pool;
+ std::string oid = oidc_provider_oid(account, oidc_url_oid_prefix, url);
+ bufferlist bl;
+
+ int ret = rgw_get_system_obj(sysobj, pool, oid, bl, nullptr, nullptr, y, dpp);
+ if (ret < 0) {
+ return ret;
+ }
+
+ try {
+ using ceph::decode;
+ auto iter = bl.cbegin();
+ decode(info, iter);
+ } catch (buffer::error& err) {
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode oidc provider info from pool: " << pool.name <<
+ ": " << url << dendl;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int RadosStore::delete_oidc_provider(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ std::string_view account,
+ std::string_view url)
+{
+ auto& pool = svc()->zone->get_zone_params().oidc_pool;
+ std::string oid = oidc_provider_oid(account, oidc_url_oid_prefix, url);
+ int ret = rgw_delete_system_obj(dpp, svc()->sysobj, pool, oid, nullptr, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: deleting oidc url from pool: " << pool.name << ": "
+ << url << ": " << cpp_strerror(-ret) << dendl;
+ }
+
+ return ret;
+}
+
+int RadosStore::get_oidc_providers(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ vector<RGWOIDCProviderInfo>& providers)
+{
+ std::string prefix = string_cat_reserve(tenant, oidc_url_oid_prefix);
auto pool = svc()->zone->get_zone_params().oidc_pool;
//Get the filtered objects
@@ -1529,31 +2146,33 @@ int RadosStore::get_oidc_providers(const DoutPrefixProvider *dpp,
do {
list<std::string> oids;
int r = rados->list_raw_objects(dpp, pool, prefix, 1000, ctx, oids, &is_truncated);
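+ // -ENOENT presumably means the oidc pool doesn't exist yet; treat as no providers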
+ if (r == -ENOENT) {
+ return 0;
+ }
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: listing filtered objects failed: OIDC pool: "
<< pool.name << ": " << prefix << ": " << cpp_strerror(-r) << dendl;
return r;
}
for (const auto& iter : oids) {
- std::unique_ptr<rgw::sal::RGWOIDCProvider> provider = get_oidc_provider();
bufferlist bl;
-
r = rgw_get_system_obj(svc()->sysobj, pool, iter, bl, nullptr, nullptr, y, dpp);
if (r < 0) {
return r;
}
+ RGWOIDCProviderInfo info;
try {
using ceph::decode;
auto iter = bl.cbegin();
- decode(*provider, iter);
+ decode(info, iter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode oidc provider info from pool: "
<< pool.name << ": " << iter << dendl;
return -EIO;
}
- providers.push_back(std::move(provider));
+ providers.push_back(std::move(info));
}
} while (is_truncated);
@@ -1563,7 +2182,7 @@ int RadosStore::get_oidc_providers(const DoutPrefixProvider *dpp,
std::unique_ptr<Writer> RadosStore::get_append_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
const std::string& unique_tag,
uint64_t position,
@@ -1583,7 +2202,7 @@ std::unique_ptr<Writer> RadosStore::get_append_writer(const DoutPrefixProvider *
std::unique_ptr<Writer> RadosStore::get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag)
@@ -2155,7 +2774,7 @@ RadosObject::RadosDeleteOp::RadosDeleteOp(RadosObject *_source) :
int RadosObject::RadosDeleteOp::delete_obj(const DoutPrefixProvider* dpp, optional_yield y, uint32_t flags)
{
- parent_op.params.bucket_owner = params.bucket_owner.id;
+ parent_op.params.bucket_owner = params.bucket_owner;
parent_op.params.versioning_status = params.versioning_status;
parent_op.params.obj_owner = params.obj_owner;
parent_op.params.olh_epoch = params.olh_epoch;
@@ -2194,7 +2813,8 @@ int RadosObject::delete_object(const DoutPrefixProvider* dpp,
return del_op.delete_obj(y, dpp, flags & FLAG_LOG_OP);
}
-int RadosObject::copy_object(User* user,
+int RadosObject::copy_object(const ACLOwner& owner,
+ const rgw_user& remote_user,
req_info* info,
const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object,
@@ -2223,7 +2843,8 @@ int RadosObject::copy_object(User* user,
optional_yield y)
{
return store->getRados()->copy_obj(*rados_ctx,
- user->get_id(),
+ owner,
+ remote_user,
info,
source_zone,
dest_object->get_obj(),
@@ -2259,22 +2880,23 @@ int RadosObject::RadosReadOp::iterate(const DoutPrefixProvider* dpp, int64_t ofs
return parent_op.iterate(dpp, ofs, end, cb, y);
}
-int RadosObject::swift_versioning_restore(bool& restored,
+int RadosObject::swift_versioning_restore(const ACLOwner& owner, const rgw_user& remote_user, bool& restored,
const DoutPrefixProvider* dpp, optional_yield y)
{
rgw_obj obj = get_obj();
return store->getRados()->swift_versioning_restore(*rados_ctx,
- bucket->get_owner(),
+ owner, remote_user,
bucket->get_info(),
obj,
restored,
dpp, y);
}
-int RadosObject::swift_versioning_copy(const DoutPrefixProvider* dpp, optional_yield y)
+int RadosObject::swift_versioning_copy(const ACLOwner& owner, const rgw_user& remote_user,
+ const DoutPrefixProvider* dpp, optional_yield y)
{
return store->getRados()->swift_versioning_copy(*rados_ctx,
- bucket->get_info().owner,
+ owner, remote_user,
bucket->get_info(),
get_obj(),
dpp,
@@ -2391,7 +3013,7 @@ int RadosMultipartUpload::abort(const DoutPrefixProvider *dpp, CephContext *cct,
}
std::unique_ptr<rgw::sal::Object::DeleteOp> del_op = meta_obj->get_delete_op();
- del_op->params.bucket_owner.id = bucket->get_info().owner;
+ del_op->params.bucket_owner = bucket->get_info().owner;
del_op->params.versioning_status = 0;
if (!remove_objs.empty()) {
del_op->params.remove_objs = &remove_objs;
@@ -2437,13 +3059,16 @@ int RadosMultipartUpload::init(const DoutPrefixProvider *dpp, optional_yield y,
obj->set_in_extra_data(true);
obj->set_hash_source(oid);
- RGWRados::Object op_target(store->getRados(),
- obj->get_bucket()->get_info(),
+
+ const RGWBucketInfo& bucket_info = obj->get_bucket()->get_info();
+
+ RGWRados::Object op_target(store->getRados(), bucket_info,
obj_ctx, obj->get_obj());
RGWRados::Object::Write obj_op(&op_target);
op_target.set_versioning_disabled(true); /* no versioning for multipart meta */
- obj_op.meta.owner = owner.id;
+ obj_op.meta.owner = owner;
+ obj_op.meta.bucket_owner = bucket_info.owner;
obj_op.meta.category = RGWObjCategory::MultiMeta;
obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;
obj_op.meta.mtime = &mtime;
@@ -2747,8 +3372,8 @@ int RadosMultipartUpload::complete(const DoutPrefixProvider *dpp,
target_obj->set_atomic();
- RGWRados::Object op_target(store->getRados(),
- target_obj->get_bucket()->get_info(),
+ const RGWBucketInfo& bucket_info = target_obj->get_bucket()->get_info();
+ RGWRados::Object op_target(store->getRados(), bucket_info,
dynamic_cast<RadosObject*>(target_obj)->get_ctx(),
target_obj->get_obj());
RGWRados::Object::Write obj_op(&op_target);
@@ -2757,7 +3382,8 @@ int RadosMultipartUpload::complete(const DoutPrefixProvider *dpp,
obj_op.meta.remove_objs = &remove_objs;
obj_op.meta.ptag = &tag; /* use req_id as operation tag */
- obj_op.meta.owner = owner.id;
+ obj_op.meta.owner = owner;
+ obj_op.meta.bucket_owner = bucket_info.owner;
obj_op.meta.flags = PUT_OBJ_CREATE;
obj_op.meta.modify_tail = true;
obj_op.meta.completeMultipart = true;
@@ -2852,7 +3478,7 @@ std::unique_ptr<Writer> RadosMultipartUpload::get_writer(
const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num,
const std::string& part_num_str)
@@ -3555,70 +4181,6 @@ std::ostream& RadosLuaManager::PackagesWatcher::gen_prefix(std::ostream& out) co
return out << "rgw lua package reloader: ";
}
-int RadosOIDCProvider::store_url(const DoutPrefixProvider *dpp, const std::string& url, bool exclusive, optional_yield y)
-{
- auto sysobj = store->svc()->sysobj;
- std::string oid = tenant + get_url_oid_prefix() + url;
-
- bufferlist bl;
- using ceph::encode;
- encode(*this, bl);
- return rgw_put_system_obj(dpp, sysobj, store->svc()->zone->get_zone_params().oidc_pool, oid, bl, exclusive, nullptr, real_time(), y);
-}
-
-int RadosOIDCProvider::read_url(const DoutPrefixProvider *dpp, const std::string& url, const std::string& tenant, optional_yield y)
-{
- auto sysobj = store->svc()->sysobj;
- auto& pool = store->svc()->zone->get_zone_params().oidc_pool;
- std::string oid = tenant + get_url_oid_prefix() + url;
- bufferlist bl;
-
- int ret = rgw_get_system_obj(sysobj, pool, oid, bl, nullptr, nullptr, y, dpp);
- if (ret < 0) {
- return ret;
- }
-
- try {
- using ceph::decode;
- auto iter = bl.cbegin();
- decode(*this, iter);
- } catch (buffer::error& err) {
- ldpp_dout(dpp, 0) << "ERROR: failed to decode oidc provider info from pool: " << pool.name <<
- ": " << url << dendl;
- return -EIO;
- }
-
- return 0;
-}
-
-int RadosOIDCProvider::delete_obj(const DoutPrefixProvider *dpp, optional_yield y)
-{
- auto& pool = store->svc()->zone->get_zone_params().oidc_pool;
-
- std::string url, tenant;
- auto ret = get_tenant_url_from_arn(tenant, url);
- if (ret < 0) {
- ldpp_dout(dpp, 0) << "ERROR: failed to parse arn" << dendl;
- return -EINVAL;
- }
-
- if (this->tenant != tenant) {
- ldpp_dout(dpp, 0) << "ERROR: tenant in arn doesn't match that of user " << this->tenant << ", "
- << tenant << ": " << dendl;
- return -EINVAL;
- }
-
- // Delete url
- std::string oid = tenant + get_url_oid_prefix() + url;
- ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
- if (ret < 0) {
- ldpp_dout(dpp, 0) << "ERROR: deleting oidc url from pool: " << pool.name << ": "
- << provider_url << ": " << cpp_strerror(-ret) << dendl;
- }
-
- return ret;
-}
-
int RadosRole::store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
{
using ceph::encode;
@@ -3647,13 +4209,27 @@ int RadosRole::store_info(const DoutPrefixProvider *dpp, bool exclusive, optiona
}
}
+static std::string role_name_oid(const RGWRoleInfo& r, std::string_view prefix)
+{
+ if (!r.account_id.empty()) {
+ // names are case-insensitive, so store them in lower case
+ std::string lower_name = r.name;
+ boost::algorithm::to_lower(lower_name);
+ // use account id as prefix
+ return string_cat_reserve(r.account_id, prefix, lower_name);
+ } else {
+ // use tenant as prefix
+ return string_cat_reserve(r.tenant, prefix, r.name);
+ }
+}
+
int RadosRole::store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
{
auto sysobj = store->svc()->sysobj;
RGWNameToId nameToId;
nameToId.obj_id = info.id;
- std::string oid = info.tenant + get_names_oid_prefix() + info.name;
+ std::string oid = role_name_oid(info, get_names_oid_prefix());
bufferlist bl;
using ceph::encode;
@@ -3664,6 +4240,14 @@ int RadosRole::store_name(const DoutPrefixProvider *dpp, bool exclusive, optiona
int RadosRole::store_path(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
{
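+ // account roles are tracked in the account's roles object rather than a per-path oid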
+ if (!info.account_id.empty()) {
+ librados::Rados& rados = *store->getRados()->get_rados_handle();
+ const RGWZoneParams& zone = store->svc()->zone->get_zone_params();
+ const rgw_raw_obj& obj = rgwrados::account::get_roles_obj(zone, info.account_id);
+ constexpr uint32_t no_limit = std::numeric_limits<uint32_t>::max();
+ return rgwrados::roles::add(dpp, y, rados, obj, info, false, no_limit);
+ }
+
auto sysobj = store->svc()->sysobj;
std::string oid = info.tenant + get_path_oid_prefix() + info.path + get_info_oid_prefix() + info.id;
@@ -3699,7 +4283,7 @@ int RadosRole::read_id(const DoutPrefixProvider *dpp, const std::string& role_na
int RadosRole::read_name(const DoutPrefixProvider *dpp, optional_yield y)
{
auto sysobj = store->svc()->sysobj;
- std::string oid = info.tenant + get_names_oid_prefix() + info.name;
+ std::string oid = role_name_oid(info, get_names_oid_prefix());
bufferlist bl;
int ret = rgw_get_system_obj(sysobj, store->svc()->zone->get_zone_params().roles_pool, oid, bl, nullptr, nullptr, y, dpp);
@@ -3799,20 +4383,23 @@ int RadosRole::create(const DoutPrefixProvider *dpp, bool exclusive, const std::
}
//arn
- info.arn = role_arn_prefix + info.tenant + ":role" + info.path + info.name;
+ std::string_view account = !info.account_id.empty() ? info.account_id : info.tenant;
+ info.arn = string_cat_reserve(role_arn_prefix, account, ":role", info.path, info.name);
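+ // only generate a creation_date when the caller didn't supply one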
- // Creation time
- real_clock::time_point t = real_clock::now();
+ if (info.creation_date.empty()) {
+ // Creation time
+ real_clock::time_point t = real_clock::now();
- struct timeval tv;
- real_clock::to_timeval(t, tv);
+ struct timeval tv;
+ real_clock::to_timeval(t, tv);
- char buf[30];
- struct tm result;
- gmtime_r(&tv.tv_sec, &result);
- strftime(buf,30,"%Y-%m-%dT%H:%M:%S", &result);
- sprintf(buf + strlen(buf),".%dZ",(int)tv.tv_usec/1000);
- info.creation_date.assign(buf, strlen(buf));
+ char buf[30];
+ struct tm result;
+ gmtime_r(&tv.tv_sec, &result);
+ strftime(buf,30,"%Y-%m-%dT%H:%M:%S", &result);
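+ // append milliseconds, zero-padded to three digits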
+ sprintf(buf + strlen(buf),".%03dZ",(int)tv.tv_usec/1000);
+ info.creation_date.assign(buf, strlen(buf));
+ }
auto& pool = store->svc()->zone->get_zone_params().roles_pool;
ret = store_info(dpp, exclusive, y);
@@ -3849,7 +4436,7 @@ int RadosRole::create(const DoutPrefixProvider *dpp, bool exclusive, const std::
<< info.id << ": " << cpp_strerror(-info_ret) << dendl;
}
//Delete role name that was stored in previous call
- oid = info.tenant + get_names_oid_prefix() + info.name;
+ oid = role_name_oid(info, get_names_oid_prefix());
int name_ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (name_ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: cleanup of role name from Role pool: "
@@ -3874,10 +4461,6 @@ int RadosRole::delete_obj(const DoutPrefixProvider *dpp, optional_yield y)
return ret;
}
- if (! info.perm_policy_map.empty()) {
- return -ERR_DELETE_CONFLICT;
- }
-
// Delete id & insert MD Log
RGWSI_MBSObj_RemoveParams params;
std::unique_ptr<RGWSI_MetaBackend::Context> ctx(store->svc()->role->svc.meta_be->alloc_ctx());
@@ -3889,7 +4472,7 @@ int RadosRole::delete_obj(const DoutPrefixProvider *dpp, optional_yield y)
}
// Delete name
- std::string oid = info.tenant + get_names_oid_prefix() + info.name;
+ std::string oid = role_name_oid(info, get_names_oid_prefix());
ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: deleting role name from Role pool: "
@@ -3897,13 +4480,24 @@ int RadosRole::delete_obj(const DoutPrefixProvider *dpp, optional_yield y)
}
// Delete path
- oid = info.tenant + get_path_oid_prefix() + info.path + get_info_oid_prefix() + info.id;
- ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
- if (ret < 0) {
- ldpp_dout(dpp, 0) << "ERROR: deleting role path from Role pool: "
- << info.path << ": " << cpp_strerror(-ret) << dendl;
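+ // account roles are removed from the account's roles listing instead of a path oid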
+ if (!info.account_id.empty()) {
+ librados::Rados& rados = *store->getRados()->get_rados_handle();
+ const RGWZoneParams& zone = store->svc()->zone->get_zone_params();
+ const rgw_raw_obj& obj = rgwrados::account::get_roles_obj(zone, info.account_id);
+ ret = rgwrados::roles::remove(dpp, y, rados, obj, info.name);
+ if (ret < 0) {
+ ldpp_dout(dpp, 4) << "ERROR: deleting role path from account list: "
+ << info.path << ": " << cpp_strerror(-ret) << dendl;
+ }
+ } else {
+ oid = info.tenant + get_path_oid_prefix() + info.path + get_info_oid_prefix() + info.id;
+ ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 4) << "ERROR: deleting role path from Role pool: "
+ << info.path << ": " << cpp_strerror(-ret) << dendl;
+ }
}
- return ret;
+ return 0;
}
} // namespace rgw::sal
diff --git a/src/rgw/driver/rados/rgw_sal_rados.h b/src/rgw/driver/rados/rgw_sal_rados.h
index 71f7a83a74c..78bd849717f 100644
--- a/src/rgw/driver/rados/rgw_sal_rados.h
+++ b/src/rgw/driver/rados/rgw_sal_rados.h
@@ -22,7 +22,6 @@
#include "rgw_sal_store.h"
#include "rgw_rados.h"
#include "rgw_notify.h"
-#include "rgw_oidc_provider.h"
#include "rgw_role.h"
#include "rgw_multi.h"
#include "rgw_putobj_processor.h"
@@ -144,10 +143,135 @@ class RadosStore : public StoreDriver {
virtual int get_user_by_access_key(const DoutPrefixProvider* dpp, const std::string& key, optional_yield y, std::unique_ptr<User>* user) override;
virtual int get_user_by_email(const DoutPrefixProvider* dpp, const std::string& email, optional_yield y, std::unique_ptr<User>* user) override;
virtual int get_user_by_swift(const DoutPrefixProvider* dpp, const std::string& user_str, optional_yield y, std::unique_ptr<User>* user) override;
+
+ int load_account_by_id(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view id,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int load_account_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view name,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int load_account_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view email,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int store_account(const DoutPrefixProvider* dpp,
+ optional_yield y, bool exclusive,
+ const RGWAccountInfo& info,
+ const RGWAccountInfo* old_info,
+ const Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int delete_account(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const RGWAccountInfo& info,
+ RGWObjVersionTracker& objv) override;
+
+ int load_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const rgw_owner& owner,
+ RGWStorageStats& stats,
+ ceph::real_time& last_synced,
+ ceph::real_time& last_updated) override;
+ int load_stats_async(const DoutPrefixProvider* dpp,
+ const rgw_owner& owner,
+ boost::intrusive_ptr<ReadStatsCB> cb) override;
+ int reset_stats(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const rgw_owner& owner) override;
+ int complete_flush_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const rgw_owner& owner) override;
+
+ int load_owner_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view email,
+ rgw_owner& owner) override;
+
+ int count_account_roles(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count) override;
+ int list_account_roles(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ RoleList& listing) override;
+
+ int load_account_user_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view tenant,
+ std::string_view username,
+ std::unique_ptr<User>* user) override;
+ int count_account_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count) override;
+ int list_account_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view tenant,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ UserList& listing) override;
+
+ int load_group_by_id(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view id,
+ RGWGroupInfo& info, Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int load_group_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view name,
+ RGWGroupInfo& info, Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int store_group(const DoutPrefixProvider* dpp, optional_yield y,
+ const RGWGroupInfo& info, const Attrs& attrs,
+ RGWObjVersionTracker& objv, bool exclusive,
+ const RGWGroupInfo* old_info) override;
+ int remove_group(const DoutPrefixProvider* dpp, optional_yield y,
+ const RGWGroupInfo& info,
+ RGWObjVersionTracker& objv) override;
+ int list_group_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view id,
+ std::string_view marker,
+ uint32_t max_items,
+ UserList& listing) override;
+ int count_account_groups(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count) override;
+ int list_account_groups(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ GroupList& listing) override;
+
virtual std::unique_ptr<Object> get_object(const rgw_obj_key& k) override;
std::unique_ptr<Bucket> get_bucket(const RGWBucketInfo& i) override;
int load_bucket(const DoutPrefixProvider* dpp, const rgw_bucket& b,
std::unique_ptr<Bucket>* bucket, optional_yield y) override;
+ int list_buckets(const DoutPrefixProvider* dpp,
+ const rgw_owner& owner, const std::string& tenant,
+ const std::string& marker, const std::string& end_marker,
+ uint64_t max, bool need_stats, BucketList& buckets,
+ optional_yield y) override;
virtual bool is_meta_master() override;
virtual Zone* get_zone() { return zone.get(); }
virtual std::string zone_unique_id(uint64_t unique_num) override;
@@ -189,6 +313,12 @@ class RadosStore : public StoreDriver {
RGWObjVersionTracker& objv_tracker,
optional_yield y,
const DoutPrefixProvider* dpp) override;
+ int list_account_topics(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view marker,
+ uint32_t max_items,
+ TopicList& listing) override;
int update_bucket_topic_mapping(const rgw_pubsub_topic& topic,
const std::string& bucket_key,
bool add_mapping,
@@ -238,25 +368,42 @@ class RadosStore : public StoreDriver {
std::unique_ptr<LuaManager> get_lua_manager(const std::string& luarocks_path) override;
virtual std::unique_ptr<RGWRole> get_role(std::string name,
std::string tenant,
+ rgw_account_id account_id,
std::string path="",
std::string trust_policy="",
+ std::string description="",
std::string max_session_duration_str="",
std::multimap<std::string,std::string> tags={}) override;
virtual std::unique_ptr<RGWRole> get_role(std::string id) override;
virtual std::unique_ptr<RGWRole> get_role(const RGWRoleInfo& info) override;
- virtual int get_roles(const DoutPrefixProvider *dpp,
- optional_yield y,
- const std::string& path_prefix,
- const std::string& tenant,
- std::vector<std::unique_ptr<RGWRole>>& roles) override;
- virtual std::unique_ptr<RGWOIDCProvider> get_oidc_provider() override;
- virtual int get_oidc_providers(const DoutPrefixProvider *dpp,
- const std::string& tenant,
- std::vector<std::unique_ptr<RGWOIDCProvider>>& providers, optional_yield y) override;
+ int list_roles(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const std::string& tenant,
+ const std::string& path_prefix,
+ const std::string& marker,
+ uint32_t max_items,
+ RoleList& listing) override;
+ int store_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const RGWOIDCProviderInfo& info,
+ bool exclusive) override;
+ int load_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url,
+ RGWOIDCProviderInfo& info) override;
+ int delete_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url) override;
+ int get_oidc_providers(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::vector<RGWOIDCProviderInfo>& providers) override;
virtual std::unique_ptr<Writer> get_append_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
const std::string& unique_tag,
uint64_t position,
@@ -264,7 +411,7 @@ class RadosStore : public StoreDriver {
virtual std::unique_ptr<Writer> get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag) override;
@@ -310,17 +457,8 @@ class RadosUser : public StoreUser {
virtual std::unique_ptr<User> clone() override {
return std::unique_ptr<User>(new RadosUser(*this));
}
- int list_buckets(const DoutPrefixProvider* dpp, const std::string& marker, const std::string& end_marker,
- uint64_t max, bool need_stats, BucketList& buckets,
- optional_yield y) override;
virtual int read_attrs(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int merge_and_store_attrs(const DoutPrefixProvider* dpp, Attrs& new_attrs, optional_yield y) override;
- virtual int read_stats(const DoutPrefixProvider *dpp,
- optional_yield y, RGWStorageStats* stats,
- ceph::real_time* last_stats_sync = nullptr,
- ceph::real_time* last_stats_update = nullptr) override;
- virtual int read_stats_async(const DoutPrefixProvider *dpp, boost::intrusive_ptr<ReadStatsCB> cb) override;
- virtual int complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) override;
virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
bool* is_truncated, RGWUsageIter& usage_iter,
std::map<rgw_user_bucket, rgw_usage_log_entry>& usage) override;
@@ -330,6 +468,9 @@ class RadosUser : public StoreUser {
virtual int store_user(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, RGWUserInfo* old_info = nullptr) override;
virtual int remove_user(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int verify_mfa(const std::string& mfa_str, bool* verified, const DoutPrefixProvider* dpp, optional_yield y) override;
+ int list_groups(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view marker, uint32_t max_items,
+ GroupList& listing) override;
friend class RadosBucket;
};
@@ -413,7 +554,8 @@ class RadosObject : public StoreObject {
}
virtual int delete_object(const DoutPrefixProvider* dpp,
optional_yield y, uint32_t flags) override;
- virtual int copy_object(User* user,
+ virtual int copy_object(const ACLOwner& owner,
+ const rgw_user& remote_user,
req_info* info, const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
rgw::sal::Bucket* src_bucket,
@@ -475,10 +617,10 @@ class RadosObject : public StoreObject {
virtual int dump_obj_layout(const DoutPrefixProvider *dpp, optional_yield y, Formatter* f) override;
/* Swift versioning */
- virtual int swift_versioning_restore(bool& restored,
+ virtual int swift_versioning_restore(const ACLOwner& owner, const rgw_user& remote_user, bool& restored,
const DoutPrefixProvider* dpp, optional_yield y) override;
- virtual int swift_versioning_copy(const DoutPrefixProvider* dpp,
- optional_yield y) override;
+ virtual int swift_versioning_copy(const ACLOwner& owner, const rgw_user& remote_user,
+ const DoutPrefixProvider* dpp, optional_yield y) override;
/* OPs */
virtual std::unique_ptr<ReadOp> get_read_op() override;
@@ -562,11 +704,11 @@ class RadosBucket : public StoreBucket {
virtual int read_stats_async(const DoutPrefixProvider *dpp,
const bucket_index_layout_generation& idx_layout,
int shard_id, boost::intrusive_ptr<ReadStatsCB> ctx) override;
- int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y,
- RGWBucketEnt* ent) override;
+ int sync_owner_stats(const DoutPrefixProvider *dpp, optional_yield y,
+ RGWBucketEnt* ent) override;
int check_bucket_shards(const DoutPrefixProvider* dpp, uint64_t num_objs,
optional_yield y) override;
- virtual int chown(const DoutPrefixProvider* dpp, const rgw_user& new_owner, optional_yield y) override;
+ virtual int chown(const DoutPrefixProvider* dpp, const rgw_owner& new_owner, optional_yield y) override;
virtual int put_info(const DoutPrefixProvider* dpp, bool exclusive, ceph::real_time mtime, optional_yield y) override;
virtual int check_empty(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int check_quota(const DoutPrefixProvider *dpp, RGWQuota& quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) override;
@@ -606,8 +748,8 @@ class RadosBucket : public StoreBucket {
optional_yield y, const DoutPrefixProvider *dpp) override;
private:
- int link(const DoutPrefixProvider* dpp, const rgw_user& new_owner, optional_yield y, bool update_entrypoint = true, RGWObjVersionTracker* objv = nullptr);
- int unlink(const DoutPrefixProvider* dpp, const rgw_user& owner, optional_yield y, bool update_entrypoint = true);
+ int link(const DoutPrefixProvider* dpp, const rgw_owner& new_owner, optional_yield y, bool update_entrypoint = true, RGWObjVersionTracker* objv = nullptr);
+ int unlink(const DoutPrefixProvider* dpp, const rgw_owner& owner, optional_yield y, bool update_entrypoint = true);
friend class RadosUser;
};
@@ -673,7 +815,7 @@ public:
virtual std::unique_ptr<Writer> get_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num,
const std::string& part_num_str) override;
@@ -686,7 +828,7 @@ protected:
class MPRadosSerializer : public StoreMPSerializer {
librados::IoCtx ioctx;
- rados::cls::lock::Lock lock;
+ ::rados::cls::lock::Lock lock;
librados::ObjectWriteOperation op;
public:
@@ -700,7 +842,7 @@ public:
class LCRadosSerializer : public StoreLCSerializer {
librados::IoCtx* ioctx;
- rados::cls::lock::Lock lock;
+ ::rados::cls::lock::Lock lock;
public:
LCRadosSerializer(RadosStore* store, const std::string& oid, const std::string& lock_name, const std::string& cookie);
@@ -801,7 +943,7 @@ public:
RGWObjectCtx& obj_ctx,
const rgw_obj& obj,
RadosStore* _store, std::unique_ptr<Aio> _aio,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag,
@@ -849,7 +991,7 @@ public:
RGWObjectCtx& obj_ctx,
const rgw_obj& obj,
RadosStore* _store, std::unique_ptr<Aio> _aio,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
const std::string& unique_tag,
uint64_t position,
@@ -898,7 +1040,7 @@ public:
RGWObjectCtx& obj_ctx,
const rgw_obj& obj,
RadosStore* _store, std::unique_ptr<Aio> _aio,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num, const std::string& part_num_str, jspan_context& trace) :
StoreWriter(dpp, y),
@@ -970,32 +1112,18 @@ public:
int unwatch_reload(const DoutPrefixProvider* dpp);
};
-class RadosOIDCProvider : public RGWOIDCProvider {
- RadosStore* store;
-public:
- RadosOIDCProvider(RadosStore* _store) : store(_store) {}
- ~RadosOIDCProvider() = default;
-
- virtual int store_url(const DoutPrefixProvider *dpp, const std::string& url, bool exclusive, optional_yield y) override;
- virtual int read_url(const DoutPrefixProvider *dpp, const std::string& url, const std::string& tenant, optional_yield y) override;
- virtual int delete_obj(const DoutPrefixProvider *dpp, optional_yield y) override;
- void encode(bufferlist& bl) const {
- RGWOIDCProvider::encode(bl);
- }
- void decode(bufferlist::const_iterator& bl) {
- RGWOIDCProvider::decode(bl);
- }
-};
-
class RadosRole : public RGWRole {
RadosStore* store;
public:
RadosRole(RadosStore* _store, std::string name,
std::string tenant,
+ rgw_account_id account_id,
std::string path,
std::string trust_policy,
+ std::string description,
std::string max_session_duration,
- std::multimap<std::string,std::string> tags) : RGWRole(name, tenant, path, trust_policy, max_session_duration, tags), store(_store) {}
+ std::multimap<std::string,std::string> tags)
+ : RGWRole(name, tenant, std::move(account_id), path, trust_policy, std::move(description), max_session_duration, tags), store(_store) {}
RadosRole(RadosStore* _store, std::string id) : RGWRole(id), store(_store) {}
RadosRole(RadosStore* _store, const RGWRoleInfo& info) : RGWRole(info), store(_store) {}
RadosRole(RadosStore* _store) : store(_store) {}
@@ -1011,5 +1139,3 @@ public:
virtual int delete_obj(const DoutPrefixProvider *dpp, optional_yield y) override;
};
}} // namespace rgw::sal
-
-WRITE_CLASS_ENCODER(rgw::sal::RadosOIDCProvider)
diff --git a/src/rgw/driver/rados/rgw_service.cc b/src/rgw/driver/rados/rgw_service.cc
index e0d885b86a6..5e7dd81c716 100644
--- a/src/rgw/driver/rados/rgw_service.cc
+++ b/src/rgw/driver/rados/rgw_service.cc
@@ -29,6 +29,8 @@
#include "common/errno.h"
+#include "account.h"
+#include "group.h"
#include "rgw_bucket.h"
#include "rgw_cr_rados.h"
#include "rgw_datalog.h"
@@ -374,7 +376,8 @@ RGWCtlDef::_meta::_meta() {}
RGWCtlDef::_meta::~_meta() {}
-int RGWCtlDef::init(RGWServices& svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp)
+int RGWCtlDef::init(RGWServices& svc, rgw::sal::Driver* driver,
+ librados::Rados& rados, const DoutPrefixProvider *dpp)
{
meta.mgr.reset(new RGWMetadataManager(svc.meta));
@@ -382,15 +385,19 @@ int RGWCtlDef::init(RGWServices& svc, rgw::sal::Driver* driver, const DoutPrefix
auto sync_module = svc.sync_modules->get_sync_module();
if (sync_module) {
- meta.bucket.reset(sync_module->alloc_bucket_meta_handler());
+ meta.bucket.reset(sync_module->alloc_bucket_meta_handler(rados));
meta.bucket_instance.reset(sync_module->alloc_bucket_instance_meta_handler(driver));
} else {
- meta.bucket.reset(RGWBucketMetaHandlerAllocator::alloc());
+ meta.bucket.reset(RGWBucketMetaHandlerAllocator::alloc(rados));
meta.bucket_instance.reset(RGWBucketInstanceMetaHandlerAllocator::alloc(driver));
}
meta.otp.reset(RGWOTPMetaHandlerAllocator::alloc());
meta.role = std::make_unique<rgw::sal::RGWRoleMetadataHandler>(driver, svc.role);
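+ // wire up metadata handlers for the new account and group types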
+ meta.account = rgwrados::account::create_metadata_handler(
+ *svc.sysobj, svc.zone->get_zone_params());
+ meta.group = rgwrados::group::create_metadata_handler(
+ *svc.sysobj, rados, svc.zone->get_zone_params());
user.reset(new RGWUserCtl(svc.zone, svc.user, (RGWUserMetadataHandler *)meta.user.get()));
bucket.reset(new RGWBucketCtl(svc.zone,
@@ -409,8 +416,8 @@ int RGWCtlDef::init(RGWServices& svc, rgw::sal::Driver* driver, const DoutPrefix
meta.topic_cache->init(svc.cache);
meta.topic = rgwrados::topic::create_metadata_handler(
- *svc.sysobj, svc.cache, *svc.mdlog, svc.zone->get_zone_params(),
- *meta.topic_cache);
+ *svc.sysobj, svc.cache, *svc.mdlog, rados,
+ svc.zone->get_zone_params(), *meta.topic_cache);
RGWOTPMetadataHandlerBase *otp_handler = static_cast<RGWOTPMetadataHandlerBase *>(meta.otp.get());
otp_handler->init(svc.zone, svc.meta_be_otp, svc.otp);
@@ -427,12 +434,13 @@ int RGWCtlDef::init(RGWServices& svc, rgw::sal::Driver* driver, const DoutPrefix
return 0;
}
-int RGWCtl::init(RGWServices *_svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp)
+int RGWCtl::init(RGWServices *_svc, rgw::sal::Driver* driver,
+ librados::Rados& rados, const DoutPrefixProvider *dpp)
{
svc = _svc;
cct = svc->cct;
- int r = _ctl.init(*svc, driver, dpp);
+ int r = _ctl.init(*svc, driver, rados, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start init ctls (" << cpp_strerror(-r) << dendl;
return r;
@@ -477,15 +485,28 @@ int RGWCtl::init(RGWServices *_svc, rgw::sal::Driver* driver, const DoutPrefixPr
r = meta.role->attach(meta.mgr);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start init otp ctl (" << cpp_strerror(-r) << dendl;
+ ldout(cct, 0) << "ERROR: failed to start init meta.role ctl (" << cpp_strerror(-r) << dendl;
+ return r;
+ }
+
+ r = _ctl.meta.account->attach(meta.mgr);
+ if (r < 0) {
+ ldout(cct, 0) << "ERROR: failed to start init meta.account ctl (" << cpp_strerror(-r) << dendl;
return r;
}
+
r = meta.topic->attach(meta.mgr);
if (r < 0) {
ldout(cct, 0) << "ERROR: failed to start init topic ctl ("
<< cpp_strerror(-r) << dendl;
return r;
}
+
+ r = _ctl.meta.group->attach(meta.mgr);
+ if (r < 0) {
+ ldout(cct, 0) << "ERROR: failed to start init meta.group ctl (" << cpp_strerror(-r) << dendl;
+ return r;
+ }
return 0;
}
diff --git a/src/rgw/driver/rados/rgw_service.h b/src/rgw/driver/rados/rgw_service.h
index ebab083f27e..ec7f73cf6eb 100644
--- a/src/rgw/driver/rados/rgw_service.h
+++ b/src/rgw/driver/rados/rgw_service.h
@@ -197,6 +197,8 @@ struct RGWCtlDef {
std::unique_ptr<RGWMetadataHandler> otp;
std::unique_ptr<RGWMetadataHandler> role;
std::unique_ptr<RGWMetadataHandler> topic;
+ std::unique_ptr<RGWMetadataHandler> account;
+ std::unique_ptr<RGWMetadataHandler> group;
std::unique_ptr<RGWChainedCacheImpl<rgwrados::topic::cache_entry>> topic_cache;
@@ -211,7 +213,8 @@ struct RGWCtlDef {
RGWCtlDef();
~RGWCtlDef();
- int init(RGWServices& svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp);
+ int init(RGWServices& svc, rgw::sal::Driver* driver,
+ librados::Rados& rados, const DoutPrefixProvider *dpp);
};
struct RGWCtl {
@@ -237,5 +240,6 @@ struct RGWCtl {
RGWBucketCtl *bucket{nullptr};
RGWOTPCtl *otp{nullptr};
- int init(RGWServices *_svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp);
+ int init(RGWServices *_svc, rgw::sal::Driver* driver,
+ librados::Rados& rados, const DoutPrefixProvider *dpp);
};
diff --git a/src/rgw/driver/rados/rgw_sync_module.cc b/src/rgw/driver/rados/rgw_sync_module.cc
index 5a1e70be34e..a19248f2e9a 100644
--- a/src/rgw/driver/rados/rgw_sync_module.cc
+++ b/src/rgw/driver/rados/rgw_sync_module.cc
@@ -16,9 +16,9 @@
#define dout_subsys ceph_subsys_rgw
-RGWMetadataHandler *RGWSyncModuleInstance::alloc_bucket_meta_handler()
+RGWMetadataHandler *RGWSyncModuleInstance::alloc_bucket_meta_handler(librados::Rados& rados)
{
- return RGWBucketMetaHandlerAllocator::alloc();
+ return RGWBucketMetaHandlerAllocator::alloc(rados);
}
RGWBucketInstanceMetadataHandlerBase* RGWSyncModuleInstance::alloc_bucket_instance_meta_handler(rgw::sal::Driver* driver)
diff --git a/src/rgw/driver/rados/rgw_sync_module.h b/src/rgw/driver/rados/rgw_sync_module.h
index 38abb3d1adf..4ca691ca665 100644
--- a/src/rgw/driver/rados/rgw_sync_module.h
+++ b/src/rgw/driver/rados/rgw_sync_module.h
@@ -3,6 +3,7 @@
#pragma once
+#include "include/rados/librados_fwd.hpp"
#include "rgw_common.h"
#include "rgw_coroutine.h"
@@ -55,7 +56,7 @@ public:
virtual bool supports_user_writes() {
return false;
}
- virtual RGWMetadataHandler *alloc_bucket_meta_handler();
+ virtual RGWMetadataHandler *alloc_bucket_meta_handler(librados::Rados& rados);
virtual RGWBucketInstanceMetadataHandlerBase *alloc_bucket_instance_meta_handler(rgw::sal::Driver* driver);
// indication whether the sync module start with full sync (default behavior)
diff --git a/src/rgw/driver/rados/rgw_sync_module_aws.cc b/src/rgw/driver/rados/rgw_sync_module_aws.cc
index 19be83351e1..9d18bc9472b 100644
--- a/src/rgw/driver/rados/rgw_sync_module_aws.cc
+++ b/src/rgw/driver/rados/rgw_sync_module_aws.cc
@@ -615,9 +615,9 @@ struct AWSSyncConfig {
const rgw_obj_key& obj) {
string bucket_str;
string owner;
- if (!bucket_info.owner.tenant.empty()) {
- bucket_str = owner = bucket_info.owner.tenant + "-";
- owner += bucket_info.owner.id;
+ if (!bucket_info.bucket.tenant.empty()) {
+ bucket_str = owner = bucket_info.bucket.tenant + "-";
+ owner += to_string(bucket_info.owner);
}
bucket_str += bucket_info.bucket.name;
diff --git a/src/rgw/driver/rados/rgw_sync_module_es.cc b/src/rgw/driver/rados/rgw_sync_module_es.cc
index e3353dc1fc7..414fbeac4c9 100644
--- a/src/rgw/driver/rados/rgw_sync_module_es.cc
+++ b/src/rgw/driver/rados/rgw_sync_module_es.cc
@@ -229,7 +229,7 @@ struct ElasticConfig {
bool should_handle_operation(RGWBucketInfo& bucket_info) {
return index_buckets.exists(bucket_info.bucket.name) &&
- allow_owners.exists(bucket_info.owner.to_str());
+ allow_owners.exists(to_string(bucket_info.owner));
}
};
@@ -501,12 +501,12 @@ struct es_obj_metadata {
const RGWAccessControlList& acl = policy.get_acl();
- permissions.insert(policy.get_owner().id.to_str());
+ permissions.insert(to_string(policy.get_owner().id));
for (const auto& acliter : acl.get_grant_map()) {
const ACLGrant& grant = acliter.second;
const auto* user = grant.get_user();
if (user && (grant.get_permission().get_permissions() & RGW_PERM_READ) != 0) {
- permissions.insert(user->id.to_str());
+ permissions.insert(to_string(user->id));
}
}
} else if (attr_name == RGW_ATTR_TAGS) {
diff --git a/src/rgw/driver/rados/rgw_user.cc b/src/rgw/driver/rados/rgw_user.cc
index 4e48506202f..e154813aac1 100644
--- a/src/rgw/driver/rados/rgw_user.cc
+++ b/src/rgw/driver/rados/rgw_user.cc
@@ -5,8 +5,10 @@
#include "rgw_user.h"
+#include "rgw_account.h"
#include "rgw_bucket.h"
#include "rgw_quota.h"
+#include "rgw_rest_iam.h" // validate_iam_user_name()
#include "services/svc_user.h"
#include "services/svc_meta.h"
@@ -163,7 +165,7 @@ static void dump_user_info(Formatter *f, RGWUserInfo &info,
encode_json("user_quota", info.quota.user_quota, f);
encode_json("temp_url_keys", info.temp_url_keys, f);
- string user_source_type;
+ std::string_view user_source_type;
switch ((RGWIdentityType)info.type) {
case TYPE_RGW:
user_source_type = "rgw";
@@ -177,6 +179,9 @@ static void dump_user_info(Formatter *f, RGWUserInfo &info,
case TYPE_NONE:
user_source_type = "none";
break;
+ case TYPE_ROOT:
+ user_source_type = "root";
+ break;
default:
user_source_type = "none";
break;
@@ -491,6 +496,41 @@ int RGWAccessKeyPool::check_op(RGWUserAdminOpState& op_state,
return 0;
}
+void rgw_generate_secret_key(CephContext* cct,
+ std::string& secret_key)
+{
+ char secret_key_buf[SECRET_KEY_LEN + 1];
+ gen_rand_alphanumeric_plain(cct, secret_key_buf, sizeof(secret_key_buf));
+ secret_key = secret_key_buf;
+}
+
+int rgw_generate_access_key(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ rgw::sal::Driver* driver,
+ std::string& access_key_id)
+{
+ std::string id;
+ int r = 0;
+
+ do {
+ id.resize(PUBLIC_ID_LEN + 1);
+ gen_rand_alphanumeric_upper(dpp->get_cct(), id.data(), id.size());
+ id.pop_back(); // remove trailing null
+
+ if (!validate_access_key(id))
+ continue;
+
+ std::unique_ptr<rgw::sal::User> duplicate_check;
+ r = driver->get_user_by_access_key(dpp, id, y, &duplicate_check);
+ } while (r == 0);
+
+ if (r == -ENOENT) {
+ access_key_id = std::move(id);
+ return 0;
+ }
+ return r;
+}
+
// Generate a new random key
int RGWAccessKeyPool::generate_key(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state,
optional_yield y, std::string *err_msg)
@@ -498,7 +538,6 @@ int RGWAccessKeyPool::generate_key(const DoutPrefixProvider *dpp, RGWUserAdminOp
std::string id;
std::string key;
- std::pair<std::string, RGWAccessKey> key_pair;
RGWAccessKey new_key;
std::unique_ptr<rgw::sal::User> duplicate_check;
@@ -553,23 +592,16 @@ int RGWAccessKeyPool::generate_key(const DoutPrefixProvider *dpp, RGWUserAdminOp
key = op_state.get_secret_key();
} else {
- char secret_key_buf[SECRET_KEY_LEN + 1];
- gen_rand_alphanumeric_plain(g_ceph_context, secret_key_buf, sizeof(secret_key_buf));
- key = secret_key_buf;
+ rgw_generate_secret_key(dpp->get_cct(), key);
}
// Generate the access key
if (key_type == KEY_TYPE_S3 && gen_access) {
- char public_id_buf[PUBLIC_ID_LEN + 1];
-
- do {
- int id_buf_size = sizeof(public_id_buf);
- gen_rand_alphanumeric_upper(g_ceph_context, public_id_buf, id_buf_size);
- id = public_id_buf;
- if (!validate_access_key(id))
- continue;
-
- } while (!driver->get_user_by_access_key(dpp, id, y, &duplicate_check));
+ int r = rgw_generate_access_key(dpp, y, driver, id);
+ if (r < 0) {
+ set_err_msg(err_msg, "failed to generate s3 access key");
+ return -ERR_INVALID_ACCESS_KEY;
+ }
}
if (key_type == KEY_TYPE_SWIFT) {
@@ -590,13 +622,16 @@ int RGWAccessKeyPool::generate_key(const DoutPrefixProvider *dpp, RGWUserAdminOp
new_key.id = id;
new_key.key = key;
- key_pair.first = id;
- key_pair.second = new_key;
+ if (op_state.create_date) {
+ new_key.create_date = *op_state.create_date;
+ } else {
+ new_key.create_date = ceph::real_clock::now();
+ }
if (key_type == KEY_TYPE_S3) {
- access_keys->insert(key_pair);
+ access_keys->emplace(id, new_key);
} else if (key_type == KEY_TYPE_SWIFT) {
- swift_keys->insert(key_pair);
+ swift_keys->emplace(id, new_key);
}
return 0;
@@ -660,6 +695,9 @@ int RGWAccessKeyPool::modify_key(RGWUserAdminOpState& op_state, std::string *err
if (op_state.access_key_active) {
modify_key.active = *op_state.access_key_active;
}
+ if (op_state.create_date) {
+ modify_key.create_date = *op_state.create_date;
+ }
if (key_type == KEY_TYPE_S3) {
(*access_keys)[id] = modify_key;
@@ -1571,8 +1609,9 @@ int RGWUser::execute_rename(const DoutPrefixProvider *dpp, RGWUserAdminOpState&
rgw::sal::BucketList listing;
do {
- ret = old_user->list_buckets(dpp, listing.next_marker, "",
- max_entries, false, listing, y);
+ ret = driver->list_buckets(dpp, old_user->get_id(), old_user->get_tenant(),
+ listing.next_marker, "", max_entries, false,
+ listing, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to list user buckets");
return ret;
@@ -1614,6 +1653,92 @@ int RGWUser::execute_rename(const DoutPrefixProvider *dpp, RGWUserAdminOpState&
return update(dpp, op_state, err_msg, y);
}
+// when setting RGWUserInfo::account_id, verify that the account metadata
+// exists and matches the user's tenant
+static int validate_account_tenant(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ rgw::sal::Driver* driver,
+ std::string_view account_id,
+ std::string_view tenant,
+ std::string& err)
+{
+ RGWAccountInfo info;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+ int r = driver->load_account_by_id(dpp, y, account_id, info, attrs, objv);
+ if (r < 0) {
+ err = "Failed to load account by id";
+ return r;
+ }
+ if (info.tenant != tenant) {
+ err = "User tenant does not match account tenant";
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int adopt_user_bucket(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ rgw::sal::Driver* driver,
+ const rgw_bucket& bucketid,
+ const rgw_owner& new_owner)
+{
+ // retry in case of racing writes to the bucket instance metadata
+ static constexpr auto max_retries = 10;
+ int tries = 0;
+ int r = 0;
+
+ do {
+ ldpp_dout(dpp, 1) << "adopting bucket " << bucketid << "..." << dendl;
+
+ std::unique_ptr<rgw::sal::Bucket> bucket;
+ r = driver->load_bucket(dpp, bucketid, &bucket, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 1) << "failed to load bucket " << bucketid
+ << ": " << cpp_strerror(r) << dendl;
+ return r;
+ }
+
+ r = bucket->chown(dpp, new_owner, y);
+ if (r < 0) {
+ ldpp_dout(dpp, 1) << "failed to chown bucket " << bucketid
+ << ": " << cpp_strerror(r) << dendl;
+ }
+ ++tries;
+ } while (r == -ECANCELED && tries < max_retries);
+
+ return r;
+}
+
+static int adopt_user_buckets(const DoutPrefixProvider* dpp, optional_yield y,
+ rgw::sal::Driver* driver, const rgw_user& user,
+ const rgw_account_id& account_id)
+{
+ const size_t max_chunk = dpp->get_cct()->_conf->rgw_list_buckets_max_chunk;
+ constexpr bool need_stats = false;
+
+ ldpp_dout(dpp, 1) << "adopting all buckets owned by " << user
+ << " into account " << account_id << dendl;
+
+ rgw::sal::BucketList listing;
+ do {
+ int r = driver->list_buckets(dpp, user, user.tenant, listing.next_marker,
+ "", max_chunk, need_stats, listing, y);
+ if (r < 0) {
+ return r;
+ }
+
+ for (const auto& ent : listing.buckets) {
+ r = adopt_user_bucket(dpp, y, driver, ent.bucket, account_id);
+ if (r < 0 && r != -ENOENT) {
+ return r;
+ }
+ }
+ } while (!listing.next_marker.empty());
+
+ return 0;
+}
+
int RGWUser::execute_add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg,
optional_yield y)
{
@@ -1628,6 +1753,12 @@ int RGWUser::execute_add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_
user_info.display_name = display_name;
user_info.type = TYPE_RGW;
+ // tenant must not look like a valid account id
+ if (rgw::account::validate_id(uid.tenant)) {
+ set_err_msg(err_msg, "tenant must not be formatted as an account id");
+ return -EINVAL;
+ }
+
if (!user_email.empty())
user_info.user_email = user_email;
@@ -1674,6 +1805,50 @@ int RGWUser::execute_add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_
user_info.placement_tags = op_state.placement_tags;
}
+ if (!op_state.account_id.empty()) {
+ if (!rgw::account::validate_id(op_state.account_id, err_msg)) {
+ return -EINVAL;
+ }
+ // tenant must match account.tenant
+ std::string err;
+ int ret = validate_account_tenant(dpp, y, driver, op_state.account_id,
+ user_info.user_id.tenant, err);
+ if (ret < 0) {
+ set_err_msg(err_msg, err);
+ return ret;
+ }
+ user_info.account_id = op_state.account_id;
+ }
+
+ if (op_state.account_root) {
+ if (user_info.account_id.empty()) {
+ set_err_msg(err_msg, "account-root user must belong to an account");
+ return -EINVAL;
+ }
+ user_info.type = TYPE_ROOT;
+ }
+
+ if (!user_info.account_id.empty()) {
+ // validate user name according to iam api
+ std::string err;
+ if (!validate_iam_user_name(user_info.display_name, err)) {
+ set_err_msg(err_msg, err);
+ return -EINVAL;
+ }
+ }
+
+ if (!op_state.path.empty()) {
+ user_info.path = op_state.path;
+ } else {
+ user_info.path = "/";
+ }
+
+ if (op_state.create_date) {
+ user_info.create_date = *op_state.create_date;
+ } else {
+ user_info.create_date = ceph::real_clock::now();
+ }
+
// update the request
op_state.set_user_info(user_info);
op_state.set_populated();
@@ -1772,8 +1947,9 @@ int RGWUser::execute_remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState&
rgw::sal::BucketList listing;
do {
- ret = user->list_buckets(dpp, listing.next_marker, string(),
- max_buckets, false, listing, y);
+ ret = driver->list_buckets(dpp, user->get_id(), user->get_tenant(),
+ listing.next_marker, string(),
+ max_buckets, false, listing, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to list user buckets");
return ret;
@@ -1928,8 +2104,9 @@ int RGWUser::execute_modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState&
rgw::sal::BucketList listing;
do {
- ret = user->list_buckets(dpp, listing.next_marker, string(),
- max_buckets, false, listing, y);
+ ret = driver->list_buckets(dpp, user->get_id(), user->get_tenant(),
+ listing.next_marker, string(),
+ max_buckets, false, listing, y);
if (ret < 0) {
set_err_msg(err_msg, "could not get buckets for uid: " + user_id.to_str());
return ret;
@@ -1961,6 +2138,61 @@ int RGWUser::execute_modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState&
user_info.placement_tags = op_state.placement_tags;
}
+ if (!op_state.account_id.empty()) {
+ if (!rgw::account::validate_id(op_state.account_id, err_msg)) {
+ return -EINVAL;
+ }
+ if (user_info.account_id != op_state.account_id) {
+ // allow users to migrate into an account, but don't allow them to leave
+ if (!user_info.account_id.empty()) {
+ set_err_msg(err_msg, "users cannot be moved out of their account");
+ return -EINVAL;
+ }
+ user_info.account_id = op_state.account_id;
+
+ // tenant must match new account.tenant
+ std::string err;
+ ret = validate_account_tenant(dpp, y, driver, op_state.account_id,
+ user_info.user_id.tenant, err);
+ if (ret < 0) {
+ set_err_msg(err_msg, err);
+ return ret;
+ }
+ // change account on user's buckets
+ ret = adopt_user_buckets(dpp, y, driver, user_info.user_id,
+ user_info.account_id);
+ if (ret < 0) {
+ set_err_msg(err_msg, "failed to change ownership of user's buckets");
+ return ret;
+ }
+ }
+ }
+
+ if (op_state.account_root_specified) {
+ if (op_state.account_root && user_info.account_id.empty()) {
+ set_err_msg(err_msg, "account-root user must belong to an account");
+ return -EINVAL;
+ }
+ user_info.type = op_state.account_root ? TYPE_ROOT : TYPE_RGW;
+ }
+
+ if (!user_info.account_id.empty()) {
+ // validate user name according to iam api
+ std::string err;
+ if (!validate_iam_user_name(user_info.display_name, err)) {
+ set_err_msg(err_msg, err);
+ return -EINVAL;
+ }
+ }
+
+ if (!op_state.path.empty()) {
+ user_info.path = op_state.path;
+ }
+
+ if (op_state.create_date) {
+ user_info.create_date = *op_state.create_date;
+ }
+
op_state.set_user_info(user_info);
// if we're supposed to modify keys, do so
@@ -2121,8 +2353,15 @@ int RGWUserAdminOp_User::info(const DoutPrefixProvider *dpp,
ruser = driver->get_user(info.user_id);
+ rgw_owner owner = info.user_id;
+ if (!info.account_id.empty()) {
+ ldpp_dout(dpp, 4) << "Reading stats for user account "
+ << info.account_id << dendl;
+ owner = info.account_id;
+ }
+
if (op_state.sync_stats) {
- ret = rgw_user_sync_all_stats(dpp, driver, ruser.get(), y);
+ ret = rgw_sync_all_stats(dpp, y, driver, owner, ruser->get_tenant());
if (ret < 0) {
return ret;
}
@@ -2131,7 +2370,10 @@ int RGWUserAdminOp_User::info(const DoutPrefixProvider *dpp,
RGWStorageStats stats;
RGWStorageStats *arg_stats = NULL;
if (op_state.fetch_stats) {
- int ret = ruser->read_stats(dpp, y, &stats);
+ ceph::real_time last_synced; // ignored
+ ceph::real_time last_updated; // ignored
+ int ret = driver->load_stats(dpp, y, owner, stats,
+ last_synced, last_updated);
if (ret < 0 && ret != -ENOENT) {
return ret;
}
@@ -2644,6 +2886,7 @@ int RGWUserCtl::get_info_by_email(const DoutPrefixProvider *dpp,
return svc.user->get_user_info_by_email(op->ctx(), email,
info,
params.objv_tracker,
+ params.attrs,
params.mtime,
y,
dpp);
@@ -2660,6 +2903,7 @@ int RGWUserCtl::get_info_by_swift(const DoutPrefixProvider *dpp,
return svc.user->get_user_info_by_swift(op->ctx(), swift_name,
info,
params.objv_tracker,
+ params.attrs,
params.mtime,
y,
dpp);
@@ -2676,6 +2920,7 @@ int RGWUserCtl::get_info_by_access_key(const DoutPrefixProvider *dpp,
return svc.user->get_user_info_by_access_key(op->ctx(), access_key,
info,
params.objv_tracker,
+ params.attrs,
params.mtime,
y,
dpp);
@@ -2727,49 +2972,6 @@ int RGWUserCtl::remove_info(const DoutPrefixProvider *dpp,
});
}
-int RGWUserCtl::list_buckets(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- const string& marker,
- const string& end_marker,
- uint64_t max,
- bool need_stats,
- RGWUserBuckets *buckets,
- bool *is_truncated,
- optional_yield y,
- uint64_t default_max)
-{
- if (!max) {
- max = default_max;
- }
-
- int ret = svc.user->list_buckets(dpp, user, marker, end_marker,
- max, buckets, is_truncated, y);
- if (ret < 0) {
- return ret;
- }
- if (need_stats) {
- map<string, RGWBucketEnt>& m = buckets->get_buckets();
- ret = ctl.bucket->read_buckets_stats(m, y, dpp);
- if (ret < 0 && ret != -ENOENT) {
- ldpp_dout(dpp, 0) << "ERROR: could not get stats for buckets" << dendl;
- return ret;
- }
- }
- return 0;
-}
-
-int RGWUserCtl::read_stats(const DoutPrefixProvider *dpp,
- const rgw_user& user, RGWStorageStats *stats,
- optional_yield y,
- ceph::real_time *last_stats_sync,
- ceph::real_time *last_stats_update)
-{
- return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
- return svc.user->read_stats(dpp, op->ctx(), user, stats,
- last_stats_sync, last_stats_update, y);
- });
-}
-
RGWMetadataHandler *RGWUserMetaHandlerAllocator::alloc(RGWSI_User *user_svc) {
return new RGWUserMetadataHandler(user_svc);
}
diff --git a/src/rgw/driver/rados/rgw_user.h b/src/rgw/driver/rados/rgw_user.h
index 46500fb1aca..d3303f96257 100644
--- a/src/rgw/driver/rados/rgw_user.h
+++ b/src/rgw/driver/rados/rgw_user.h
@@ -30,32 +30,39 @@ class RGWUserCtl;
class RGWBucketCtl;
class RGWUserBuckets;
+// generate a random secret access key of length SECRET_KEY_LEN=40
+void rgw_generate_secret_key(CephContext* cct,
+ std::string& secret_key);
+
+// generate a unique random access key id of length PUBLIC_ID_LEN=20
+int rgw_generate_access_key(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ rgw::sal::Driver* driver,
+ std::string& access_key_id);
+
/**
- * A string wrapper that includes encode/decode functions
- * for easily accessing a UID in all forms
+ * A string wrapper that includes encode/decode functions for easily accessing
+ * a UID in all forms. In some objects, this may refer to an account id instead
+ * of a user.
*/
struct RGWUID
{
- rgw_user user_id;
+ std::string id;
void encode(bufferlist& bl) const {
- std::string s;
- user_id.to_str(s);
using ceph::encode;
- encode(s, bl);
+ encode(id, bl);
}
void decode(bufferlist::const_iterator& bl) {
- std::string s;
using ceph::decode;
- decode(s, bl);
- user_id.from_str(s);
+ decode(id, bl);
}
void dump(Formatter *f) const {
- f->dump_string("user_id", user_id.to_str());
+ f->dump_string("user_id", id);
}
static void generate_test_instances(std::list<RGWUID*>& o) {
o.push_back(new RGWUID);
o.push_back(new RGWUID);
- o.back()->user_id.from_str("test:tester");
+ o.back()->id = "test:tester";
}
};
WRITE_CLASS_ENCODER(RGWUID)
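
Since RGWUID has always serialized a single string, this change is wire-compatible: blobs written by older versions, which encoded rgw_user::to_str(), decode directly into the new id field, whether that string names a user or (going forward) an account. A minimal round-trip sketch, assuming Ceph's encoding helpers and the RGWUID declaration above:

#include "include/buffer.h"
#include "include/encoding.h"
// plus the header declaring RGWUID (rgw_user.h in this tree)

void rgwuid_round_trip()
{
  RGWUID in;
  in.id = "tenant$user";  // the form rgw_user::to_str() used to produce
  bufferlist bl;
  encode(in, bl);

  RGWUID out;
  auto p = bl.cbegin();
  decode(out, p);
  // out.id == "tenant$user", unchanged from the old on-disk format
}
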
@@ -68,7 +75,9 @@ struct bucket_meta_entry {
uint64_t count;
};
-extern int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::User* user, optional_yield y);
+int rgw_sync_all_stats(const DoutPrefixProvider *dpp,
+ optional_yield y, rgw::sal::Driver* driver,
+ const rgw_owner& owner, const std::string& tenant);
extern int rgw_user_get_all_buckets_stats(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver, rgw::sal::User* user,
std::map<std::string, bucket_meta_entry>& buckets_usage_map, optional_yield y);
@@ -116,6 +125,7 @@ struct RGWUserAdminOpState {
__u8 suspended{0};
__u8 admin{0};
__u8 system{0};
+ __u8 account_root{0};
__u8 exclusive{0};
__u8 fetch_stats{0};
__u8 sync_stats{0};
@@ -123,6 +133,9 @@ struct RGWUserAdminOpState {
RGWObjVersionTracker objv;
uint32_t op_mask{0};
std::map<int, std::string> temp_url_keys;
+ std::string account_id;
+ std::string path;
+ std::optional<ceph::real_time> create_date;
// subuser attributes
std::string subuser;
@@ -163,6 +176,7 @@ struct RGWUserAdminOpState {
bool suspension_op{false};
bool admin_specified{false};
bool system_specified{false};
+ bool account_root_specified{false};
bool key_op{false};
bool temp_url_key_specified{false};
bool found_by_uid{false};
@@ -227,9 +241,7 @@ struct RGWUserAdminOpState {
overwrite_new_user = b;
}
- void set_user_email(std::string& email) {
- /* always lowercase email address */
- boost::algorithm::to_lower(email);
+ void set_user_email(const std::string& email) {
user_email = email;
user_email_specified = true;
}
@@ -295,6 +307,11 @@ struct RGWUserAdminOpState {
system_specified = true;
}
+ void set_account_root(__u8 is_account_root) {
+ account_root = is_account_root;
+ account_root_specified = true;
+ }
+
void set_exclusive(__u8 is_exclusive) {
exclusive = is_exclusive;
}
@@ -875,23 +892,6 @@ public:
int remove_info(const DoutPrefixProvider *dpp,
const RGWUserInfo& info, optional_yield y,
const RemoveParams& params = {});
-
- int list_buckets(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- const std::string& marker,
- const std::string& end_marker,
- uint64_t max,
- bool need_stats,
- RGWUserBuckets *buckets,
- bool *is_truncated,
- optional_yield y,
- uint64_t default_max = 1000);
-
- int read_stats(const DoutPrefixProvider *dpp,
- const rgw_user& user, RGWStorageStats *stats,
- optional_yield y,
- ceph::real_time *last_stats_sync = nullptr, /* last time a full stats sync completed */
- ceph::real_time *last_stats_update = nullptr); /* last time a stats update was done */
};
class RGWUserMetaHandlerAllocator {
diff --git a/src/rgw/driver/rados/rgw_zone.h b/src/rgw/driver/rados/rgw_zone.h
index 345cbc6a217..c542abc76d6 100644
--- a/src/rgw/driver/rados/rgw_zone.h
+++ b/src/rgw/driver/rados/rgw_zone.h
@@ -115,6 +115,8 @@ struct RGWZoneParams : RGWSystemMetaObj {
rgw_pool oidc_pool;
rgw_pool notif_pool;
rgw_pool topics_pool;
+ rgw_pool account_pool;
+ rgw_pool group_pool;
RGWAccessKey system_key;
@@ -178,6 +180,8 @@ struct RGWZoneParams : RGWSystemMetaObj {
encode(oidc_pool, bl);
encode(notif_pool, bl);
encode(topics_pool, bl);
+ encode(account_pool, bl);
+ encode(group_pool, bl);
ENCODE_FINISH(bl);
}
@@ -253,8 +257,12 @@ struct RGWZoneParams : RGWSystemMetaObj {
}
if (struct_v >= 15) {
decode(topics_pool, bl);
+ decode(account_pool, bl);
+ decode(group_pool, bl);
} else {
topics_pool = name + ".rgw.meta:topics";
+ account_pool = name + ".rgw.meta:accounts";
+ group_pool = name + ".rgw.meta:groups";
}
DECODE_FINISH(bl);
}
diff --git a/src/rgw/driver/rados/roles.cc b/src/rgw/driver/rados/roles.cc
new file mode 100644
index 00000000000..62a0aef8aad
--- /dev/null
+++ b/src/rgw/driver/rados/roles.cc
@@ -0,0 +1,174 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "roles.h"
+
+#include "include/rados/librados.hpp"
+#include "common/ceph_json.h"
+#include "common/dout.h"
+#include "cls/user/cls_user_client.h"
+#include "rgw_role.h"
+#include "rgw_sal.h"
+
+namespace rgwrados::roles {
+
+int add(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ const rgw::sal::RGWRoleInfo& role,
+ bool exclusive, uint32_t limit)
+{
+ resource_metadata meta;
+ meta.role_id = role.id;
+
+ cls_user_account_resource resource;
+ resource.name = role.name;
+ resource.path = role.path;
+ encode(meta, resource.metadata);
+
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectWriteOperation op;
+ ::cls_user_account_resource_add(op, resource, exclusive, limit);
+ return ref.operate(dpp, &op, y);
+}
+
+int get(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view name,
+ std::string& role_id)
+{
+ cls_user_account_resource resource;
+
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectReadOperation op;
+ int ret = 0;
+ ::cls_user_account_resource_get(op, name, resource, &ret);
+
+ r = ref.operate(dpp, &op, nullptr, y);
+ if (r < 0) {
+ return r;
+ }
+ if (ret < 0) {
+ return ret;
+ }
+
+ resource_metadata meta;
+ try {
+ auto p = resource.metadata.cbegin();
+ decode(meta, p);
+ } catch (const buffer::error&) {
+ return -EIO;
+ }
+ role_id = std::move(meta.role_id);
+ return 0;
+}
+
+int remove(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view name)
+{
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectWriteOperation op;
+ ::cls_user_account_resource_rm(op, name);
+ return ref.operate(dpp, &op, y);
+}
+
+int list(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view marker,
+ std::string_view path_prefix,
+ uint32_t max_items,
+ std::vector<std::string>& ids,
+ std::string& next_marker)
+{
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectReadOperation op;
+ std::vector<cls_user_account_resource> entries;
+ bool truncated = false;
+ int ret = 0;
+ ::cls_user_account_resource_list(op, marker, path_prefix, max_items,
+ entries, &truncated, &next_marker, &ret);
+
+ r = ref.operate(dpp, &op, nullptr, y);
+ if (r == -ENOENT) {
+ next_marker.clear();
+ return 0;
+ }
+ if (r < 0) {
+ return r;
+ }
+ if (ret < 0) {
+ return ret;
+ }
+
+ for (auto& resource : entries) {
+ resource_metadata meta;
+ try {
+ auto p = resource.metadata.cbegin();
+ decode(meta, p);
+ } catch (const buffer::error&) {
+ return -EIO;
+ }
+ ids.push_back(std::move(meta.role_id));
+ }
+
+ if (!truncated) {
+ next_marker.clear();
+ }
+ return 0;
+}
+
+
+void resource_metadata::dump(ceph::Formatter* f) const
+{
+ encode_json("role_id", role_id, f);
+}
+
+void resource_metadata::generate_test_instances(std::list<resource_metadata*>& o)
+{
+ o.push_back(new resource_metadata);
+ auto m = new resource_metadata;
+ m->role_id = "id";
+ o.push_back(m);
+}
+
+} // namespace rgwrados::roles
diff --git a/src/rgw/driver/rados/roles.h b/src/rgw/driver/rados/roles.h
new file mode 100644
index 00000000000..eb53820039e
--- /dev/null
+++ b/src/rgw/driver/rados/roles.h
@@ -0,0 +1,86 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#pragma once
+
+#include <list>
+#include <string>
+#include "include/rados/librados_fwd.hpp"
+#include "include/encoding.h"
+#include "rgw_sal_fwd.h"
+
+namespace ceph { class Formatter; }
+class DoutPrefixProvider;
+class optional_yield;
+struct rgw_raw_obj;
+
+
+namespace rgwrados::roles {
+
+/// Add the given role to the list.
+int add(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ const rgw::sal::RGWRoleInfo& role,
+ bool exclusive, uint32_t limit);
+
+/// Look up a role's id by name in the list.
+int get(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view name,
+ std::string& role_id);
+
+/// Remove the given role from the list.
+int remove(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view name);
+
+/// Return a paginated listing of role ids.
+int list(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view marker,
+ std::string_view path_prefix,
+ uint32_t max_items,
+ std::vector<std::string>& ids,
+ std::string& next_marker);
+
+// role-specific metadata for cls_user_account_resource
+struct resource_metadata {
+ std::string role_id;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(role_id, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(role_id, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter* f) const;
+ static void generate_test_instances(std::list<resource_metadata*>& o);
+};
+WRITE_CLASS_ENCODER(resource_metadata);
+
+} // namespace rgwrados::roles
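
For orientation, a minimal caller sketch for the add() path. The roles_obj argument is assumed to be the account's roles listing object, resolved by an account::get_roles_obj() helper analogous to the get_topics_obj() used in topic.cc (hypothetical here):

#include "roles.h"

// Link a new role into its account's listing object. The cls op
// enforces both name uniqueness (exclusive) and the account's
// max_roles quota in a single round trip.
int link_role(const DoutPrefixProvider* dpp, optional_yield y,
              librados::Rados& rados, const rgw_raw_obj& roles_obj,
              const rgw::sal::RGWRoleInfo& role, uint32_t max_roles)
{
  constexpr bool exclusive = true;  // fail if the name is already taken
  return rgwrados::roles::add(dpp, y, rados, roles_obj, role,
                              exclusive, max_roles);
}
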
diff --git a/src/rgw/driver/rados/topic.cc b/src/rgw/driver/rados/topic.cc
index 86ce6bb819b..7769649fbbf 100644
--- a/src/rgw/driver/rados/topic.cc
+++ b/src/rgw/driver/rados/topic.cc
@@ -15,6 +15,8 @@
#include "topic.h"
#include "common/errno.h"
+#include "account.h"
+#include "rgw_account.h"
#include "rgw_common.h"
#include "rgw_metadata.h"
#include "rgw_metadata_lister.h"
@@ -26,6 +28,7 @@
#include "rgw_zone.h"
#include "svc_mdlog.h"
#include "svc_sys_obj_cache.h"
+#include "topics.h"
namespace rgwrados::topic {
@@ -95,11 +98,12 @@ int read(const DoutPrefixProvider* dpp, optional_yield y,
}
int write(const DoutPrefixProvider* dpp, optional_yield y,
- RGWSI_SysObj& sysobj, RGWSI_MDLog* mdlog, const RGWZoneParams& zone,
+ RGWSI_SysObj& sysobj, RGWSI_MDLog* mdlog,
+ librados::Rados& rados, const RGWZoneParams& zone,
const rgw_pubsub_topic& info, RGWObjVersionTracker& objv,
ceph::real_time mtime, bool exclusive)
{
- const std::string topic_key = get_topic_metadata_key(info.user.tenant, info.name);
+ const std::string topic_key = get_topic_metadata_key(info);
const rgw_raw_obj obj = get_topic_obj(zone, topic_key);
bufferlist bl;
@@ -113,6 +117,17 @@ int write(const DoutPrefixProvider* dpp, optional_yield y,
return r;
}
+ if (const auto* id = std::get_if<rgw_account_id>(&info.owner); id) {
+ // link the topic to its account
+ const auto& topics = account::get_topics_obj(zone, *id);
+ r = topics::add(dpp, y, rados, topics, info, false,
+ std::numeric_limits<uint32_t>::max());
+ if (r < 0) {
+ ldpp_dout(dpp, 0) << "WARNING: could not link topic to account "
+ << *id << ": " << cpp_strerror(r) << dendl;
+ } // not fatal
+ }
+
// record in the mdlog on success
if (mdlog) {
return mdlog->complete_entry(dpp, y, "topic", topic_key, &objv);
@@ -121,9 +136,13 @@ int write(const DoutPrefixProvider* dpp, optional_yield y,
}
int remove(const DoutPrefixProvider* dpp, optional_yield y,
- RGWSI_SysObj& sysobj, RGWSI_MDLog* mdlog, const RGWZoneParams& zone,
- const std::string& topic_key, RGWObjVersionTracker& objv)
+ RGWSI_SysObj& sysobj, RGWSI_MDLog* mdlog,
+ librados::Rados& rados, const RGWZoneParams& zone,
+ const std::string& tenant, const std::string& name,
+ RGWObjVersionTracker& objv)
{
+ const std::string topic_key = get_topic_metadata_key(tenant, name);
+
// delete topic info
const rgw_raw_obj topic = get_topic_obj(zone, topic_key);
int r = rgw_delete_system_obj(dpp, &sysobj, topic.pool, topic.oid, &objv, y);
@@ -142,6 +161,16 @@ int remove(const DoutPrefixProvider* dpp, optional_yield y,
<< buckets.oid << " with: " << cpp_strerror(r) << dendl;
} // not fatal
+ if (rgw::account::validate_id(tenant)) {
+ // unlink the name from its account
+ const auto& topics = account::get_topics_obj(zone, tenant);
+ r = topics::remove(dpp, y, rados, topics, name);
+ if (r < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: could not unlink from account "
+ << tenant << ": " << cpp_strerror(r) << dendl;
+ } // not fatal
+ }
+
// record in the mdlog on success
if (mdlog) {
return mdlog->complete_entry(dpp, y, "topic", topic_key, &objv);
@@ -265,14 +294,16 @@ class MetadataHandler : public RGWMetadataHandler {
RGWSI_SysObj& sysobj;
RGWSI_SysObj_Cache* cache_svc;
RGWSI_MDLog& mdlog;
+ librados::Rados& rados;
const RGWZoneParams& zone;
RGWChainedCacheImpl<cache_entry>& cache;
public:
MetadataHandler(RGWSI_SysObj& sysobj, RGWSI_SysObj_Cache* cache_svc,
- RGWSI_MDLog& mdlog, const RGWZoneParams& zone,
+ RGWSI_MDLog& mdlog, librados::Rados& rados,
+ const RGWZoneParams& zone,
RGWChainedCacheImpl<cache_entry>& cache)
: sysobj(sysobj), cache_svc(cache_svc), mdlog(mdlog),
- zone(zone), cache(cache)
+ rados(rados), zone(zone), cache(cache)
{}
std::string get_type() final { return "topic"; }
@@ -316,16 +347,17 @@ class MetadataHandler : public RGWMetadataHandler {
auto mtime = robj->get_mtime();
constexpr bool exclusive = false;
- int r = write(dpp, y, sysobj, &mdlog, zone, info,
- objv_tracker, mtime, exclusive);
+ int r = write(dpp, y, sysobj, &mdlog, rados, zone,
+ info, objv_tracker, mtime, exclusive);
if (r < 0) {
return r;
}
- if (!info.dest.push_endpoint.empty() && info.dest.persistent) {
- r = rgw::notify::add_persistent_topic(info.name, y);
+ if (!info.dest.push_endpoint.empty() && info.dest.persistent &&
+ !info.dest.persistent_queue.empty()) {
+ r = rgw::notify::add_persistent_topic(info.dest.persistent_queue, y);
if (r < 0) {
ldpp_dout(dpp, 1) << "ERROR: failed to create queue for persistent topic "
- << info.name << " with: " << cpp_strerror(r) << dendl;
+ << info.dest.persistent_queue << " with: " << cpp_strerror(r) << dendl;
return r;
}
}
@@ -335,19 +367,33 @@ class MetadataHandler : public RGWMetadataHandler {
int remove(std::string& entry, RGWObjVersionTracker& objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) override
{
- int r = topic::remove(dpp, y, sysobj, &mdlog, zone, entry, objv_tracker);
- if (r < 0) {
- return r;
- }
- // delete persistent topic queue. expect ENOENT for non-persistent topics
std::string name;
std::string tenant;
parse_topic_metadata_key(entry, tenant, name);
- r = rgw::notify::remove_persistent_topic(name, y);
- if (r < 0 && r != -ENOENT) {
- ldpp_dout(dpp, 1) << "Failed to delete queue for persistent topic: "
- << name << " with error: " << r << dendl;
- } // not fatal
+
+ rgw_pubsub_topic info;
+ int r = read(dpp, y, sysobj, cache_svc, zone, entry,
+ info, cache, nullptr, &objv_tracker);
+ if (r < 0) {
+ return r;
+ }
+
+ r = topic::remove(dpp, y, sysobj, &mdlog, rados, zone,
+ tenant, name, objv_tracker);
+ if (r < 0) {
+ return r;
+ }
+
+ const rgw_pubsub_dest& dest = info.dest;
+ if (!dest.push_endpoint.empty() && dest.persistent &&
+ !dest.persistent_queue.empty()) {
+ // delete persistent topic queue
+ r = rgw::notify::remove_persistent_topic(dest.persistent_queue, y);
+ if (r < 0 && r != -ENOENT) {
+ ldpp_dout(dpp, 1) << "Failed to delete queue for persistent topic: "
+ << name << " with error: " << r << dendl;
+ } // not fatal
+ }
return 0;
}
@@ -397,12 +443,13 @@ class MetadataHandler : public RGWMetadataHandler {
auto create_metadata_handler(RGWSI_SysObj& sysobj,
RGWSI_SysObj_Cache* cache_svc,
- RGWSI_MDLog& mdlog, const RGWZoneParams& zone,
+ RGWSI_MDLog& mdlog, librados::Rados& rados,
+ const RGWZoneParams& zone,
RGWChainedCacheImpl<cache_entry>& cache)
-> std::unique_ptr<RGWMetadataHandler>
{
return std::make_unique<MetadataHandler>(sysobj, cache_svc, mdlog,
- zone, cache);
+ rados, zone, cache);
}
} // rgwrados::topic
diff --git a/src/rgw/driver/rados/topic.h b/src/rgw/driver/rados/topic.h
index 3799d001ec7..bcd838773bc 100644
--- a/src/rgw/driver/rados/topic.h
+++ b/src/rgw/driver/rados/topic.h
@@ -52,14 +52,16 @@ int read(const DoutPrefixProvider* dpp, optional_yield y,
/// Write or overwrite topic info.
int write(const DoutPrefixProvider* dpp, optional_yield y,
- RGWSI_SysObj& sysobj, RGWSI_MDLog* mdlog, const RGWZoneParams& zone,
+ RGWSI_SysObj& sysobj, RGWSI_MDLog* mdlog,
+ librados::Rados& rados, const RGWZoneParams& zone,
const rgw_pubsub_topic& info, RGWObjVersionTracker& objv,
ceph::real_time mtime, bool exclusive);
/// Remove a topic by metadata key.
int remove(const DoutPrefixProvider* dpp, optional_yield y,
RGWSI_SysObj& sysobj, RGWSI_MDLog* mdlog,
- const RGWZoneParams& zone, const std::string& topic_key,
+ librados::Rados& rados, const RGWZoneParams& zone,
+ const std::string& tenant, const std::string& name,
RGWObjVersionTracker& objv);
@@ -87,7 +89,8 @@ int list_buckets(const DoutPrefixProvider* dpp, optional_yield y,
/// Topic metadata handler factory.
auto create_metadata_handler(RGWSI_SysObj& sysobj,
RGWSI_SysObj_Cache* cache_svc,
- RGWSI_MDLog& mdlog, const RGWZoneParams& zone,
+ RGWSI_MDLog& mdlog, librados::Rados& rados,
+ const RGWZoneParams& zone,
RGWChainedCacheImpl<cache_entry>& cache)
-> std::unique_ptr<RGWMetadataHandler>;
diff --git a/src/rgw/driver/rados/topics.cc b/src/rgw/driver/rados/topics.cc
new file mode 100644
index 00000000000..0da4680d672
--- /dev/null
+++ b/src/rgw/driver/rados/topics.cc
@@ -0,0 +1,109 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "topics.h"
+
+#include "include/rados/librados.hpp"
+#include "common/dout.h"
+#include "cls/user/cls_user_client.h"
+#include "rgw_pubsub.h"
+#include "rgw_sal.h"
+
+namespace rgwrados::topics {
+
+int add(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ const rgw_pubsub_topic& topic,
+ bool exclusive, uint32_t limit)
+{
+ cls_user_account_resource resource;
+ resource.name = topic.name;
+
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectWriteOperation op;
+ ::cls_user_account_resource_add(op, resource, exclusive, limit);
+ return ref.operate(dpp, &op, y);
+}
+
+int remove(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view name)
+{
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectWriteOperation op;
+ ::cls_user_account_resource_rm(op, name);
+ return ref.operate(dpp, &op, y);
+}
+
+int list(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view marker,
+ uint32_t max_items,
+ std::vector<std::string>& names,
+ std::string& next_marker)
+{
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectReadOperation op;
+ const std::string path_prefix; // unused
+ std::vector<cls_user_account_resource> entries;
+ bool truncated = false;
+ int ret = 0;
+ ::cls_user_account_resource_list(op, marker, path_prefix, max_items,
+ entries, &truncated, &next_marker, &ret);
+
+ r = ref.operate(dpp, &op, nullptr, y);
+ if (r == -ENOENT) {
+ next_marker.clear();
+ return 0;
+ }
+ if (r < 0) {
+ return r;
+ }
+ if (ret < 0) {
+ return ret;
+ }
+
+ for (auto& resource : entries) {
+ names.push_back(std::move(resource.name));
+ }
+
+ if (!truncated) {
+ next_marker.clear();
+ }
+ return 0;
+}
+
+} // namespace rgwrados::topics
diff --git a/src/rgw/driver/rados/topics.h b/src/rgw/driver/rados/topics.h
new file mode 100644
index 00000000000..113db96a90f
--- /dev/null
+++ b/src/rgw/driver/rados/topics.h
@@ -0,0 +1,57 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#pragma once
+
+#include <cstdint>
+#include <string>
+#include <vector>
+#include "include/rados/librados_fwd.hpp"
+#include "rgw_sal_fwd.h"
+
+class DoutPrefixProvider;
+class optional_yield;
+struct rgw_raw_obj;
+struct rgw_pubsub_topic;
+
+
+namespace rgwrados::topics {
+
+/// Add the given topic to the list.
+int add(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ const rgw_pubsub_topic& info,
+ bool exclusive, uint32_t limit);
+
+/// Remove the given topic from the list.
+int remove(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view name);
+
+/// Return a paginated listing of topic names.
+int list(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view marker,
+ uint32_t max_items,
+ std::vector<std::string>& names,
+ std::string& next_marker);
+
+} // namespace rgwrados::topics
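
list() follows the usual marker/next_marker pagination contract: the implementation clears next_marker once the listing is no longer truncated, so callers loop until it comes back empty. A sketch, with topics_obj assumed to come from account::get_topics_obj():

#include "topics.h"

// Drain all topic names for one account, 100 entries per cls call.
int all_topic_names(const DoutPrefixProvider* dpp, optional_yield y,
                    librados::Rados& rados, const rgw_raw_obj& topics_obj,
                    std::vector<std::string>& names)
{
  std::string marker;  // empty marker starts at the beginning
  do {
    std::string next;
    int r = rgwrados::topics::list(dpp, y, rados, topics_obj,
                                   marker, 100, names, next);
    if (r < 0) {
      return r;
    }
    marker = std::move(next);  // empty next means the listing is complete
  } while (!marker.empty());
  return 0;
}
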
diff --git a/src/rgw/driver/rados/users.cc b/src/rgw/driver/rados/users.cc
new file mode 100644
index 00000000000..702863a768e
--- /dev/null
+++ b/src/rgw/driver/rados/users.cc
@@ -0,0 +1,174 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "users.h"
+
+#include "include/rados/librados.hpp"
+#include "common/ceph_json.h"
+#include "common/dout.h"
+#include "cls/user/cls_user_client.h"
+#include "rgw_common.h"
+#include "rgw_sal.h"
+
+namespace rgwrados::users {
+
+int add(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ const RGWUserInfo& user,
+ bool exclusive, uint32_t limit)
+{
+ resource_metadata meta;
+ meta.user_id = user.user_id.id;
+
+ cls_user_account_resource resource;
+ resource.name = user.display_name;
+ resource.path = user.path;
+ encode(meta, resource.metadata);
+
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectWriteOperation op;
+ ::cls_user_account_resource_add(op, resource, exclusive, limit);
+ return ref.operate(dpp, &op, y);
+}
+
+int get(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view name,
+ std::string& user_id)
+{
+ cls_user_account_resource resource;
+
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectReadOperation op;
+ int ret = 0;
+ ::cls_user_account_resource_get(op, name, resource, &ret);
+
+ r = ref.operate(dpp, &op, nullptr, y);
+ if (r < 0) {
+ return r;
+ }
+ if (ret < 0) {
+ return ret;
+ }
+
+ resource_metadata meta;
+ try {
+ auto p = resource.metadata.cbegin();
+ decode(meta, p);
+ } catch (const buffer::error&) {
+ return -EIO;
+ }
+ user_id = std::move(meta.user_id);
+ return 0;
+}
+
+int remove(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view name)
+{
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectWriteOperation op;
+ ::cls_user_account_resource_rm(op, name);
+ return ref.operate(dpp, &op, y);
+}
+
+int list(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view marker,
+ std::string_view path_prefix,
+ uint32_t max_items,
+ std::vector<std::string>& ids,
+ std::string& next_marker)
+{
+ rgw_rados_ref ref;
+ int r = rgw_get_rados_ref(dpp, &rados, obj, &ref);
+ if (r < 0) {
+ return r;
+ }
+
+ librados::ObjectReadOperation op;
+ std::vector<cls_user_account_resource> entries;
+ bool truncated = false;
+ int ret = 0;
+ ::cls_user_account_resource_list(op, marker, path_prefix, max_items,
+ entries, &truncated, &next_marker, &ret);
+
+ r = ref.operate(dpp, &op, nullptr, y);
+ if (r == -ENOENT) {
+ next_marker.clear();
+ return 0;
+ }
+ if (r < 0) {
+ return r;
+ }
+ if (ret < 0) {
+ return ret;
+ }
+
+ for (auto& resource : entries) {
+ resource_metadata meta;
+ try {
+ auto p = resource.metadata.cbegin();
+ decode(meta, p);
+ } catch (const buffer::error&) {
+ return -EIO;
+ }
+ ids.push_back(std::move(meta.user_id));
+ }
+
+ if (!truncated) {
+ next_marker.clear();
+ }
+ return 0;
+}
+
+
+void resource_metadata::dump(ceph::Formatter* f) const
+{
+ encode_json("user_id", user_id, f);
+}
+
+void resource_metadata::generate_test_instances(std::list<resource_metadata*>& o)
+{
+ o.push_back(new resource_metadata);
+ auto m = new resource_metadata;
+ m->user_id = "uid";
+ o.push_back(m);
+}
+
+} // namespace rgwrados::users
diff --git a/src/rgw/driver/rados/users.h b/src/rgw/driver/rados/users.h
new file mode 100644
index 00000000000..5a5094b6b54
--- /dev/null
+++ b/src/rgw/driver/rados/users.h
@@ -0,0 +1,87 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#pragma once
+
+#include <list>
+#include <string>
+#include "include/rados/librados_fwd.hpp"
+#include "include/encoding.h"
+#include "rgw_sal_fwd.h"
+
+namespace ceph { class Formatter; }
+class DoutPrefixProvider;
+class optional_yield;
+struct rgw_raw_obj;
+struct RGWUserInfo;
+
+
+namespace rgwrados::users {
+
+/// Add the given user to the list.
+int add(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ const RGWUserInfo& user,
+ bool exclusive, uint32_t limit);
+
+/// Look up a user's id by name in the list.
+int get(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view name,
+ std::string& user_id);
+
+/// Remove the given user from the list.
+int remove(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view name);
+
+/// Return a paginated listing of user ids.
+int list(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ librados::Rados& rados,
+ const rgw_raw_obj& obj,
+ std::string_view marker,
+ std::string_view path_prefix,
+ uint32_t max_items,
+ std::vector<std::string>& ids,
+ std::string& next_marker);
+
+// user-specific metadata for cls_user_account_resource
+struct resource_metadata {
+ std::string user_id;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(user_id, bl);
+ ENCODE_FINISH(bl);
+ }
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(user_id, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(ceph::Formatter* f) const;
+ static void generate_test_instances(std::list<resource_metadata*>& o);
+};
+WRITE_CLASS_ENCODER(resource_metadata);
+
+} // namespace rgwrados::users
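
Note the indirection this index encodes: account users are keyed by display name (that is what cls_user_account_resource.name holds), while the stable user id travels in resource_metadata. A lookup therefore resolves name to id; a sketch, with users_obj assumed to be the account's users listing object:

#include "users.h"

// Resolve an account user's display name to its stable user id.
int resolve_user(const DoutPrefixProvider* dpp, optional_yield y,
                 librados::Rados& rados, const rgw_raw_obj& users_obj,
                 std::string_view display_name, std::string& user_id)
{
  return rgwrados::users::get(dpp, y, rados, users_obj,
                              display_name, user_id);
}
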
diff --git a/src/rgw/rgw_account.cc b/src/rgw/rgw_account.cc
new file mode 100644
index 00000000000..44aa9a3d4c7
--- /dev/null
+++ b/src/rgw/rgw_account.cc
@@ -0,0 +1,529 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "rgw_account.h"
+
+#include <algorithm>
+#include <fmt/format.h>
+
+#include "common/random_string.h"
+#include "common/utf8.h"
+
+#include "rgw_oidc_provider.h"
+#include "rgw_role.h"
+#include "rgw_sal.h"
+
+#define dout_subsys ceph_subsys_rgw
+
+namespace rgw::account {
+
+// account ids start with 'RGW' followed by 17 numeric digits
+static constexpr std::string_view id_prefix = "RGW";
+static constexpr std::size_t id_len = 20;
+
+std::string generate_id(CephContext* cct)
+{
+ // fill with random numeric digits
+ std::string id = gen_rand_numeric(cct, id_len);
+ // overwrite the prefix bytes
+ std::copy(id_prefix.begin(), id_prefix.end(), id.begin());
+ return id;
+}
+
+bool validate_id(std::string_view id, std::string* err_msg)
+{
+ if (id.size() != id_len) {
+ if (err_msg) {
+ *err_msg = fmt::format("account id must be {} bytes long", id_len);
+ }
+ return false;
+ }
+ if (id.compare(0, id_prefix.size(), id_prefix) != 0) {
+ if (err_msg) {
+ *err_msg = fmt::format("account id must start with {}", id_prefix);
+ }
+ return false;
+ }
+ auto suffix = id.substr(id_prefix.size());
+ // all remaining bytes must be digits
+ constexpr auto digit = [] (int c) { return std::isdigit(c); };
+ if (!std::all_of(suffix.begin(), suffix.end(), digit)) {
+ if (err_msg) {
+ *err_msg = "account id must end with numeric digits";
+ }
+ return false;
+ }
+ return true;
+}
+
+bool validate_name(std::string_view name, std::string* err_msg)
+{
+ if (name.empty()) {
+ if (err_msg) {
+ *err_msg = "account name must not be empty";
+ }
+ return false;
+ }
+ // must not contain the tenant delimiter $
+ if (name.find('$') != name.npos) {
+ if (err_msg) {
+ *err_msg = "account name must not contain $";
+ }
+ return false;
+ }
+ // must not contain the metadata section delimiter :
+ if (name.find(':') != name.npos) {
+ if (err_msg) {
+ *err_msg = "account name must not contain :";
+ }
+ return false;
+ }
+ // must be valid utf8
+ if (check_utf8(name.data(), name.size()) != 0) {
+ if (err_msg) {
+ *err_msg = "account name must be valid utf8";
+ }
+ return false;
+ }
+ return true;
+}
+
+
+int create(const DoutPrefixProvider* dpp,
+ rgw::sal::Driver* driver,
+ AdminOpState& op_state,
+ std::string& err_msg,
+ RGWFormatterFlusher& flusher,
+ optional_yield y)
+{
+ // validate account name if specified
+ if (!op_state.account_name.empty() &&
+ !validate_name(op_state.account_name, &err_msg)) {
+ return -EINVAL;
+ }
+
+ auto info = RGWAccountInfo{
+ .tenant = op_state.tenant,
+ .name = op_state.account_name,
+ .email = op_state.email,
+ };
+
+ if (op_state.max_users) {
+ info.max_users = *op_state.max_users;
+ }
+ if (op_state.max_roles) {
+ info.max_roles = *op_state.max_roles;
+ }
+ if (op_state.max_groups) {
+ info.max_groups = *op_state.max_groups;
+ }
+ if (op_state.max_access_keys) {
+ info.max_access_keys = *op_state.max_access_keys;
+ }
+ if (op_state.max_buckets) {
+ info.max_buckets = *op_state.max_buckets;
+ }
+
+ // account id is optional, but must be valid when provided
+ if (op_state.account_id.empty()) {
+ info.id = generate_id(dpp->get_cct());
+ } else if (!validate_id(op_state.account_id, &err_msg)) {
+ return -EINVAL;
+ } else {
+ info.id = op_state.account_id;
+ }
+
+ constexpr RGWAccountInfo* old_info = nullptr;
+ constexpr bool exclusive = true;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+ objv.generate_new_write_ver(dpp->get_cct());
+
+ int ret = driver->store_account(dpp, y, exclusive, info,
+ old_info, attrs, objv);
+ if (ret < 0) {
+ return ret;
+ }
+
+ flusher.start(0);
+ encode_json("AccountInfo", info, flusher.get_formatter());
+ flusher.flush();
+
+ return 0;
+}
+
+int modify(const DoutPrefixProvider* dpp,
+ rgw::sal::Driver* driver,
+ AdminOpState& op_state,
+ std::string& err_msg,
+ RGWFormatterFlusher& flusher,
+ optional_yield y)
+{
+ int ret = 0;
+ RGWAccountInfo info;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+ if (!op_state.account_id.empty()) {
+ ret = driver->load_account_by_id(dpp, y, op_state.account_id,
+ info, attrs, objv);
+ } else if (!op_state.account_name.empty()) {
+ ret = driver->load_account_by_name(dpp, y, op_state.tenant,
+ op_state.account_name,
+ info, attrs, objv);
+ } else if (!op_state.email.empty()) {
+ ret = driver->load_account_by_email(dpp, y, op_state.email,
+ info, attrs, objv);
+ } else {
+ err_msg = "requires --account-id or --account-name or --email";
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ return ret;
+ }
+ const RGWAccountInfo old_info = info;
+
+ if (!op_state.tenant.empty() && op_state.tenant != info.tenant) {
+ err_msg = "cannot modify account tenant";
+ return -EINVAL;
+ }
+
+ if (!op_state.account_name.empty()) {
+ // name must be valid
+ if (!validate_name(op_state.account_name, &err_msg)) {
+ return -EINVAL;
+ }
+ info.name = op_state.account_name;
+ }
+
+ if (!op_state.email.empty()) {
+ info.email = op_state.email;
+ }
+
+ if (op_state.max_users) {
+ info.max_users = *op_state.max_users;
+ }
+ if (op_state.max_roles) {
+ info.max_roles = *op_state.max_roles;
+ }
+ if (op_state.max_groups) {
+ info.max_groups = *op_state.max_groups;
+ }
+ if (op_state.max_access_keys) {
+ info.max_access_keys = *op_state.max_access_keys;
+ }
+ if (op_state.max_buckets) {
+ info.max_buckets = *op_state.max_buckets;
+ }
+
+ if (op_state.quota_max_size) {
+ info.quota.max_size = *op_state.quota_max_size;
+ }
+ if (op_state.quota_max_objects) {
+ info.quota.max_objects = *op_state.quota_max_objects;
+ }
+ if (op_state.quota_enabled) {
+ info.quota.enabled = *op_state.quota_enabled;
+ }
+
+ constexpr bool exclusive = false;
+
+ ret = driver->store_account(dpp, y, exclusive, info, &old_info, attrs, objv);
+ if (ret < 0) {
+ return ret;
+ }
+
+ flusher.start(0);
+ encode_json("AccountInfo", info, flusher.get_formatter());
+ flusher.flush();
+
+ return 0;
+}
+
+int remove(const DoutPrefixProvider* dpp,
+ rgw::sal::Driver* driver,
+ AdminOpState& op_state,
+ std::string& err_msg,
+ RGWFormatterFlusher& flusher,
+ optional_yield y)
+{
+ int ret = 0;
+ RGWAccountInfo info;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+
+ if (!op_state.account_id.empty()) {
+ ret = driver->load_account_by_id(dpp, y, op_state.account_id,
+ info, attrs, objv);
+ } else if (!op_state.account_name.empty()) {
+ ret = driver->load_account_by_name(dpp, y, op_state.tenant,
+ op_state.account_name,
+ info, attrs, objv);
+ } else if (!op_state.email.empty()) {
+ ret = driver->load_account_by_email(dpp, y, op_state.email,
+ info, attrs, objv);
+ } else {
+ err_msg = "requires --account-id or --account-name or --email";
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ return ret;
+ }
+
+ // make sure the account is empty
+ constexpr std::string_view path_prefix; // empty
+ const std::string marker; // empty
+ constexpr uint32_t max_items = 1;
+
+ rgw::sal::UserList users;
+ ret = driver->list_account_users(dpp, y, info.id, info.tenant, path_prefix,
+ marker, max_items, users);
+ if (ret < 0) {
+ return ret;
+ }
+ if (!users.users.empty()) {
+ err_msg = "The account cannot be deleted until all users are removed.";
+ return -ENOTEMPTY;
+ }
+
+ constexpr bool need_stats = false;
+ rgw::sal::BucketList buckets;
+ ret = driver->list_buckets(dpp, info.id, info.tenant, marker, marker,
+ max_items, need_stats, buckets, y);
+ if (ret < 0) {
+ return ret;
+ }
+ if (!buckets.buckets.empty()) {
+ err_msg = "The account cannot be deleted until all buckets are removed.";
+ return -ENOTEMPTY;
+ }
+
+ rgw::sal::RoleList roles;
+ ret = driver->list_account_roles(dpp, y, info.id, path_prefix,
+ marker, max_items, roles);
+ if (ret < 0) {
+ return ret;
+ }
+ if (!roles.roles.empty()) {
+ err_msg = "The account cannot be deleted until all roles are removed.";
+ return -ENOTEMPTY;
+ }
+
+ rgw::sal::GroupList groups;
+ ret = driver->list_account_groups(dpp, y, info.id, path_prefix,
+ marker, max_items, groups);
+ if (ret < 0) {
+ return ret;
+ }
+ if (!groups.groups.empty()) {
+ err_msg = "The account cannot be deleted until all groups are removed.";
+ return -ENOTEMPTY;
+ }
+
+ std::vector<RGWOIDCProviderInfo> providers;
+ ret = driver->get_oidc_providers(dpp, y, info.id, providers);
+ if (ret < 0) {
+ return ret;
+ }
+ if (!providers.empty()) {
+ err_msg = "The account cannot be deleted until all OpenIDConnectProviders are removed.";
+ return -ENOTEMPTY;
+ }
+
+ return driver->delete_account(dpp, y, info, objv);
+}
+
+int info(const DoutPrefixProvider* dpp,
+ rgw::sal::Driver* driver,
+ AdminOpState& op_state,
+ std::string& err_msg,
+ RGWFormatterFlusher& flusher,
+ optional_yield y)
+{
+ int ret = 0;
+ RGWAccountInfo info;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+
+ if (!op_state.account_id.empty()) {
+ ret = driver->load_account_by_id(dpp, y, op_state.account_id,
+ info, attrs, objv);
+ } else if (!op_state.account_name.empty()) {
+ ret = driver->load_account_by_name(dpp, y, op_state.tenant,
+ op_state.account_name,
+ info, attrs, objv);
+ } else if (!op_state.email.empty()) {
+ ret = driver->load_account_by_email(dpp, y, op_state.email,
+ info, attrs, objv);
+ } else {
+ err_msg = "requires --account-id or --account-name or --email";
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ return ret;
+ }
+
+ flusher.start(0);
+ encode_json("AccountInfo", info, flusher.get_formatter());
+ flusher.flush();
+
+ return 0;
+}
+
+int stats(const DoutPrefixProvider* dpp,
+ rgw::sal::Driver* driver,
+ AdminOpState& op_state,
+ bool sync_stats,
+ bool reset_stats,
+ std::string& err_msg,
+ RGWFormatterFlusher& flusher,
+ optional_yield y)
+{
+ int ret = 0;
+ RGWAccountInfo info;
+ rgw::sal::Attrs attrs; // ignored
+ RGWObjVersionTracker objv; // ignored
+
+ if (!op_state.account_id.empty()) {
+ // look up account by id
+ ret = driver->load_account_by_id(dpp, y, op_state.account_id,
+ info, attrs, objv);
+ } else if (!op_state.account_name.empty()) {
+ // look up account by tenant/name
+ ret = driver->load_account_by_name(dpp, y, op_state.tenant,
+ op_state.account_name,
+ info, attrs, objv);
+ } else {
+ err_msg = "requires account id or name";
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ err_msg = "failed to load account";
+ return ret;
+ }
+
+ const rgw_owner owner = rgw_account_id{info.id};
+
+ if (sync_stats) {
+ ret = rgw_sync_all_stats(dpp, y, driver, owner, info.tenant);
+ if (ret < 0) {
+ err_msg = "failed to sync account stats";
+ return ret;
+ }
+ } else if (reset_stats) {
+ ret = driver->reset_stats(dpp, y, owner);
+ if (ret < 0) {
+ err_msg = "failed to reset account stats";
+ return ret;
+ }
+ }
+
+ RGWStorageStats stats;
+ ceph::real_time last_synced;
+ ceph::real_time last_updated;
+ ret = driver->load_stats(dpp, y, owner, stats,
+ last_synced, last_updated);
+ if (ret < 0) {
+ return ret;
+ }
+
+ flusher.start(0);
+ auto f = flusher.get_formatter();
+ f->open_object_section("AccountStats");
+ encode_json("stats", stats, f);
+ encode_json("last_synced", last_synced, f);
+ encode_json("last_updated", last_updated, f);
+ f->close_section(); // AccountStats
+ flusher.flush();
+
+ return 0;
+}
+
+int list_users(const DoutPrefixProvider* dpp, rgw::sal::Driver* driver,
+ AdminOpState& op_state, const std::string& path_prefix,
+ const std::string& marker, bool max_entries_specified,
+ int max_entries, std::string& err_msg,
+ RGWFormatterFlusher& flusher, optional_yield y)
+{
+ int ret = 0;
+ RGWAccountInfo info;
+ rgw::sal::Attrs attrs; // ignored
+ RGWObjVersionTracker objv; // ignored
+
+ if (!op_state.account_id.empty()) {
+ // look up account by id
+ ret = driver->load_account_by_id(dpp, y, op_state.account_id,
+ info, attrs, objv);
+ } else if (!op_state.account_name.empty()) {
+ // look up account by tenant/name
+ ret = driver->load_account_by_name(dpp, y, op_state.tenant,
+ op_state.account_name,
+ info, attrs, objv);
+ } else {
+ err_msg = "requires account id or name";
+ return -EINVAL;
+ }
+ if (ret < 0) {
+ err_msg = "failed to load account";
+ return ret;
+ }
+
+ rgw::sal::UserList listing;
+ listing.next_marker = marker;
+
+ Formatter* formatter = flusher.get_formatter();
+ flusher.start(0);
+
+ int32_t remaining = std::numeric_limits<int32_t>::max();
+ if (max_entries_specified) {
+ remaining = max_entries;
+ formatter->open_object_section("result");
+ }
+ formatter->open_array_section("keys");
+
+ do {
+ constexpr int32_t max_chunk = 100;
+ int32_t count = std::min(max_chunk, remaining);
+
+ ret = driver->list_account_users(dpp, y, info.id, info.tenant,
+ path_prefix, listing.next_marker,
+ count, listing);
+ if (ret == -ENOENT) {
+ ret = 0;
+ } else if (ret < 0) {
+ err_msg = "failed to list users";
+ return ret;
+ }
+
+ for (const auto& user : listing.users) {
+ encode_json("key", user.user_id, formatter);
+ }
+ flusher.flush();
+
+ remaining -= listing.users.size();
+ } while (!listing.next_marker.empty() && remaining > 0);
+
+ formatter->close_section(); // keys
+
+ if (max_entries_specified) {
+ if (!listing.next_marker.empty()) {
+ encode_json("marker", listing.next_marker, formatter);
+ }
+ formatter->close_section(); // result
+ }
+ flusher.flush();
+ return 0;
+}
+
+} // namespace rgw::account
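
generate_id() and validate_id() pin down the account id format: exactly 20 bytes, the literal prefix RGW followed by 17 decimal digits. A behavior sketch (the digit strings below are shape-only, not real ids):

#include <cassert>
#include "rgw_account.h"

void account_id_examples(CephContext* cct)
{
  // generated ids always pass validation
  std::string id = rgw::account::generate_id(cct);
  assert(rgw::account::validate_id(id));

  std::string err;
  assert(!rgw::account::validate_id("RGW123", &err));                // wrong length
  assert(!rgw::account::validate_id("ABC12345678901234567", &err));  // wrong prefix
}
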
diff --git a/src/rgw/rgw_account.h b/src/rgw/rgw_account.h
new file mode 100644
index 00000000000..f942d674b0e
--- /dev/null
+++ b/src/rgw/rgw_account.h
@@ -0,0 +1,90 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#pragma once
+
+#include <cstdint>
+#include <optional>
+#include <string>
+
+#include "include/common_fwd.h"
+
+#include "rgw_sal_fwd.h"
+
+class DoutPrefixProvider;
+class RGWFormatterFlusher;
+class optional_yield;
+
+namespace rgw::account {
+
+/// generate a randomized account id in a specific format
+std::string generate_id(CephContext* cct);
+
+/// validate that an account id matches the generated format
+bool validate_id(std::string_view id, std::string* err_msg = nullptr);
+
+/// check an account name for any invalid characters
+bool validate_name(std::string_view name, std::string* err_msg = nullptr);
+
+
+struct AdminOpState {
+ std::string account_id;
+ std::string tenant;
+ std::string account_name;
+ std::string email;
+ std::optional<int32_t> max_users;
+ std::optional<int32_t> max_roles;
+ std::optional<int32_t> max_groups;
+ std::optional<int32_t> max_access_keys;
+ std::optional<int32_t> max_buckets;
+ std::optional<int64_t> quota_max_size;
+ std::optional<int64_t> quota_max_objects;
+ std::optional<bool> quota_enabled;
+};
+
+/// create an account
+int create(const DoutPrefixProvider* dpp, rgw::sal::Driver* driver,
+ AdminOpState& op_state, std::string& err_msg,
+ RGWFormatterFlusher& flusher, optional_yield y);
+
+/// modify an existing account
+int modify(const DoutPrefixProvider* dpp, rgw::sal::Driver* driver,
+ AdminOpState& op_state, std::string& err_msg,
+ RGWFormatterFlusher& flusher, optional_yield y);
+
+/// remove an existing account
+int remove(const DoutPrefixProvider* dpp, rgw::sal::Driver* driver,
+ AdminOpState& op_state, std::string& err_msg,
+ RGWFormatterFlusher& flusher, optional_yield y);
+
+/// dump RGWAccountInfo
+int info(const DoutPrefixProvider* dpp, rgw::sal::Driver* driver,
+ AdminOpState& op_state, std::string& err_msg,
+ RGWFormatterFlusher& flusher, optional_yield y);
+
+/// dump account storage stats
+int stats(const DoutPrefixProvider* dpp, rgw::sal::Driver* driver,
+ AdminOpState& op_state, bool sync_stats,
+ bool reset_stats, std::string& err_msg,
+ RGWFormatterFlusher& flusher, optional_yield y);
+
+/// list account users
+int list_users(const DoutPrefixProvider* dpp, rgw::sal::Driver* driver,
+ AdminOpState& op_state, const std::string& path_prefix,
+ const std::string& marker, bool max_entries_specified,
+ int max_entries, std::string& err_msg,
+ RGWFormatterFlusher& flusher, optional_yield y);
+
+} // namespace rgw::account
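
A hypothetical admin-side caller, showing how the std::optional fields act as overrides: anything left unset keeps the RGWAccountInfo defaults. The driver/flusher plumbing is assumed to come from the usual radosgw-admin context:

#include "rgw_account.h"

int create_account(const DoutPrefixProvider* dpp, rgw::sal::Driver* driver,
                   RGWFormatterFlusher& flusher, optional_yield y)
{
  rgw::account::AdminOpState op;
  op.account_name = "acme";  // hypothetical name; id is generated if unset
  op.max_users = 100;        // override; other limits keep their defaults
  std::string err_msg;
  return rgw::account::create(dpp, driver, op, err_msg, flusher, y);
}
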
diff --git a/src/rgw/rgw_acl.cc b/src/rgw/rgw_acl.cc
index 4369bedfc76..f8a2eb8e546 100644
--- a/src/rgw/rgw_acl.cc
+++ b/src/rgw/rgw_acl.cc
@@ -6,6 +6,7 @@
#include <iostream>
#include <map>
+#include "include/function2.hpp"
#include "include/types.h"
#include "common/Formatter.h"
@@ -72,7 +73,7 @@ void RGWAccessControlList::register_grant(const ACLGrant& grant)
ACLPermission perm = grant.get_permission();
if (const auto* user = grant.get_user(); user) {
- acl_user_map[user->id.to_str()] |= perm.get_permissions();
+ acl_user_map[to_string(user->id)] |= perm.get_permissions();
} else if (const auto* email = grant.get_email(); email) {
acl_user_map[email->address] |= perm.get_permissions();
} else if (const auto* group = grant.get_group(); group) {
@@ -92,7 +93,7 @@ void RGWAccessControlList::add_grant(const ACLGrant& grant)
{
std::string id;
if (const auto* user = grant.get_user(); user) {
- id = user->id.to_str();
+ id = to_string(user->id);
} else if (const auto* email = grant.get_email(); email) {
id = email->address;
} // other types share the empty key in the grant multimap
@@ -100,11 +101,11 @@ void RGWAccessControlList::add_grant(const ACLGrant& grant)
register_grant(grant);
}
-void RGWAccessControlList::remove_canon_user_grant(const rgw_user& user_id)
+void RGWAccessControlList::remove_canon_user_grant(const rgw_owner& owner)
{
- const std::string& key = user_id.to_str();
- grant_map.erase(key);
- acl_user_map.erase(key);
+ const std::string& id = to_string(owner);
+ grant_map.erase(id);
+ acl_user_map.erase(id);
}
uint32_t RGWAccessControlList::get_perm(const DoutPrefixProvider* dpp,
@@ -245,6 +246,14 @@ bool RGWAccessControlPolicy::is_public(const DoutPrefixProvider *dpp) const
}
+bool ACLOwner::empty() const
+{
+ return std::visit(fu2::overload(
+ [] (const rgw_user& uid) { return uid.empty(); },
+ [] (const rgw_account_id& aid) { return aid.empty(); }
+ ), id);
+}
+
void ACLPermission::generate_test_instances(list<ACLPermission*>& o)
{
ACLPermission *p = new ACLPermission;
@@ -346,7 +355,7 @@ void RGWAccessControlPolicy::generate_test_instances(list<RGWAccessControlPolicy
RGWAccessControlList *l = *iter;
p->acl = *l;
- p->owner.id.id = "rgw";
+ p->owner.id = rgw_user{"rgw"};
p->owner.display_name = "radosgw";
o.push_back(p);
@@ -394,14 +403,14 @@ void RGWAccessControlList::dump(Formatter *f) const
void ACLOwner::dump(Formatter *f) const
{
- encode_json("id", id.to_str(), f);
+ encode_json("id", to_string(id), f);
encode_json("display_name", display_name, f);
}
void ACLOwner::decode_json(JSONObj *obj) {
string id_str;
JSONDecoder::decode_json("id", id_str, obj);
- id.from_str(id_str);
+ id = parse_owner(id_str);
JSONDecoder::decode_json("display_name", display_name, obj);
}
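
The recurring move in this file is the rgw_user to rgw_owner migration: rgw_owner is a variant over rgw_user and rgw_account_id, with to_string() and parse_owner() as the string bridge (parse_owner presumably classifying account ids by their RGW-prefixed format). A sketch, assuming those helpers from rgw_common.h:

// Both alternatives round-trip through the string form used in
// ACL grant maps and JSON dumps.
void owner_examples()
{
  rgw_owner user = parse_owner("tenant$user");           // holds rgw_user
  rgw_owner acct = parse_owner("RGW33567154695143645");  // holds rgw_account_id (shape-only digits)

  [[maybe_unused]] std::string s1 = to_string(user);  // "tenant$user"
  [[maybe_unused]] std::string s2 = to_string(acct);  // the same 20-byte id
}
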
diff --git a/src/rgw/rgw_acl.h b/src/rgw/rgw_acl.h
index e2887a7049b..fdc9961e8dc 100644
--- a/src/rgw/rgw_acl.h
+++ b/src/rgw/rgw_acl.h
@@ -18,7 +18,7 @@
// acl grantee types
struct ACLGranteeCanonicalUser {
- rgw_user id;
+ rgw_owner id;
std::string name;
friend auto operator<=>(const ACLGranteeCanonicalUser&,
@@ -90,7 +90,7 @@ public:
encode(type, bl);
if (const ACLGranteeCanonicalUser* user = get_user(); user) {
- encode(user->id.to_str(), bl);
+ encode(to_string(user->id), bl);
} else {
encode(std::string{}, bl); // encode empty id
}
@@ -133,7 +133,7 @@ public:
ACLGranteeCanonicalUser user;
std::string s;
decode(s, bl);
- user.id.from_str(s);
+ user.id = parse_owner(s);
std::string uri;
decode(uri, bl);
@@ -180,7 +180,7 @@ public:
static ACLGroupTypeEnum uri_to_group(std::string_view uri);
- void set_canon(const rgw_user& id, const std::string& name, uint32_t perm) {
+ void set_canon(const rgw_owner& id, const std::string& name, uint32_t perm) {
grantee = ACLGranteeCanonicalUser{id, name};
permission.set_permissions(perm);
}
@@ -330,13 +330,15 @@ public:
void dump(Formatter *f) const;
static void generate_test_instances(std::list<RGWAccessControlList*>& o);
+ bool empty() const { return grant_map.empty(); }
+
void add_grant(const ACLGrant& grant);
- void remove_canon_user_grant(const rgw_user& user_id);
+ void remove_canon_user_grant(const rgw_owner& user_id);
ACLGrantMap& get_grant_map() { return grant_map; }
const ACLGrantMap& get_grant_map() const { return grant_map; }
- void create_default(const rgw_user& id, const std::string& name) {
+ void create_default(const rgw_owner& id, const std::string& name) {
acl_user_map.clear();
acl_group_map.clear();
referer_list.clear();
@@ -352,13 +354,12 @@ public:
WRITE_CLASS_ENCODER(RGWAccessControlList)
struct ACLOwner {
- rgw_user id;
+ rgw_owner id;
std::string display_name;
void encode(bufferlist& bl) const {
ENCODE_START(3, 2, bl);
- std::string s;
- id.to_str(s);
+ const std::string s = to_string(id);
encode(s, bl);
encode(display_name, bl);
ENCODE_FINISH(bl);
@@ -367,7 +368,7 @@ struct ACLOwner {
DECODE_START_LEGACY_COMPAT_LEN(3, 2, 2, bl);
std::string s;
decode(s, bl);
- id.from_str(s);
+ id = parse_owner(s);
decode(display_name, bl);
DECODE_FINISH(bl);
}
@@ -375,6 +376,8 @@ struct ACLOwner {
void decode_json(JSONObj *obj);
static void generate_test_instances(std::list<ACLOwner*>& o);
+ bool empty() const;
+
auto operator<=>(const ACLOwner&) const = default;
};
WRITE_CLASS_ENCODER(ACLOwner)
@@ -418,11 +421,13 @@ public:
DECODE_FINISH(bl);
}
+ bool empty() const { return acl.empty() && owner.empty(); }
+
void set_owner(const ACLOwner& o) { owner = o; }
const ACLOwner& get_owner() const { return owner; }
ACLOwner& get_owner() { return owner; }
- void create_default(const rgw_user& id, const std::string& name) {
+ void create_default(const rgw_owner& id, const std::string& name) {
acl.create_default(id, name);
owner.id = id;
owner.display_name = name;
diff --git a/src/rgw/rgw_acl_s3.cc b/src/rgw/rgw_acl_s3.cc
index e45fb552aff..133a7afd2c9 100644
--- a/src/rgw/rgw_acl_s3.cc
+++ b/src/rgw/rgw_acl_s3.cc
@@ -171,8 +171,7 @@ bool ACLOwner_S3::xml_end(const char *el) {
void to_xml(const ACLOwner& o, std::ostream& out)
{
- string s;
- o.id.to_str(s);
+ const std::string s = to_string(o.id);
if (s.empty())
return;
out << "<Owner>" << "<ID>" << s << "</ID>";
@@ -299,7 +298,47 @@ struct s3_acl_header {
const char *http_header;
};
+static int read_owner_display_name(const DoutPrefixProvider* dpp,
+ optional_yield y, rgw::sal::Driver* driver,
+ const rgw_owner& owner, std::string& name)
+{
+ return std::visit(fu2::overload(
+ [&] (const rgw_user& uid) {
+ auto user = driver->get_user(uid);
+ int r = user->load_user(dpp, y);
+ if (r >= 0) {
+ name = user->get_display_name();
+ }
+ return r;
+ },
+ [&] (const rgw_account_id& account_id) {
+ RGWAccountInfo info;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+ int r = driver->load_account_by_id(dpp, y, account_id, info, attrs, objv);
+ if (r >= 0) {
+ name = info.name;
+ }
+ return r;
+ }), owner);
+}
+
+static int read_aclowner_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ rgw::sal::Driver* driver,
+ std::string_view email,
+ ACLOwner& aclowner)
+{
+ int ret = driver->load_owner_by_email(dpp, y, email, aclowner.id);
+ if (ret < 0) {
+ return ret;
+ }
+ return read_owner_display_name(dpp, y, driver, aclowner.id,
+ aclowner.display_name);
+}
+
static int parse_grantee_str(const DoutPrefixProvider* dpp,
+ optional_yield y,
rgw::sal::Driver* driver,
const std::string& grantee_str,
const s3_acl_header* perm,
@@ -316,19 +355,21 @@ static int parse_grantee_str(const DoutPrefixProvider* dpp,
string id_val = rgw_trim_quotes(id_val_quoted);
if (strcasecmp(id_type.c_str(), "emailAddress") == 0) {
- std::unique_ptr<rgw::sal::User> user;
- ret = driver->get_user_by_email(dpp, id_val, null_yield, &user);
+ ACLOwner owner;
+ ret = read_aclowner_by_email(dpp, y, driver, id_val, owner);
if (ret < 0)
return ret;
- grant.set_canon(user->get_id(), user->get_display_name(), rgw_perm);
+ grant.set_canon(owner.id, owner.display_name, rgw_perm);
} else if (strcasecmp(id_type.c_str(), "id") == 0) {
- std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(id_val));
- ret = user->load_user(dpp, null_yield);
+ ACLOwner owner;
+ owner.id = parse_owner(id_val);
+ ret = read_owner_display_name(dpp, y, driver,
+ owner.id, owner.display_name);
if (ret < 0)
return ret;
- grant.set_canon(user->get_id(), user->get_display_name(), rgw_perm);
+ grant.set_canon(owner.id, owner.display_name, rgw_perm);
} else if (strcasecmp(id_type.c_str(), "uri") == 0) {
ACLGroupTypeEnum gid = rgw::s3::acl_uri_to_group(id_val);
if (gid == ACL_GROUP_NONE)
@@ -342,7 +383,8 @@ static int parse_grantee_str(const DoutPrefixProvider* dpp,
return 0;
}
-static int parse_acl_header(const DoutPrefixProvider* dpp, rgw::sal::Driver* driver,
+static int parse_acl_header(const DoutPrefixProvider* dpp,
+ optional_yield y, rgw::sal::Driver* driver,
const RGWEnv& env, const s3_acl_header* perm,
RGWAccessControlList& acl)
{
@@ -353,7 +395,7 @@ static int parse_acl_header(const DoutPrefixProvider* dpp, rgw::sal::Driver* dri
for (std::string_view grantee : ceph::split(hacl, ",")) {
ACLGrant grant;
- int ret = parse_grantee_str(dpp, driver, std::string{grantee}, perm, grant);
+ int ret = parse_grantee_str(dpp, y, driver, std::string{grantee}, perm, grant);
if (ret < 0)
return ret;
@@ -366,7 +408,7 @@ static int parse_acl_header(const DoutPrefixProvider* dpp, rgw::sal::Driver* dri
static int create_canned(const ACLOwner& owner, const ACLOwner& bucket_owner,
const string& canned_acl, RGWAccessControlList& acl)
{
- const rgw_user& bid = bucket_owner.id;
+ const rgw_owner& bid = bucket_owner.id;
const std::string& bname = bucket_owner.display_name;
/* owner gets full control */
@@ -454,28 +496,29 @@ static int resolve_grant(const DoutPrefixProvider* dpp, optional_yield y,
{
const uint32_t perm = xml_grant.permission->flags;
- std::unique_ptr<rgw::sal::User> user;
+ ACLOwner owner;
switch (xml_grant.type.get_type()) {
case ACL_TYPE_EMAIL_USER:
if (xml_grant.email.empty()) {
return -EINVAL;
}
- if (driver->get_user_by_email(dpp, xml_grant.email, y, &user) < 0) {
+ if (read_aclowner_by_email(dpp, y, driver, xml_grant.email, owner) < 0) {
ldpp_dout(dpp, 10) << "grant user email not found or other error" << dendl;
err_msg = "The e-mail address you provided does not match any account on record.";
return -ERR_UNRESOLVABLE_EMAIL;
}
- grant.set_canon(user->get_id(), user->get_display_name(), perm);
+ grant.set_canon(owner.id, owner.display_name, perm);
return 0;
case ACL_TYPE_CANON_USER:
- user = driver->get_user(rgw_user{xml_grant.id});
- if (user->load_user(dpp, y) < 0) {
+ owner.id = parse_owner(xml_grant.id);
+ if (read_owner_display_name(dpp, y, driver, owner.id,
+ owner.display_name) < 0) {
ldpp_dout(dpp, 10) << "grant user does not exist: " << xml_grant.id << dendl;
err_msg = "Invalid CanonicalUser id";
return -EINVAL;
}
- grant.set_canon(user->get_id(), user->get_display_name(), perm);
+ grant.set_canon(owner.id, owner.display_name, perm);
return 0;
case ACL_TYPE_GROUP:
@@ -590,21 +633,18 @@ int parse_policy(const DoutPrefixProvider* dpp, optional_yield y,
return -EINVAL;
}
+ ACLOwner& owner = policy.get_owner();
+ owner.id = parse_owner(xml_owner->id);
+
// owner must exist
- std::unique_ptr<rgw::sal::User> user =
- driver->get_user(rgw_user{xml_owner->id});
- if (user->load_user(dpp, y) < 0) {
- ldpp_dout(dpp, 10) << "acl owner does not exist" << dendl;
+ int r = read_owner_display_name(dpp, y, driver, owner.id, owner.display_name);
+ if (r < 0) {
+ ldpp_dout(dpp, 10) << "acl owner " << owner.id << " does not exist" << dendl;
err_msg = "Invalid Owner ID";
return -EINVAL;
}
-
- ACLOwner& owner = policy.get_owner();
- owner.id = xml_owner->id;
if (!xml_owner->display_name.empty()) {
owner.display_name = xml_owner->display_name;
- } else {
- owner.display_name = user->get_display_name();
}
const auto xml_acl = static_cast<ACLOwner_S3*>(
@@ -619,7 +659,7 @@ int parse_policy(const DoutPrefixProvider* dpp, optional_yield y,
ACLGrant_S3* xml_grant = static_cast<ACLGrant_S3*>(iter.get_next());
while (xml_grant) {
ACLGrant grant;
- int r = resolve_grant(dpp, y, driver, *xml_grant, grant, err_msg);
+ r = resolve_grant(dpp, y, driver, *xml_grant, grant, err_msg);
if (r < 0) {
return r;
}
@@ -641,7 +681,7 @@ int create_canned_acl(const ACLOwner& owner,
const std::string& canned_acl,
RGWAccessControlPolicy& policy)
{
- if (owner.id == rgw_user("anonymous")) {
+ if (owner.id == parse_owner("anonymous")) {
policy.set_owner(bucket_owner);
} else {
policy.set_owner(owner);
@@ -650,6 +690,7 @@ int create_canned_acl(const ACLOwner& owner,
}
int create_policy_from_headers(const DoutPrefixProvider* dpp,
+ optional_yield y,
rgw::sal::Driver* driver,
const ACLOwner& owner,
const RGWEnv& env,
@@ -659,7 +700,7 @@ int create_policy_from_headers(const DoutPrefixProvider* dpp,
auto& acl = policy.get_acl();
for (const s3_acl_header* p = acl_header_perms; p->rgw_perm; p++) {
- int r = parse_acl_header(dpp, driver, env, p, acl);
+ int r = parse_acl_header(dpp, y, driver, env, p, acl);
if (r < 0) {
return r;
}
diff --git a/src/rgw/rgw_acl_s3.h b/src/rgw/rgw_acl_s3.h
index 2341461783f..22b34c21f45 100644
--- a/src/rgw/rgw_acl_s3.h
+++ b/src/rgw/rgw_acl_s3.h
@@ -39,6 +39,7 @@ int create_canned_acl(const ACLOwner& owner,
/// Construct a policy from x-amz-grant-* request headers.
int create_policy_from_headers(const DoutPrefixProvider* dpp,
+ optional_yield y,
rgw::sal::Driver* driver,
const ACLOwner& owner,
const RGWEnv& env,
diff --git a/src/rgw/rgw_acl_swift.cc b/src/rgw/rgw_acl_swift.cc
index 260f4530d41..67e0daf5b72 100644
--- a/src/rgw/rgw_acl_swift.cc
+++ b/src/rgw/rgw_acl_swift.cc
@@ -171,14 +171,13 @@ namespace rgw::swift {
int create_container_policy(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
- const rgw_user& id,
- const std::string& name,
+ const ACLOwner& owner,
const char* read_list,
const char* write_list,
uint32_t& rw_mask,
RGWAccessControlPolicy& policy)
{
- policy.create_default(id, name);
+ policy.create_default(owner.id, owner.display_name);
auto& acl = policy.get_acl();
if (read_list) {
@@ -245,7 +244,7 @@ void format_container_acls(const RGWAccessControlPolicy& policy,
std::string id;
std::string url_spec;
if (const auto user = grant.get_user(); user) {
- id = user->id.to_str();
+ id = to_string(user->id);
} else if (const auto group = grant.get_group(); group) {
if (group->type == ACL_GROUP_ALL_USERS) {
id = SWIFT_GROUP_ALL_USERS;
@@ -279,12 +278,11 @@ void format_container_acls(const RGWAccessControlPolicy& policy,
int create_account_policy(const DoutPrefixProvider* dpp,
rgw::sal::Driver* driver,
- const rgw_user& id,
- const std::string& name,
+ const ACLOwner& owner,
const std::string& acl_str,
RGWAccessControlPolicy& policy)
{
- policy.create_default(id, name);
+ policy.create_default(owner.id, owner.display_name);
auto& acl = policy.get_acl();
JSONParser parser;
@@ -342,7 +340,7 @@ auto format_account_acl(const RGWAccessControlPolicy& policy)
if (owner.id == user->id) {
continue;
}
- id = user->id.to_str();
+ id = to_string(user->id);
} else if (const auto group = grant.get_group(); group) {
if (group->type != ACL_GROUP_ALL_USERS) {
continue;
diff --git a/src/rgw/rgw_acl_swift.h b/src/rgw/rgw_acl_swift.h
index a16bea894db..fe7d9032bec 100644
--- a/src/rgw/rgw_acl_swift.h
+++ b/src/rgw/rgw_acl_swift.h
@@ -7,6 +7,7 @@
#include "rgw_sal_fwd.h"
#include "rgw_user_types.h"
+struct ACLOwner;
class DoutPrefixProvider;
class RGWAccessControlPolicy;
@@ -16,8 +17,7 @@ namespace rgw::swift {
/// X-Container-Read/X-Container-Write.
int create_container_policy(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
- const rgw_user& id,
- const std::string& name,
+ const ACLOwner& owner,
const char* read_list,
const char* write_list,
uint32_t& rw_mask,
@@ -35,8 +35,7 @@ void format_container_acls(const RGWAccessControlPolicy& policy,
/// Create a policy based on swift account acl header X-Account-Access-Control.
int create_account_policy(const DoutPrefixProvider* dpp,
rgw::sal::Driver* driver,
- const rgw_user& id,
- const std::string& name,
+ const ACLOwner& owner,
const std::string& acl_str,
RGWAccessControlPolicy& policy);
diff --git a/src/rgw/rgw_acl_types.h b/src/rgw/rgw_acl_types.h
index 3f9f1715aba..d844567c344 100644
--- a/src/rgw/rgw_acl_types.h
+++ b/src/rgw/rgw_acl_types.h
@@ -46,28 +46,33 @@ struct RGWAccessKey {
std::string key; // SecretKey
std::string subuser;
bool active = true;
+ ceph::real_time create_date;
RGWAccessKey() {}
RGWAccessKey(std::string _id, std::string _key)
: id(std::move(_id)), key(std::move(_key)) {}
void encode(bufferlist& bl) const {
- ENCODE_START(3, 2, bl);
+ ENCODE_START(4, 2, bl);
encode(id, bl);
encode(key, bl);
encode(subuser, bl);
encode(active, bl);
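+ // create_date is new in struct_v 4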
+ encode(create_date, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
- DECODE_START_LEGACY_COMPAT_LEN_32(3, 2, 2, bl);
+ DECODE_START_LEGACY_COMPAT_LEN_32(4, 2, 2, bl);
decode(id, bl);
decode(key, bl);
decode(subuser, bl);
if (struct_v >= 3) {
decode(active, bl);
}
+ if (struct_v >= 4) {
+ decode(create_date, bl);
+ }
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
diff --git a/src/rgw/rgw_admin.cc b/src/rgw/rgw_admin.cc
index 59fa82acafb..d5f06520c16 100644
--- a/src/rgw/rgw_admin.cc
+++ b/src/rgw/rgw_admin.cc
@@ -65,6 +65,7 @@ extern "C" {
#include "rgw_sal.h"
#include "rgw_sal_config.h"
#include "rgw_data_access.h"
+#include "rgw_account.h"
#include "services/svc_sync_modules.h"
#include "services/svc_cls.h"
@@ -143,6 +144,9 @@ void usage()
cout << " user check check user info\n";
cout << " user stats show user stats as accounted by quota subsystem\n";
cout << " user list list users\n";
+ cout << " user policy attach attach a managed policy\n";
+ cout << " user policy detach detach a managed policy\n";
+ cout << " user policy list attached list attached managed policies\n";
cout << " caps add add user capabilities\n";
cout << " caps rm remove user capabilities\n";
cout << " subuser create create a new subuser\n" ;
@@ -150,6 +154,12 @@ void usage()
cout << " subuser rm remove subuser\n";
cout << " key create create access key\n";
cout << " key rm remove access key\n";
+ cout << " account create create a new account\n";
+ cout << " account modify modify an existing account\n";
+ cout << " account get get account info\n";
+ cout << " account stats dump account storage stats\n";
+ cout << " account rm remove an account\n";
+ cout << " account list list all account ids\n";
cout << " bucket list list buckets (specify --allow-unordered for faster, unsorted listing)\n";
cout << " bucket limit check show bucket sharding stats\n";
cout << " bucket link link bucket to specified user\n";
@@ -187,9 +197,9 @@ void usage()
cout << " period list list all periods\n";
cout << " period update update the staging period\n";
cout << " period commit commit the staging period\n";
- cout << " quota set set quota params\n";
- cout << " quota enable enable quota\n";
- cout << " quota disable disable quota\n";
+ cout << " quota set set quota params for a user/bucket/account\n";
+ cout << " quota enable enable quota for a user/bucket/account\n";
+ cout << " quota disable disable quota for a user/bucket/account\n";
cout << " ratelimit get get ratelimit params\n";
cout << " ratelimit set set ratelimit params\n";
cout << " ratelimit enable enable ratelimit\n";
@@ -294,6 +304,9 @@ void usage()
cout << " role-policy list list policies attached to a role\n";
cout << " role-policy get get the specified inline policy document embedded with the given role\n";
cout << " role-policy delete remove policy attached to a role\n";
+ cout << " role policy attach attach a managed policy\n";
+ cout << " role policy detach detach a managed policy\n";
+ cout << " role policy list attached list attached managed policies\n";
cout << " role update update max_session_duration of a role\n";
cout << " reshard add schedule a resharding of a bucket\n";
cout << " reshard list list all bucket resharding or scheduled to be resharded\n";
@@ -330,6 +343,12 @@ void usage()
cout << " --uid=<id> user id\n";
cout << " --new-uid=<id> new user id\n";
cout << " --subuser=<name> subuser name\n";
+ cout << " --account-name=<name> account name\n";
+ cout << " --account-id=<id> account id\n";
+ cout << " --max-users max number of users for an account\n";
+ cout << " --max-roles max number of roles for an account\n";
+ cout << " --max-groups max number of groups for an account\n";
+ cout << " --max-access-keys max number of keys per user for an account\n";
cout << " --access-key=<key> S3 access key\n";
cout << " --email=<email> user's email address\n";
cout << " --secret/--secret-key=<key> specify secret key\n";
@@ -471,6 +490,8 @@ void usage()
cout << " --policy-name name of the policy document\n";
cout << " --policy-doc permission policy document\n";
cout << " --path-prefix path prefix for filtering roles\n";
+ cout << " --description Role description\n";
+ cout << " --policy-arn ARN of a managed policy\n";
cout << "\nMFA options:\n";
cout << " --totp-serial a string that represents the ID of a TOTP token\n";
cout << " --totp-seed the secret seed that is used to calculate the TOTP\n";
@@ -645,6 +666,9 @@ enum class OPT {
USER_CHECK,
USER_STATS,
USER_LIST,
+ USER_POLICY_ATTACH,
+ USER_POLICY_DETACH,
+ USER_POLICY_LIST_ATTACHED,
SUBUSER_CREATE,
SUBUSER_MODIFY,
SUBUSER_RM,
@@ -819,6 +843,9 @@ enum class OPT {
ROLE_POLICY_LIST,
ROLE_POLICY_GET,
ROLE_POLICY_DELETE,
+ ROLE_POLICY_ATTACH,
+ ROLE_POLICY_DETACH,
+ ROLE_POLICY_LIST_ATTACHED,
ROLE_UPDATE,
RESHARD_ADD,
RESHARD_LIST,
@@ -846,7 +873,13 @@ enum class OPT {
SCRIPT_PACKAGE_ADD,
SCRIPT_PACKAGE_RM,
SCRIPT_PACKAGE_LIST,
- SCRIPT_PACKAGE_RELOAD
+ SCRIPT_PACKAGE_RELOAD,
+ ACCOUNT_CREATE,
+ ACCOUNT_MODIFY,
+ ACCOUNT_GET,
+ ACCOUNT_STATS,
+ ACCOUNT_RM,
+ ACCOUNT_LIST,
};
}
@@ -864,6 +897,9 @@ static SimpleCmd::Commands all_cmds = {
{ "user check", OPT::USER_CHECK },
{ "user stats", OPT::USER_STATS },
{ "user list", OPT::USER_LIST },
+ { "user policy attach", OPT::USER_POLICY_ATTACH },
+ { "user policy detach", OPT::USER_POLICY_DETACH },
+ { "user policy list attached", OPT::USER_POLICY_LIST_ATTACHED },
{ "subuser create", OPT::SUBUSER_CREATE },
{ "subuser modify", OPT::SUBUSER_MODIFY },
{ "subuser rm", OPT::SUBUSER_RM },
@@ -1052,6 +1088,9 @@ static SimpleCmd::Commands all_cmds = {
{ "role-policy get", OPT::ROLE_POLICY_GET },
{ "role policy delete", OPT::ROLE_POLICY_DELETE },
{ "role-policy delete", OPT::ROLE_POLICY_DELETE },
+ { "role policy attach", OPT::ROLE_POLICY_ATTACH },
+ { "role policy detach", OPT::ROLE_POLICY_DETACH },
+ { "role policy list attached", OPT::ROLE_POLICY_LIST_ATTACHED },
{ "role update", OPT::ROLE_UPDATE },
{ "reshard bucket", OPT::BUCKET_RESHARD },
{ "reshard add", OPT::RESHARD_ADD },
@@ -1083,6 +1122,12 @@ static SimpleCmd::Commands all_cmds = {
{ "script-package rm", OPT::SCRIPT_PACKAGE_RM },
{ "script-package list", OPT::SCRIPT_PACKAGE_LIST },
{ "script-package reload", OPT::SCRIPT_PACKAGE_RELOAD },
+ { "account create", OPT::ACCOUNT_CREATE },
+ { "account modify", OPT::ACCOUNT_MODIFY },
+ { "account get", OPT::ACCOUNT_GET },
+ { "account stats", OPT::ACCOUNT_STATS },
+ { "account rm", OPT::ACCOUNT_RM },
+ { "account list", OPT::ACCOUNT_LIST },
};
static SimpleCmd::Aliases cmd_aliases = {
@@ -1128,7 +1173,7 @@ static void show_perm_policy(string perm_policy, Formatter* formatter)
formatter->flush(cout);
}
-static void show_policy_names(std::vector<string> policy_names, Formatter* formatter)
+static void show_policy_names(const std::vector<string>& policy_names, Formatter* formatter)
{
formatter->open_array_section("PolicyNames");
for (const auto& it : policy_names) {
@@ -1138,24 +1183,14 @@ static void show_policy_names(std::vector<string> policy_names, Formatter* forma
formatter->flush(cout);
}
-static void show_role_info(rgw::sal::RGWRole* role, Formatter* formatter)
+static void show_policy_arns(const boost::container::flat_set<std::string>& arns,
+ Formatter* formatter)
{
- formatter->open_object_section("role");
- role->dump(formatter);
- formatter->close_section();
- formatter->flush(cout);
-}
-
-static void show_roles_info(vector<std::unique_ptr<rgw::sal::RGWRole>>& roles, Formatter* formatter)
-{
- formatter->open_array_section("Roles");
- for (const auto& it : roles) {
- formatter->open_object_section("role");
- it->dump(formatter);
- formatter->close_section();
+ formatter->open_array_section("AttachedPolicies");
+ for (const auto& arn : arns) {
+ formatter->dump_string("PolicyArn", arn);
}
formatter->close_section();
- formatter->flush(cout);
}
static void show_reshard_status(
@@ -3156,8 +3191,6 @@ class SyncPolicyContext
rgw_sync_policy_info *policy{nullptr};
- std::optional<rgw_user> owner;
-
public:
SyncPolicyContext(rgw::sal::ConfigStore* cfgstore,
std::optional<rgw_bucket> _bucket)
@@ -3183,8 +3216,6 @@ public:
return ret;
}
- owner = bucket->get_info().owner;
-
if (!bucket->get_info().sync_policy) {
rgw_sync_policy_info new_policy;
bucket->get_info().set_sync_policy(std::move(new_policy));
@@ -3217,10 +3248,6 @@ public:
rgw_sync_policy_info& get_policy() {
return *policy;
}
-
- std::optional<rgw_user>& get_owner() {
- return owner;
- }
};
void resolve_zone_id_opt(std::optional<string>& zone_name, std::optional<rgw_zone_id>& zone_id)
@@ -3336,6 +3363,8 @@ int main(int argc, const char **argv)
std::unique_ptr<rgw::sal::User> user;
string tenant;
string user_ns;
+ string account_name;
+ rgw_account_id account_id;
rgw_user new_user_id;
std::string access_key, secret_key, user_email, display_name;
std::string bucket_name, pool_name, object;
@@ -3354,6 +3383,8 @@ int main(int argc, const char **argv)
std::optional<string> opt_zonegroup_name, opt_zonegroup_id;
std::string api_name;
std::string role_name, path, assume_role_doc, policy_name, perm_policy_doc, path_prefix, max_session_duration;
+ std::string description;
+ std::string policy_arn;
std::string redirect_zone;
bool redirect_zone_set = false;
list<string> endpoints;
@@ -3398,8 +3429,11 @@ int main(int argc, const char **argv)
int fix = false;
int remove_bad = false;
int check_head_obj_locator = false;
- int max_buckets = -1;
- bool max_buckets_specified = false;
+ std::optional<int> max_buckets;
+ std::optional<int> max_users;
+ std::optional<int> max_roles;
+ std::optional<int> max_groups;
+ std::optional<int> max_access_keys;
map<string, bool> categories;
string caps;
int check_objects = false;
@@ -3416,6 +3450,8 @@ int main(int argc, const char **argv)
bool admin_specified = false;
int system = false;
bool system_specified = false;
+ int account_root = false;
+ bool account_root_specified = false;
int shard_id = -1;
bool specified_shard_id = false;
string client_id;
@@ -3589,6 +3625,34 @@ int main(int argc, const char **argv)
opt_tenant = val;
} else if (ceph_argparse_witharg(args, i, &val, "--user_ns", (char*)NULL)) {
user_ns = val;
+ } else if (ceph_argparse_witharg(args, i, &val, "--account-name", (char*)NULL)) {
+ account_name = val;
+ } else if (ceph_argparse_witharg(args, i, &val, "--account-id", (char*)NULL)) {
+ account_id = val;
+ } else if (ceph_argparse_witharg(args, i, &val, "--max-users", (char*)NULL)) {
+ max_users = ceph::parse<int>(val);
+ if (!max_users) {
+ cerr << "ERROR: failed to parse --max-users" << std::endl;
+ return EINVAL;
+ }
+ } else if (ceph_argparse_witharg(args, i, &val, "--max-roles", (char*)NULL)) {
+ max_roles = ceph::parse<int>(val);
+ if (!max_roles) {
+ cerr << "ERROR: failed to parse --max-roles" << std::endl;
+ return EINVAL;
+ }
+ } else if (ceph_argparse_witharg(args, i, &val, "--max-groups", (char*)NULL)) {
+ max_groups = ceph::parse<int>(val);
+ if (!max_groups) {
+ cerr << "ERROR: failed to parse --max-groups" << std::endl;
+ return EINVAL;
+ }
+ } else if (ceph_argparse_witharg(args, i, &val, "--max-access-keys", (char*)NULL)) {
+ max_access_keys = ceph::parse<int>(val);
+ if (!max_access_keys) {
+ cerr << "ERROR: failed to parse --max-access-keys" << std::endl;
+ return EINVAL;
+ }
} else if (ceph_argparse_witharg(args, i, &val, "--access-key", (char*)NULL)) {
access_key = val;
} else if (ceph_argparse_witharg(args, i, &val, "--subuser", (char*)NULL)) {
@@ -3645,6 +3709,8 @@ int main(int argc, const char **argv)
admin_specified = true;
} else if (ceph_argparse_binary_flag(args, i, &system, NULL, "--system", (char*)NULL)) {
system_specified = true;
+ } else if (ceph_argparse_binary_flag(args, i, &account_root, NULL, "--account-root", (char*)NULL)) {
+ account_root_specified = true;
} else if (ceph_argparse_binary_flag(args, i, &verbose, NULL, "--verbose", (char*)NULL)) {
// do nothing
} else if (ceph_argparse_binary_flag(args, i, &staging, NULL, "--staging", (char*)NULL)) {
@@ -3658,12 +3724,11 @@ int main(int argc, const char **argv)
} else if (ceph_argparse_witharg(args, i, &val, "--min-rewrite-stripe-size", (char*)NULL)) {
min_rewrite_stripe_size = (uint64_t)atoll(val.c_str());
} else if (ceph_argparse_witharg(args, i, &val, "--max-buckets", (char*)NULL)) {
- max_buckets = (int)strict_strtol(val.c_str(), 10, &err);
- if (!err.empty()) {
- cerr << "ERROR: failed to parse max buckets: " << err << std::endl;
+ max_buckets = ceph::parse<int>(val);
+ if (!max_buckets) {
+ cerr << "ERROR: failed to parse max buckets" << std::endl;
return EINVAL;
}
- max_buckets_specified = true;
} else if (ceph_argparse_witharg(args, i, &val, "--max-entries", (char*)NULL)) {
max_entries = (int)strict_strtol(val.c_str(), 10, &err);
max_entries_specified = true;
@@ -3973,8 +4038,12 @@ int main(int argc, const char **argv)
perm_policy_doc = val;
} else if (ceph_argparse_witharg(args, i, &val, "--path-prefix", (char*)NULL)) {
path_prefix = val;
+ } else if (ceph_argparse_witharg(args, i, &val, "--policy-arn", (char*)NULL)) {
+ policy_arn = val;
} else if (ceph_argparse_witharg(args, i, &val, "--max-session-duration", (char*)NULL)) {
max_session_duration = val;
+ } else if (ceph_argparse_witharg(args, i, &val, "--description", (char*)NULL)) {
+ description = val;
} else if (ceph_argparse_witharg(args, i, &val, "--totp-serial", (char*)NULL)) {
totp_serial = val;
} else if (ceph_argparse_witharg(args, i, &val, "--totp-pin", (char*)NULL)) {
@@ -4186,6 +4255,11 @@ int main(int argc, const char **argv)
std::set<OPT> readonly_ops_list = {
OPT::USER_INFO,
OPT::USER_STATS,
+ OPT::USER_LIST,
+ OPT::USER_POLICY_LIST_ATTACHED,
+ OPT::ACCOUNT_GET,
+ OPT::ACCOUNT_STATS,
+ OPT::ACCOUNT_LIST,
OPT::BUCKETS_LIST,
OPT::BUCKET_LIMIT_CHECK,
OPT::BUCKET_LAYOUT,
@@ -4244,6 +4318,7 @@ int main(int argc, const char **argv)
OPT::ROLE_LIST,
OPT::ROLE_POLICY_LIST,
OPT::ROLE_POLICY_GET,
+ OPT::ROLE_POLICY_LIST_ATTACHED,
OPT::RESHARD_LIST,
OPT::RESHARD_STATUS,
OPT::PUBSUB_TOPIC_LIST,
@@ -4337,6 +4412,9 @@ int main(int argc, const char **argv)
&& opt_cmd != OPT::ROLE_POLICY_LIST
&& opt_cmd != OPT::ROLE_POLICY_GET
&& opt_cmd != OPT::ROLE_POLICY_DELETE
+ && opt_cmd != OPT::ROLE_POLICY_ATTACH
+ && opt_cmd != OPT::ROLE_POLICY_DETACH
+ && opt_cmd != OPT::ROLE_POLICY_LIST_ATTACHED
&& opt_cmd != OPT::ROLE_UPDATE
&& opt_cmd != OPT::RESHARD_ADD
&& opt_cmd != OPT::RESHARD_CANCEL
@@ -4350,7 +4428,13 @@ int main(int argc, const char **argv)
&& opt_cmd != OPT::PUBSUB_TOPIC_STATS
&& opt_cmd != OPT::SCRIPT_PUT
&& opt_cmd != OPT::SCRIPT_GET
- && opt_cmd != OPT::SCRIPT_RM) {
+ && opt_cmd != OPT::SCRIPT_RM
+ && opt_cmd != OPT::ACCOUNT_CREATE
+ && opt_cmd != OPT::ACCOUNT_MODIFY
+ && opt_cmd != OPT::ACCOUNT_GET
+ && opt_cmd != OPT::ACCOUNT_STATS
+ && opt_cmd != OPT::ACCOUNT_RM
+ && opt_cmd != OPT::ACCOUNT_LIST) {
cerr << "ERROR: --tenant is set, but there's no user ID" << std::endl;
return EINVAL;
}
@@ -6369,7 +6453,9 @@ int main(int argc, const char **argv)
resolve_zone_ids_opt(opt_dest_zone_names, opt_dest_zone_ids);
bool non_master_cmd = (!driver->is_meta_master() && !yes_i_really_mean_it);
- std::set<OPT> non_master_ops_list = {OPT::USER_CREATE, OPT::USER_RM,
+ std::set<OPT> non_master_ops_list = {OPT::ACCOUNT_CREATE,
+ OPT::ACCOUNT_MODIFY, OPT::ACCOUNT_RM,
+ OPT::USER_CREATE, OPT::USER_RM,
OPT::USER_MODIFY, OPT::USER_ENABLE,
OPT::USER_SUSPEND, OPT::SUBUSER_CREATE,
OPT::SUBUSER_MODIFY, OPT::SUBUSER_RM,
@@ -6380,7 +6466,9 @@ int main(int argc, const char **argv)
OPT::MFA_REMOVE, OPT::MFA_RESYNC,
OPT::CAPS_ADD, OPT::CAPS_RM,
OPT::ROLE_CREATE, OPT::ROLE_DELETE,
- OPT::ROLE_POLICY_PUT, OPT::ROLE_POLICY_DELETE};
+ OPT::ROLE_POLICY_PUT, OPT::ROLE_POLICY_DELETE,
+ OPT::ROLE_POLICY_ATTACH, OPT::ROLE_POLICY_DETACH,
+ OPT::USER_POLICY_ATTACH, OPT::USER_POLICY_DETACH};
bool print_warning_message = (non_master_ops_list.find(opt_cmd) != non_master_ops_list.end() &&
non_master_cmd);
@@ -6429,8 +6517,8 @@ int main(int argc, const char **argv)
if (gen_secret_key)
user_op.set_gen_secret(); // assume that a key pair should be created
- if (max_buckets_specified)
- user_op.set_max_buckets(max_buckets);
+ if (max_buckets)
+ user_op.set_max_buckets(*max_buckets);
if (admin_specified)
user_op.set_admin(admin);
@@ -6438,6 +6526,9 @@ int main(int argc, const char **argv)
if (system_specified)
user_op.set_system(system);
+ if (account_root_specified)
+ user_op.set_account_root(account_root);
+
if (set_perm)
user_op.set_perm(perm_mask);
@@ -6486,6 +6577,10 @@ int main(int argc, const char **argv)
if (!tags.empty()) {
user_op.set_placement_tags(tags);
}
+ user_op.path = path;
+
+ user_op.account_id = account_id;
+ bucket_op.account_id = account_id;
// RGWUser to use for user operations
RGWUser ruser;
@@ -6725,23 +6820,23 @@ int main(int argc, const char **argv)
cerr << "ERROR: assume role policy document is empty" << std::endl;
return -EINVAL;
}
- bufferlist bl = bufferlist::static_from_string(assume_role_doc);
try {
const rgw::IAM::Policy p(
- g_ceph_context, tenant, bl,
+ g_ceph_context, nullptr, assume_role_doc,
g_ceph_context->_conf.get_val<bool>(
"rgw_policy_reject_invalid_principals"));
} catch (rgw::IAM::PolicyParseException& e) {
cerr << "failed to parse policy: " << e.what() << std::endl;
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant, path,
- assume_role_doc, max_session_duration);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant, account_id, path,
+ assume_role_doc, description, max_session_duration);
ret = role->create(dpp(), true, "", null_yield);
if (ret < 0) {
return -ret;
}
- show_role_info(role.get(), formatter.get());
+ encode_json("role", role->get_info(), formatter.get());
+ formatter->flush(cout);
return 0;
}
case OPT::ROLE_DELETE:
@@ -6750,7 +6845,7 @@ int main(int argc, const char **argv)
cerr << "ERROR: empty role name" << std::endl;
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant, account_id);
ret = role->delete_obj(dpp(), null_yield);
if (ret < 0) {
return -ret;
@@ -6764,12 +6859,13 @@ int main(int argc, const char **argv)
cerr << "ERROR: empty role name" << std::endl;
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant, account_id);
ret = role->get(dpp(), null_yield);
if (ret < 0) {
return -ret;
}
- show_role_info(role.get(), formatter.get());
+ encode_json("role", role->get_info(), formatter.get());
+ formatter->flush(cout);
return 0;
}
case OPT::ROLE_TRUST_POLICY_MODIFY:
@@ -6784,9 +6880,8 @@ int main(int argc, const char **argv)
return -EINVAL;
}
- bufferlist bl = bufferlist::static_from_string(assume_role_doc);
try {
- const rgw::IAM::Policy p(g_ceph_context, tenant, bl,
+ const rgw::IAM::Policy p(g_ceph_context, nullptr, assume_role_doc,
g_ceph_context->_conf.get_val<bool>(
"rgw_policy_reject_invalid_principals"));
} catch (rgw::IAM::PolicyParseException& e) {
@@ -6794,7 +6889,7 @@ int main(int argc, const char **argv)
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant, account_id);
ret = role->get(dpp(), null_yield);
if (ret < 0) {
return -ret;
@@ -6809,12 +6904,49 @@ int main(int argc, const char **argv)
}
case OPT::ROLE_LIST:
{
- vector<std::unique_ptr<rgw::sal::RGWRole>> result;
- ret = driver->get_roles(dpp(), null_yield, path_prefix, tenant, result);
- if (ret < 0) {
- return -ret;
+ rgw::sal::RoleList listing;
+ listing.next_marker = marker;
+
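+ // with no --max-entries, INT32_MAX effectively means "list everything"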
+ int32_t remaining = std::numeric_limits<int32_t>::max();
+ if (max_entries_specified) {
+ remaining = max_entries;
+ formatter->open_object_section("result");
+ }
+ formatter->open_array_section("Roles");
+
+ do {
+ constexpr int32_t max_chunk = 100;
+ int32_t count = std::min(max_chunk, remaining);
+
+ if (!account_id.empty()) {
+ // list roles in the account
+ ret = driver->list_account_roles(dpp(), null_yield, account_id,
+ path_prefix, listing.next_marker,
+ count, listing);
+ } else {
+ // list roles in the tenant
+ ret = driver->list_roles(dpp(), null_yield, tenant, path_prefix,
+ listing.next_marker, count, listing);
+ }
+ if (ret < 0) {
+ return -ret;
+ }
+ for (const auto& info : listing.roles) {
+ encode_json("member", info, formatter.get());
+ }
+ formatter->flush(cout);
+ remaining -= listing.roles.size();
+ } while (!listing.next_marker.empty() && remaining > 0);
+
+ formatter->close_section(); // Roles
+
+ if (max_entries_specified) {
+ if (!listing.next_marker.empty()) {
+ encode_json("next-marker", listing.next_marker, formatter.get());
+ }
+ formatter->close_section(); // result
}
- show_roles_info(result, formatter.get());
+ formatter->flush(cout);
return 0;
}
case OPT::ROLE_POLICY_PUT:
@@ -6834,19 +6966,17 @@ int main(int argc, const char **argv)
return -EINVAL;
}
- bufferlist bl;
if (!infile.empty()) {
+ bufferlist bl;
int ret = read_input(infile, bl);
if (ret < 0) {
cerr << "ERROR: failed to read input policy document: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
perm_policy_doc = bl.to_str();
- } else {
- bl = bufferlist::static_from_string(perm_policy_doc);
}
try {
- const rgw::IAM::Policy p(g_ceph_context, tenant, bl,
+ const rgw::IAM::Policy p(g_ceph_context, nullptr, perm_policy_doc,
g_ceph_context->_conf.get_val<bool>(
"rgw_policy_reject_invalid_principals"));
} catch (rgw::IAM::PolicyParseException& e) {
@@ -6854,7 +6984,7 @@ int main(int argc, const char **argv)
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant, account_id);
ret = role->get(dpp(), null_yield);
if (ret < 0) {
return -ret;
@@ -6873,7 +7003,7 @@ int main(int argc, const char **argv)
cerr << "ERROR: Role name is empty" << std::endl;
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant, account_id);
ret = role->get(dpp(), null_yield);
if (ret < 0) {
return -ret;
@@ -6893,7 +7023,7 @@ int main(int argc, const char **argv)
cerr << "ERROR: policy name is empty" << std::endl;
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant, account_id);
int ret = role->get(dpp(), null_yield);
if (ret < 0) {
return -ret;
@@ -6917,7 +7047,7 @@ int main(int argc, const char **argv)
cerr << "ERROR: policy name is empty" << std::endl;
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant, account_id);
ret = role->get(dpp(), null_yield);
if (ret < 0) {
return -ret;
@@ -6933,7 +7063,97 @@ int main(int argc, const char **argv)
cout << "Policy: " << policy_name << " successfully deleted for role: "
<< role_name << std::endl;
return 0;
- }
+ }
+ case OPT::ROLE_POLICY_ATTACH:
+ {
+ if (role_name.empty()) {
+ cerr << "role name is empty" << std::endl;
+ return EINVAL;
+ }
+ if (policy_arn.empty()) {
+ cerr << "policy arn is empty" << std::endl;
+ return EINVAL;
+ }
+ try {
+ if (!rgw::IAM::get_managed_policy(g_ceph_context, policy_arn)) {
+ cerr << "unrecognized policy arn " << policy_arn << std::endl;
+ return ENOENT;
+ }
+ } catch (rgw::IAM::PolicyParseException& e) {
+ cerr << "failed to parse managed policy: " << e.what() << std::endl;
+ return EINVAL;
+ }
+
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant, account_id);
+ ret = role->get(dpp(), null_yield);
+ if (ret < 0) {
+ return -ret;
+ }
+ if (role->get_info().account_id.empty()) {
+ std::cerr << "Managed policies are only supported for account roles" << std::endl;
+ return EINVAL;
+ }
+
+ auto &policies = role->get_info().managed_policies;
+ const bool inserted = policies.arns.insert(policy_arn).second;
+ if (!inserted) {
+ cout << "That managed policy is already attached." << std::endl;
+ return EEXIST;
+ }
+ ret = role->update(dpp(), null_yield);
+ if (ret < 0) {
+ return -ret;
+ }
+ cout << "Managed policy attached successfully" << std::endl;
+ return 0;
+ }
+ case OPT::ROLE_POLICY_DETACH:
+ {
+ if (role_name.empty()) {
+ cerr << "role name is empty" << std::endl;
+ return EINVAL;
+ }
+ if (policy_arn.empty()) {
+ cerr << "policy arn is empty" << std::endl;
+ return EINVAL;
+ }
+
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant, account_id);
+ ret = role->get(dpp(), null_yield);
+ if (ret < 0) {
+ return -ret;
+ }
+ // look up the policy arn; if it isn't attached, report ENOENT
+ auto &policies = role->get_info().managed_policies;
+ auto i = policies.arns.find(policy_arn);
+ if (i == policies.arns.end()) {
+ cout << "That managed policy is not attached." << std::endl;
+ return ENOENT;
+ }
+ policies.arns.erase(i);
+
+ ret = role->update(dpp(), null_yield);
+ if (ret < 0) {
+ return -ret;
+ }
+ cout << "Managed policy detached successfully" << std::endl;
+ return 0;
+ }
+ case OPT::ROLE_POLICY_LIST_ATTACHED:
+ {
+ if (role_name.empty()) {
+ cerr << "ERROR: Role name is empty" << std::endl;
+ return EINVAL;
+ }
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant, account_id);
+ ret = role->get(dpp(), null_yield);
+ if (ret < 0) {
+ return -ret;
+ }
+ show_policy_arns(role->get_info().managed_policies.arns, formatter.get());
+ formatter->flush(cout);
+ return 0;
+ }
case OPT::ROLE_UPDATE:
{
if (role_name.empty()) {
@@ -6941,7 +7161,7 @@ int main(int argc, const char **argv)
return -EINVAL;
}
- std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant);
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, tenant, account_id);
ret = role->get(dpp(), null_yield);
if (ret < 0) {
return -ret;
@@ -7375,7 +7595,7 @@ int main(int argc, const char **argv)
return -r;
}
formatter->dump_string("bucket_id", entry.bucket_id);
- formatter->dump_string("bucket_owner", entry.bucket_owner.to_str());
+ formatter->dump_string("bucket_owner", to_string(entry.bucket_owner));
formatter->dump_string("bucket", entry.bucket);
uint64_t agg_time = 0;
@@ -8790,7 +9010,8 @@ next:
}
if (opt_cmd == OPT::USER_CHECK) {
- check_bad_user_bucket_mapping(driver, *user.get(), fix, null_yield, dpp());
+ check_bad_owner_bucket_mapping(driver, user->get_id(), user->get_tenant(),
+ fix, null_yield, dpp());
}
if (opt_cmd == OPT::USER_STATS) {
@@ -8809,7 +9030,7 @@ next:
"so at most one of the two should be specified" << std::endl;
return EINVAL;
}
- ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->user->reset_bucket_stats(dpp(), user->get_id(), null_yield);
+ ret = driver->reset_stats(dpp(), null_yield, user->get_id());
if (ret < 0) {
cerr << "ERROR: could not reset user stats: " << cpp_strerror(-ret) <<
std::endl;
@@ -8824,14 +9045,15 @@ next:
cerr << "ERROR: could not init bucket: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
- ret = bucket->sync_user_stats(dpp(), null_yield, nullptr);
+ ret = bucket->sync_owner_stats(dpp(), null_yield, nullptr);
if (ret < 0) {
cerr << "ERROR: could not sync bucket stats: " <<
cpp_strerror(-ret) << std::endl;
return -ret;
}
} else {
- int ret = rgw_user_sync_all_stats(dpp(), driver, user.get(), null_yield);
+ int ret = rgw_sync_all_stats(dpp(), null_yield, driver,
+ user->get_id(), user->get_tenant());
if (ret < 0) {
cerr << "ERROR: could not sync user stats: " <<
cpp_strerror(-ret) << std::endl;
@@ -8840,11 +9062,25 @@ next:
}
}
+ int ret = user->load_user(dpp(), null_yield);
+ if (ret < 0) {
+ cerr << "User has not been initialized or user does not exist" << std::endl;
+ return -ret;
+ }
+
+ const RGWUserInfo& info = user->get_info();
+ rgw_owner owner = info.user_id;
+ if (!info.account_id.empty()) {
+ cerr << "Reading stats for user account " << info.account_id << std::endl;
+ owner = info.account_id;
+ }
+
constexpr bool omit_utilized_stats = false;
RGWStorageStats stats(omit_utilized_stats);
ceph::real_time last_stats_sync;
ceph::real_time last_stats_update;
- int ret = user->read_stats(dpp(), null_yield, &stats, &last_stats_sync, &last_stats_update);
+ ret = driver->load_stats(dpp(), null_yield, owner, stats,
+ last_stats_sync, last_stats_update);
if (ret < 0) {
if (ret == -ENOENT) { /* in case of ENOENT */
cerr << "User has not been initialized or user does not exist" << std::endl;
@@ -8866,6 +9102,115 @@ next:
formatter->flush(cout);
}
+ if (opt_cmd == OPT::USER_POLICY_ATTACH) {
+ if (rgw::sal::User::empty(user)) {
+ cerr << "ERROR: uid not specified" << std::endl;
+ return EINVAL;
+ }
+ if (policy_arn.empty()) {
+ cerr << "policy arn is empty" << std::endl;
+ return EINVAL;
+ }
+ ret = user->load_user(dpp(), null_yield);
+ if (ret < 0) {
+ return -ret;
+ }
+ if (user->get_info().account_id.empty()) {
+ std::cerr << "Managed policies are only supported for account users" << std::endl;
+ return EINVAL;
+ }
+
+ try {
+ if (!rgw::IAM::get_managed_policy(g_ceph_context, policy_arn)) {
+ cerr << "unrecognized policy arn " << policy_arn << std::endl;
+ return ENOENT;
+ }
+ } catch (rgw::IAM::PolicyParseException& e) {
+ cerr << "failed to parse managed policy: " << e.what() << std::endl;
+ return EINVAL;
+ }
+
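+ // attached arns are stored as an encoded ManagedPolicies set in the user's RGW_ATTR_MANAGED_POLICY attr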
+ rgw::IAM::ManagedPolicies policies;
+ auto& attrs = user->get_attrs();
+ if (auto it = attrs.find(RGW_ATTR_MANAGED_POLICY); it != attrs.end()) {
+ decode(policies, it->second);
+ }
+ const bool inserted = policies.arns.insert(policy_arn).second;
+ if (!inserted) {
+ cout << "That managed policy is already attached." << std::endl;
+ return EEXIST;
+ }
+
+ bufferlist in_bl;
+ encode(policies, in_bl);
+ attrs[RGW_ATTR_MANAGED_POLICY] = in_bl;
+
+ ret = user->store_user(dpp(), null_yield, false);
+ if (ret < 0) {
+ return -ret;
+ }
+ cout << "Managed policy attached successfully" << std::endl;
+ return 0;
+ }
+ if (opt_cmd == OPT::USER_POLICY_DETACH) {
+ if (rgw::sal::User::empty(user)) {
+ cerr << "ERROR: uid not specified" << std::endl;
+ return EINVAL;
+ }
+ if (policy_arn.empty()) {
+ cerr << "policy arn is empty" << std::endl;
+ return EINVAL;
+ }
+ ret = user->load_user(dpp(), null_yield);
+ if (ret < 0) {
+ return -ret;
+ }
+
+ rgw::IAM::ManagedPolicies policies;
+ auto& attrs = user->get_attrs();
+ if (auto it = attrs.find(RGW_ATTR_MANAGED_POLICY); it != attrs.end()) {
+ decode(policies, it->second);
+ }
+
+ auto i = policies.arns.find(policy_arn);
+ if (i == policies.arns.end()) {
+ cout << "That managed policy is not attached." << std::endl;
+ return ENOENT;
+ }
+ policies.arns.erase(i);
+
+ bufferlist in_bl;
+ encode(policies, in_bl);
+ attrs[RGW_ATTR_MANAGED_POLICY] = in_bl;
+
+ ret = user->store_user(dpp(), null_yield, false);
+ if (ret < 0) {
+ return -ret;
+ }
+ cout << "Managed policy detached successfully" << std::endl;
+ return 0;
+ }
+ if (opt_cmd == OPT::USER_POLICY_LIST_ATTACHED) {
+ if (rgw::sal::User::empty(user)) {
+ cerr << "ERROR: uid not specified" << std::endl;
+ return -EINVAL;
+ }
+ ret = user->load_user(dpp(), null_yield);
+ if (ret < 0) {
+ return -ret;
+ }
+
+ rgw::IAM::ManagedPolicies policies;
+ auto& attrs = user->get_attrs();
+ if (auto it = attrs.find(RGW_ATTR_MANAGED_POLICY); it != attrs.end()) {
+ decode(policies, it->second);
+ }
+
+ show_policy_arns(policies.arns, formatter.get());
+ formatter->flush(cout);
+ return 0;
+ }
+
if (opt_cmd == OPT::METADATA_GET) {
int ret = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->meta.mgr->get(metadata_key, formatter.get(), null_yield, dpp());
if (ret < 0) {
@@ -8898,9 +9243,32 @@ next:
}
}
- if (opt_cmd == OPT::METADATA_LIST || opt_cmd == OPT::USER_LIST) {
+ if (opt_cmd == OPT::METADATA_LIST ||
+ opt_cmd == OPT::USER_LIST ||
+ opt_cmd == OPT::ACCOUNT_LIST) {
if (opt_cmd == OPT::USER_LIST) {
metadata_key = "user";
+
+ if (!account_id.empty() || !account_name.empty()) {
+ // list users by account
+ rgw::account::AdminOpState op_state;
+ op_state.account_id = account_id;
+ op_state.tenant = tenant;
+ op_state.account_name = account_name;
+
+ std::string err_msg;
+ int ret = rgw::account::list_users(
+ dpp(), driver, op_state, path_prefix, marker,
+ max_entries_specified, max_entries, err_msg,
+ stream_flusher, null_yield);
+ if (ret < 0) {
+ cerr << "ERROR: " << err_msg << std::endl;
+ return -ret;
+ }
+ return 0;
+ }
+ } else if (opt_cmd == OPT::ACCOUNT_LIST) {
+ metadata_key = "account";
}
void *handle;
int max = 1000;
@@ -9939,11 +10307,9 @@ next:
if (!rgw::sal::User::empty(user)) {
pipe->params.user = user->get_id();
- } else if (pipe->params.user.empty()) {
- auto owner = sync_policy_ctx.get_owner();
- if (owner) {
- pipe->params.user = *owner;
- }
+ } else if (pipe->params.mode == rgw_sync_pipe_params::MODE_USER) {
+ cerr << "ERROR: missing --uid for --mode=user" << std::endl;
+ return EINVAL;
}
ret = sync_policy_ctx.write_policy();
@@ -10284,11 +10650,6 @@ next:
bool quota_op = (opt_cmd == OPT::QUOTA_SET || opt_cmd == OPT::QUOTA_ENABLE || opt_cmd == OPT::QUOTA_DISABLE);
if (quota_op) {
- if (bucket_name.empty() && rgw::sal::User::empty(user)) {
- cerr << "ERROR: bucket name or uid is required for quota operation" << std::endl;
- return EINVAL;
- }
-
if (!bucket_name.empty()) {
if (!quota_scope.empty() && quota_scope != "bucket") {
cerr << "ERROR: invalid quota scope specification." << std::endl;
@@ -10305,6 +10666,36 @@ next:
cerr << "ERROR: invalid quota scope specification. Please specify either --quota-scope=bucket, or --quota-scope=user" << std::endl;
return EINVAL;
}
+ } else if (!account_id.empty() || !account_name.empty()) {
+ // set account quota
+ rgw::account::AdminOpState op_state;
+ op_state.account_id = account_id;
+ op_state.tenant = tenant;
+ op_state.account_name = account_name;
+
+ if (opt_cmd == OPT::QUOTA_ENABLE) {
+ op_state.quota_enabled = true;
+ } else if (opt_cmd == OPT::QUOTA_DISABLE) {
+ op_state.quota_enabled = false;
+ }
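+ // values below -1 are clamped; -1 itself means "unlimited"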
+ if (have_max_objects) {
+ op_state.quota_max_objects = std::max<int64_t>(-1, max_objects);
+ }
+ if (have_max_size) {
+ op_state.quota_max_size = std::max<int64_t>(-1, rgw_rounded_kb(max_size) * 1024);
+ }
+
+ std::string err_msg;
+ ret = rgw::account::modify(dpp(), driver, op_state, err_msg,
+ stream_flusher, null_yield);
+ if (ret < 0) {
+ cerr << "ERROR: failed to set account quota with "
+ << cpp_strerror(-ret) << ": " << err_msg << std::endl;
+ return -ret;
+ }
+ } else {
+ cerr << "ERROR: bucket name or uid or account is required for quota operation" << std::endl;
+ return EINVAL;
}
}
@@ -10649,7 +11040,8 @@ next:
return -ret;
}
} else {
- RGWPubSub ps(driver, tenant, *site);
+ const std::string& account = !account_id.empty() ? account_id : tenant;
+ RGWPubSub ps(driver, account, *site);
const RGWPubSub::Bucket b(ps, bucket.get());
ret = b.get_topics(dpp(), result, null_yield);
if (ret < 0 && ret != -ENOENT) {
@@ -10662,9 +11054,17 @@ next:
}
if (opt_cmd == OPT::PUBSUB_TOPIC_LIST) {
- RGWPubSub ps(driver, tenant, *site);
+ const std::string& account = !account_id.empty() ? account_id : tenant;
+ RGWPubSub ps(driver, account, *site);
std::string next_token = marker;
+ std::optional<rgw_owner> owner;
+ if (!rgw::sal::User::empty(user)) {
+ owner = user->get_id();
+ } else if (!account_id.empty()) {
+ owner = rgw_account_id{account_id};
+ }
+
formatter->open_object_section("result");
formatter->open_array_section("topics");
do {
@@ -10676,7 +11076,7 @@ next:
return -ret;
}
for (const auto& [_, topic] : result.topics) {
- if (!rgw::sal::User::empty(user) && user->get_id() != topic.user) {
+ if (owner && *owner != topic.owner) {
continue;
}
std::set<std::string> subscribed_buckets;
@@ -10713,7 +11113,8 @@ next:
cerr << "ERROR: topic name was not provided (via --topic)" << std::endl;
return EINVAL;
}
- RGWPubSub ps(driver, tenant, *site);
+ const std::string& account = !account_id.empty() ? account_id : tenant;
+ RGWPubSub ps(driver, account, *site);
rgw_pubsub_topic topic;
std::set<std::string> subscribed_buckets;
@@ -10757,7 +11158,8 @@ next:
return -ret;
}
} else {
- RGWPubSub ps(driver, tenant, *site);
+ const std::string& account = !account_id.empty() ? account_id : tenant;
+ RGWPubSub ps(driver, account, *site);
const RGWPubSub::Bucket b(ps, bucket.get());
ret = b.get_topics(dpp(), bucket_topics, null_yield);
if (ret < 0 && ret != -ENOENT) {
@@ -10784,20 +11186,14 @@ next:
return -EINVAL;
}
- RGWPubSub ps(driver, tenant, *site);
+ const std::string& account = !account_id.empty() ? account_id : tenant;
+ RGWPubSub ps(driver, account, *site);
ret = ps.remove_topic(dpp(), topic_name, null_yield);
if (ret < 0) {
cerr << "ERROR: could not remove topic: " << cpp_strerror(-ret) << std::endl;
return -ret;
}
-
- ret = rgw::notify::remove_persistent_topic(
- dpp(), static_cast<rgw::sal::RadosStore*>(driver)->getRados()->get_notif_pool_ctx(), topic_name, null_yield);
- if (ret < 0 && ret != -ENOENT) {
- cerr << "ERROR: could not remove persistent topic: " << cpp_strerror(-ret) << std::endl;
- return -ret;
- }
}
if (opt_cmd == OPT::PUBSUB_NOTIFICATION_RM) {
@@ -10824,7 +11220,8 @@ next:
ret = remove_notification_v2(dpp(), driver, bucket.get(), notification_id,
null_yield);
} else {
- RGWPubSub ps(driver, tenant, *site);
+ const std::string& account = !account_id.empty() ? account_id : tenant;
+ RGWPubSub ps(driver, account, *site);
rgw_pubsub_bucket_topics bucket_topics;
const RGWPubSub::Bucket b(ps, bucket.get());
@@ -10848,11 +11245,25 @@ next:
cerr << "ERROR: topic name was not provided (via --topic)" << std::endl;
return EINVAL;
}
+ const std::string& account = !account_id.empty() ? account_id : tenant;
+ RGWPubSub ps(driver, account, *site);
+
+ rgw_pubsub_topic topic;
+ ret = ps.get_topic(dpp(), topic_name, topic, null_yield, nullptr);
+ if (ret < 0) {
+ cerr << "ERROR: could not get topic: " << cpp_strerror(-ret) << std::endl;
+ return -ret;
+ }
+
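+ // stats are only available for topics backed by a persistent queue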
+ if (topic.dest.persistent_queue.empty()) {
+ cerr << "This topic does not have a persistent queue." << std::endl;
+ return ENOENT;
+ }
rgw::notify::rgw_topic_stats stats;
- ret = rgw::notify::get_persistent_queue_stats_by_topic_name(
- dpp(), static_cast<rgw::sal::RadosStore *>(driver)->getRados()->get_notif_pool_ctx(), topic_name,
- stats, null_yield);
+ ret = rgw::notify::get_persistent_queue_stats(
+ dpp(), static_cast<rgw::sal::RadosStore *>(driver)->getRados()->get_notif_pool_ctx(),
+ topic.dest.persistent_queue, stats, null_yield);
if (ret < 0) {
cerr << "ERROR: could not get persistent queue: " << cpp_strerror(-ret) << std::endl;
return -ret;
@@ -11011,6 +11422,78 @@ next:
return EPERM;
#endif
}
+
+ if (opt_cmd == OPT::ACCOUNT_CREATE ||
+ opt_cmd == OPT::ACCOUNT_MODIFY ||
+ opt_cmd == OPT::ACCOUNT_GET ||
+ opt_cmd == OPT::ACCOUNT_STATS ||
+ opt_cmd == OPT::ACCOUNT_RM)
+ {
+ auto op_state = rgw::account::AdminOpState{
+ .account_id = account_id,
+ .tenant = tenant,
+ .account_name = account_name,
+ .email = user_email,
+ .max_users = max_users,
+ .max_roles = max_roles,
+ .max_groups = max_groups,
+ .max_access_keys = max_access_keys,
+ .max_buckets = max_buckets,
+ };
+
+ std::string err_msg;
+ if (opt_cmd == OPT::ACCOUNT_CREATE) {
+ ret = rgw::account::create(dpp(), driver, op_state, err_msg,
+ stream_flusher, null_yield);
+ if (ret < 0) {
+ cerr << "ERROR: failed to create account with " << cpp_strerror(-ret)
+ << ": " << err_msg << std::endl;
+ return -ret;
+ }
+ }
+
+ if (opt_cmd == OPT::ACCOUNT_MODIFY) {
+ ret = rgw::account::modify(dpp(), driver, op_state, err_msg,
+ stream_flusher, null_yield);
+ if (ret < 0) {
+ cerr << "ERROR: failed to modify account with " << cpp_strerror(-ret)
+ << ": " << err_msg << std::endl;
+ return -ret;
+ }
+ }
+
+ if (opt_cmd == OPT::ACCOUNT_GET) {
+ ret = rgw::account::info(dpp(), driver, op_state, err_msg,
+ stream_flusher, null_yield);
+ if (ret < 0) {
+ cerr << "ERROR: failed to read account with " << cpp_strerror(-ret)
+ << ": " << err_msg << std::endl;
+ return -ret;
+ }
+ }
+
+ if (opt_cmd == OPT::ACCOUNT_STATS) {
+ ret = rgw::account::stats(dpp(), driver, op_state,
+ sync_stats, reset_stats, err_msg,
+ stream_flusher, null_yield);
+ if (ret < 0) {
+ cerr << "ERROR: failed to read account stats with " << cpp_strerror(-ret)
+ << ": " << err_msg << std::endl;
+ return -ret;
+ }
+ }
+
+ if (opt_cmd == OPT::ACCOUNT_RM) {
+ ret = rgw::account::remove(dpp(), driver, op_state, err_msg,
+ stream_flusher, null_yield);
+ if (ret < 0) {
+ cerr << "ERROR: failed to remove account with " << cpp_strerror(-ret)
+ << ": " << err_msg << std::endl;
+ return -ret;
+ }
+ }
+ }
+
return 0;
}
diff --git a/src/rgw/rgw_appmain.cc b/src/rgw/rgw_appmain.cc
index bba32e3dfa8..6b832fb4328 100644
--- a/src/rgw/rgw_appmain.cc
+++ b/src/rgw/rgw_appmain.cc
@@ -37,6 +37,7 @@
#include "rgw_rest_admin.h"
#include "rgw_rest_info.h"
#include "rgw_rest_usage.h"
+#include "rgw_rest_account.h"
#include "rgw_rest_bucket.h"
#include "rgw_rest_metadata.h"
#include "rgw_rest_log.h"
@@ -360,6 +361,7 @@ void rgw::AppMain::cond_init_apis()
RGWRESTMgr_Admin *admin_resource = new RGWRESTMgr_Admin;
admin_resource->register_resource("info", new RGWRESTMgr_Info);
admin_resource->register_resource("usage", new RGWRESTMgr_Usage);
+ admin_resource->register_resource("account", new RGWRESTMgr_Account);
/* Register driver-specific admin APIs */
env.driver->register_admin_apis(admin_resource);
rest.register_resource(g_conf()->rgw_admin_entry, admin_resource);
diff --git a/src/rgw/rgw_auth.cc b/src/rgw/rgw_auth.cc
index 5d98933063c..57eded9eb6a 100644
--- a/src/rgw/rgw_auth.cc
+++ b/src/rgw/rgw_auth.cc
@@ -3,16 +3,20 @@
#include <array>
#include <string>
+#include <variant>
+#include "common/errno.h"
#include "rgw_common.h"
#include "rgw_auth.h"
#include "rgw_quota.h"
#include "rgw_user.h"
#include "rgw_http_client.h"
+#include "rgw_iam_managed_policy.h"
#include "rgw_keystone.h"
#include "rgw_sal.h"
#include "rgw_log.h"
+#include "include/function2.hpp"
#include "include/str_list.h"
#define dout_context g_ceph_context
@@ -23,68 +27,242 @@ using namespace std;
namespace rgw {
namespace auth {
-std::unique_ptr<rgw::auth::Identity>
-transform_old_authinfo(CephContext* const cct,
- const rgw_user& auth_id,
- const int perm_mask,
- const bool is_admin,
- const uint32_t type)
+// match a principal by path/name[:subuser]
+static bool match_principal(std::string_view path,
+ std::string_view name,
+ std::string_view subuser,
+ std::string_view expected)
+{
+ // leading / was already matched by ":user/" in parse_principal()
+ if (!path.empty()) {
+ path.remove_prefix(1);
+ }
+
+ // match user path
+ if (!expected.starts_with(path)) {
+ return false;
+ }
+ expected.remove_prefix(path.size());
+
+ // match user by id
+ if (!expected.starts_with(name)) {
+ return false;
+ }
+ expected.remove_prefix(name.size());
+ if (expected.empty()) { // exact match
+ return true;
+ }
+
+ // try to match name:subuser
+ if (!expected.starts_with(":")) {
+ return false;
+ }
+ expected.remove_prefix(1);
+ if (expected.empty()) {
+ return false;
+ }
+ return (expected == "*" || expected == subuser);
+}
+
+static bool match_owner(const rgw_owner& owner, const rgw_user& uid,
+ const std::optional<RGWAccountInfo>& account)
+{
+ return std::visit(fu2::overload(
+ [&uid] (const rgw_user& u) { return u == uid; },
+ [&account] (const rgw_account_id& a) {
+ return account && a == account->id;
+ }), owner);
+}
+
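+ // a principal's account field may name either an account id or a tenant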
+static bool match_account_or_tenant(const std::optional<RGWAccountInfo>& account,
+ std::string_view tenant,
+ std::string_view expected)
+{
+ return (account && account->id == expected)
+ || (tenant == expected);
+}
+
+static void load_inline_policy(CephContext* cct, const bufferlist& bl,
+ const string* tenant,
+ std::vector<rgw::IAM::Policy>& policies)
+{
+ map<string, string> policy_map;
+ using ceph::decode;
+ decode(policy_map, bl);
+ for (const auto& [name, policy] : policy_map) {
+ policies.emplace_back(cct, tenant, policy, false);
+ }
+}
+
+static void load_managed_policy(CephContext* cct, const bufferlist& bl,
+ std::vector<rgw::IAM::Policy>& policies)
+{
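+ // resolve each stored arn to a built-in managed policy; unrecognized arns are skipped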
+ rgw::IAM::ManagedPolicies policy_set;
+ using ceph::decode;
+ decode(policy_set, bl);
+ for (const auto& arn : policy_set.arns) {
+ if (auto p = rgw::IAM::get_managed_policy(cct, arn); p) {
+ policies.push_back(std::move(*p));
+ }
+ }
+}
+
+static int load_group_policies(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ rgw::sal::Driver* driver,
+ const std::string* tenant,
+ std::string_view group_id,
+ std::vector<rgw::IAM::Policy>& policies)
+{
+ RGWGroupInfo info;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+ int r = driver->load_group_by_id(dpp, y, group_id, info, attrs, objv);
+ if (r < 0) {
+ return r;
+ }
+
+ CephContext* cct = dpp->get_cct();
+ if (auto i = attrs.find(RGW_ATTR_IAM_POLICY); i != attrs.end()) {
+ load_inline_policy(cct, i->second, tenant, policies);
+ }
+ if (auto i = attrs.find(RGW_ATTR_MANAGED_POLICY); i != attrs.end()) {
+ load_managed_policy(cct, i->second, policies);
+ }
+ return 0;
+}
+
+int load_account_and_policies(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ sal::Driver* driver,
+ const RGWUserInfo& info,
+ const sal::Attrs& attrs,
+ std::optional<RGWAccountInfo>& account,
+ std::vector<IAM::Policy>& policies)
+{
+ if (!info.account_id.empty()) {
+ account.emplace();
+ rgw::sal::Attrs attrs; // ignored
+ RGWObjVersionTracker objv; // ignored
+ int r = driver->load_account_by_id(dpp, y, info.account_id,
+ *account, attrs, objv);
+ if (r < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to load account "
+ << info.account_id << " for user " << info.user_id
+ << ": " << cpp_strerror(r) << dendl;
+ return r;
+ }
+ }
+
+ // non-account identity policy is restricted to the current tenant
+ const std::string* policy_tenant = info.account_id.empty()
+ ? &info.user_id.tenant : nullptr;
+
+ // load user policies from user attrs
+ CephContext* cct = dpp->get_cct();
+ if (auto bl = attrs.find(RGW_ATTR_USER_POLICY); bl != attrs.end()) {
+ load_inline_policy(cct, bl->second, policy_tenant, policies);
+ }
+ if (auto bl = attrs.find(RGW_ATTR_MANAGED_POLICY); bl != attrs.end()) {
+ load_managed_policy(cct, bl->second, policies);
+ }
+
+ // load each group and its policies
+ for (const auto& id : info.group_ids) {
+ int r = load_group_policies(dpp, y, driver, policy_tenant, id, policies);
+ if (r == -ENOENT) {
+ // in multisite, metadata sync may race to replicate the user before its
+ // group. ignore ENOENT here so we don't reject all the user's requests
+ // in the meantime
+ ldpp_dout(dpp, 1) << "WARNING: skipping nonexistent group id " << id
+ << " for user " << info.user_id << ": " << cpp_strerror(r) << dendl;
+ } else if (r < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to load group id " << id
+ << " for user " << info.user_id << ": " << cpp_strerror(r) << dendl;
+ return r;
+ }
+ }
+
+ return 0;
+}
+
+static auto transform_old_authinfo(const RGWUserInfo& user,
+ std::optional<RGWAccountInfo> account,
+ std::vector<IAM::Policy> policies)
+ -> std::unique_ptr<rgw::auth::Identity>
{
/* This class is not intended for public use. Should be removed altogether
* with this function after moving all our APIs to the new authentication
* infrastructure. */
- class DummyIdentityApplier : public rgw::auth::Identity {
- CephContext* const cct;
-
+ class DummyIdentityApplier : public rgw::auth::IdentityApplier {
/* For this particular case it's OK to use rgw_user structure to convey
* the identity info as this was the policy for doing that before the
* new auth. */
const rgw_user id;
- const int perm_mask;
+ const std::string display_name;
+ const std::string path;
const bool is_admin;
const uint32_t type;
+ const std::optional<RGWAccountInfo> account;
+ const std::vector<IAM::Policy> policies;
public:
- DummyIdentityApplier(CephContext* const cct,
- const rgw_user& auth_id,
- const int perm_mask,
- const bool is_admin,
- const uint32_t type)
- : cct(cct),
- id(auth_id),
- perm_mask(perm_mask),
- is_admin(is_admin),
- type(type) {
+ DummyIdentityApplier(const RGWUserInfo& user,
+ std::optional<RGWAccountInfo> account,
+ std::vector<IAM::Policy> policies)
+ : id(user.user_id),
+ display_name(user.display_name),
+ path(user.path),
+ is_admin(user.admin),
+ type(user.type),
+ account(std::move(account)),
+ policies(std::move(policies))
+ {}
+
+ ACLOwner get_aclowner() const override {
+ ACLOwner get_aclowner() const override {
+ ACLOwner owner;
+ if (account) {
+ owner.id = account->id;
+ owner.display_name = account->name;
+ } else {
+ owner.id = id;
+ owner.display_name = display_name;
+ }
+ return owner;
}
uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override {
- return rgw_perms_from_aclspec_default_strategy(id, aclspec, dpp);
+ return rgw_perms_from_aclspec_default_strategy(id.to_str(), aclspec, dpp);
}
- bool is_admin_of(const rgw_user& acct_id) const override {
+ bool is_admin_of(const rgw_owner& o) const override {
return is_admin;
}
- bool is_owner_of(const rgw_user& acct_id) const override {
- return id == acct_id;
+ bool is_owner_of(const rgw_owner& o) const override {
+ return match_owner(o, id, account);
}
- bool is_identity(const idset_t& ids) const override {
- for (auto& p : ids) {
- if (p.is_wildcard()) {
- return true;
- } else if (p.is_tenant() && p.get_tenant() == id.tenant) {
- return true;
- } else if (p.is_user() &&
- (p.get_tenant() == id.tenant) &&
- (p.get_id() == id.id)) {
- return true;
- }
+ bool is_identity(const Principal& p) const override {
+ if (p.is_wildcard()) {
+ return true;
+ } else if (p.is_account()) {
+ return match_account_or_tenant(account, id.tenant,
+ p.get_account());
+ } else if (p.is_user()) {
+ std::string_view no_subuser;
+ // account users can match both account- and tenant-based arns
+ if (account && p.get_account() == account->id) {
+ return match_principal(path, display_name, no_subuser, p.get_id());
+ } else {
+ return p.get_account() == id.tenant
+ && match_principal(path, id.id, no_subuser, p.get_id());
+ }
}
return false;
}
uint32_t get_perm_mask() const override {
- return perm_mask;
+ return RGW_PERM_FULL_CONTROL;
}
uint32_t get_identity_type() const override {
@@ -98,32 +276,59 @@ transform_old_authinfo(CephContext* const cct,
string get_subuser() const override {
return {};
}
+ const std::string& get_tenant() const override {
+ return id.tenant;
+ }
+ const std::optional<RGWAccountInfo>& get_account() const override {
+ return account;
+ }
+
+ void write_ops_log_entry(rgw_log_entry& entry) const override {
+ if (account) {
+ entry.account_id = account->id;
+ }
+ }
void to_str(std::ostream& out) const override {
out << "RGWDummyIdentityApplier(auth_id=" << id
- << ", perm_mask=" << perm_mask
<< ", is_admin=" << is_admin << ")";
}
+
+ void load_acct_info(const DoutPrefixProvider* dpp,
+ RGWUserInfo& user_info) const override {
+ // noop, this user info was passed in on construction
+ }
+
+ void modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const override {
+ // copy our identity policies into req_state
+ s->iam_identity_policies.insert(s->iam_identity_policies.end(),
+ policies.begin(), policies.end());
+ }
};
- return std::unique_ptr<rgw::auth::Identity>(
- new DummyIdentityApplier(cct,
- auth_id,
- perm_mask,
- is_admin,
- type));
+ return std::make_unique<DummyIdentityApplier>(
+ user, std::move(account), std::move(policies));
}
-std::unique_ptr<rgw::auth::Identity>
-transform_old_authinfo(const req_state* const s)
+auto transform_old_authinfo(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ sal::Driver* driver,
+ sal::User* user)
+ -> tl::expected<std::unique_ptr<Identity>, int>
{
- return transform_old_authinfo(s->cct,
- s->user->get_id(),
- s->perm_mask,
- /* System user has admin permissions by default - it's supposed to pass
- * through any security check. */
- s->system_request,
- s->user->get_type());
+ const RGWUserInfo& info = user->get_info();
+ const sal::Attrs& attrs = user->get_attrs();
+
+ std::optional<RGWAccountInfo> account;
+ std::vector<IAM::Policy> policies;
+
+ int r = load_account_and_policies(dpp, y, driver, info, attrs,
+ account, policies);
+ if (r < 0) {
+ return tl::unexpected(r);
+ }
+
+ return transform_old_authinfo(info, std::move(account), std::move(policies));
}
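
tl::expected carries either the identity or a negative errno, so callers branch
without exceptions; a minimal usage sketch:

    auto ident = rgw::auth::transform_old_authinfo(dpp, y, driver, user.get());
    if (!ident) {
      return ident.error();            // int error code from account/policy loading
    }
    const ACLOwner owner = (*ident)->get_aclowner();
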
} /* namespace auth */
@@ -131,13 +336,13 @@ transform_old_authinfo(const req_state* const s)
uint32_t rgw_perms_from_aclspec_default_strategy(
- const rgw_user& uid,
+ const std::string& uid,
const rgw::auth::Identity::aclspec_t& aclspec,
const DoutPrefixProvider *dpp)
{
ldpp_dout(dpp, 5) << "Searching permissions for uid=" << uid << dendl;
- const auto iter = aclspec.find(uid.to_str());
+ const auto iter = aclspec.find(uid);
if (std::end(aclspec) != iter) {
ldpp_dout(dpp, 5) << "Found permission: " << iter->second << dendl;
return iter->second;
@@ -332,6 +537,9 @@ rgw::auth::Strategy::apply(const DoutPrefixProvider *dpp, const rgw::auth::Strat
s->auth.identity = std::move(applier);
s->auth.completer = std::move(completer);
+ /* Populate the owner info. */
+ s->owner = s->auth.identity->get_aclowner();
+
return 0;
} catch (const int err) {
ldpp_dout(dpp, 5) << "applier throwed err=" << err << dendl;
@@ -360,13 +568,40 @@ rgw::auth::Strategy::add_engine(const Control ctrl_flag,
auth_stack.push_back(std::make_pair(std::cref(engine), ctrl_flag));
}
+ACLOwner rgw::auth::WebIdentityApplier::get_aclowner() const
+{
+ ACLOwner owner;
+ if (account) {
+ owner.id = account->id;
+ owner.display_name = account->name;
+ } else {
+ owner.id = rgw_user{role_tenant, sub, "oidc"};
+ owner.display_name = user_name;
+ }
+ return owner;
+}
+
+bool rgw::auth::WebIdentityApplier::is_owner_of(const rgw_owner& o) const
+{
+ return match_owner(o, rgw_user{role_tenant, sub, "oidc"}, account);
+}
+
void rgw::auth::WebIdentityApplier::to_str(std::ostream& out) const
{
out << "rgw::auth::WebIdentityApplier(sub =" << sub
<< ", user_name=" << user_name
+ << ", role_id=" << role_id
<< ", provider_id =" << iss << ")";
}
+void rgw::auth::WebIdentityApplier::write_ops_log_entry(rgw_log_entry& entry) const
+{
+ if (account) {
+ entry.account_id = account->id;
+ }
+ entry.role_id = role_id;
+}
+
string rgw::auth::WebIdentityApplier::get_idp_url() const
{
string idp_url = this->iss;
@@ -402,6 +637,15 @@ void rgw::auth::WebIdentityApplier::load_acct_info(const DoutPrefixProvider* dpp
federated_user.tenant = role_tenant;
federated_user.ns = "oidc";
+ if (account) {
+ // we don't need shadow users for account roles because bucket ownership,
+ // quota, and stats are tracked by the account instead of the user
+ user_info.user_id = std::move(federated_user);
+ user_info.display_name = user_name;
+ user_info.type = TYPE_WEB;
+ return;
+ }
+
std::unique_ptr<rgw::sal::User> user = driver->get_user(federated_user);
//Check in oidc namespace
@@ -421,7 +665,10 @@ void rgw::auth::WebIdentityApplier::load_acct_info(const DoutPrefixProvider* dpp
//Check if user_id.buckets already exists; it may date from the time when shadow users didn't exist
RGWStorageStats stats;
- int ret = user->read_stats(dpp, null_yield, &stats);
+ ceph::real_time last_synced;
+ ceph::real_time last_updated;
+ int ret = driver->load_stats(dpp, null_yield, federated_user, stats,
+ last_synced, last_updated);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: reading stats for the user returned error " << ret << dendl;
return;
@@ -509,31 +756,35 @@ void rgw::auth::WebIdentityApplier::modify_request_state(const DoutPrefixProvide
}
}
-bool rgw::auth::WebIdentityApplier::is_identity(const idset_t& ids) const
+bool rgw::auth::WebIdentityApplier::is_identity(const Principal& p) const
{
- if (ids.size() > 1) {
- return false;
- }
-
- for (auto id : ids) {
- string idp_url = get_idp_url();
- if (id.is_oidc_provider() && id.get_idp_url() == idp_url) {
- return true;
- }
- }
- return false;
+ return p.is_oidc_provider()
+ && p.get_idp_url() == get_idp_url();
}
const std::string rgw::auth::RemoteApplier::AuthInfo::NO_SUBUSER;
const std::string rgw::auth::RemoteApplier::AuthInfo::NO_ACCESS_KEY;
/* rgw::auth::RemoteAuthApplier */
+ACLOwner rgw::auth::RemoteApplier::get_aclowner() const
+{
+ ACLOwner owner;
+ if (account) {
+ owner.id = account->id;
+ owner.display_name = account->name;
+ } else {
+ owner.id = info.acct_user;
+ owner.display_name = info.acct_name;
+ }
+ return owner;
+}
+
uint32_t rgw::auth::RemoteApplier::get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const
{
uint32_t perm = 0;
/* For backward compatibility with ACLOwner. */
- perm |= rgw_perms_from_aclspec_default_strategy(info.acct_user,
+ perm |= rgw_perms_from_aclspec_default_strategy(info.acct_user.to_str(),
aclspec, dpp);
/* We also need to cover cases where rgw_keystone_implicit_tenants
@@ -541,7 +792,7 @@ uint32_t rgw::auth::RemoteApplier::get_perms_from_aclspec(const DoutPrefixProvid
if (info.acct_user.tenant.empty()) {
const rgw_user tenanted_acct_user(info.acct_user.id, info.acct_user.id);
- perm |= rgw_perms_from_aclspec_default_strategy(tenanted_acct_user,
+ perm |= rgw_perms_from_aclspec_default_strategy(tenanted_acct_user.to_str(),
aclspec, dpp);
}
@@ -555,43 +806,42 @@ uint32_t rgw::auth::RemoteApplier::get_perms_from_aclspec(const DoutPrefixProvid
return perm;
}
-bool rgw::auth::RemoteApplier::is_admin_of(const rgw_user& uid) const
+bool rgw::auth::RemoteApplier::is_admin_of(const rgw_owner& o) const
{
return info.is_admin;
}
-bool rgw::auth::RemoteApplier::is_owner_of(const rgw_user& uid) const
+bool rgw::auth::RemoteApplier::is_owner_of(const rgw_owner& o) const
{
+ auto* uid = std::get_if<rgw_user>(&o);
+ if (!uid) {
+ return false;
+ }
+
if (info.acct_user.tenant.empty()) {
const rgw_user tenanted_acct_user(info.acct_user.id, info.acct_user.id);
- if (tenanted_acct_user == uid) {
+ if (tenanted_acct_user == *uid) {
return true;
}
}
- return info.acct_user == uid;
+ return info.acct_user == *uid;
}
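
rgw_owner is a variant over rgw_user and rgw_account_id (see parse_owner() later in
this patch), and std::get_if returns nullptr for the non-active alternative; that is
how the remote applier rejects account owners outright:

    rgw_owner o = rgw_user{"tenant", "alice"};
    if (auto* uid = std::get_if<rgw_user>(&o); uid) {
      // active alternative is rgw_user: compare *uid with the authenticated user
    } else {
      // active alternative is rgw_account_id: remote identities never own accounts
    }
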
-bool rgw::auth::RemoteApplier::is_identity(const idset_t& ids) const {
- for (auto& id : ids) {
- if (id.is_wildcard()) {
- return true;
-
- // We also need to cover cases where rgw_keystone_implicit_tenants
- // was enabled. */
- } else if (id.is_tenant() &&
- (info.acct_user.tenant.empty() ?
- info.acct_user.id :
- info.acct_user.tenant) == id.get_tenant()) {
- return true;
- } else if (id.is_user() &&
- info.acct_user.id == id.get_id() &&
- (info.acct_user.tenant.empty() ?
- info.acct_user.id :
- info.acct_user.tenant) == id.get_tenant()) {
- return true;
- }
+bool rgw::auth::RemoteApplier::is_identity(const Principal& p) const {
+ // We also need to cover cases where rgw_keystone_implicit_tenants
+ // was enabled.
+ std::string_view tenant = info.acct_user.tenant.empty() ?
+ info.acct_user.id :
+ info.acct_user.tenant;
+ if (p.is_wildcard()) {
+ return true;
+ } else if (p.is_account()) {
+ return p.get_account() == tenant;
+ } else if (p.is_user()) {
+ return p.get_id() == info.acct_user.id
+ && p.get_account() == tenant;
}
return false;
}
@@ -680,6 +930,9 @@ void rgw::auth::RemoteApplier::write_ops_log_entry(rgw_log_entry& entry) const
{
entry.access_key_id = info.access_key_id;
entry.subuser = info.subuser;
+ if (account) {
+ entry.account_id = account->id;
+ }
}
/* TODO(rzarzynski): we need to handle display_name changes. */
@@ -719,7 +972,10 @@ void rgw::auth::RemoteApplier::load_acct_info(const DoutPrefixProvider* dpp, RGW
if (user->load_user(dpp, null_yield) >= 0) {
/* Succeeded. */
- user_info = user->get_info();
+ (void) load_account_and_policies(dpp, null_yield, driver, user->get_info(),
+ user->get_attrs(), account, policies);
+
+ user_info = std::move(user->get_info());
return;
}
}
@@ -730,7 +986,10 @@ void rgw::auth::RemoteApplier::load_acct_info(const DoutPrefixProvider* dpp, RGW
; /* suppress lookup for id used by "other" protocol */
else if (user->load_user(dpp, null_yield) >= 0) {
/* Succeeded. */
- user_info = user->get_info();
+ (void) load_account_and_policies(dpp, null_yield, driver, user->get_info(),
+ user->get_attrs(), account, policies);
+
+ user_info = std::move(user->get_info());
return;
}
@@ -740,50 +999,72 @@ void rgw::auth::RemoteApplier::load_acct_info(const DoutPrefixProvider* dpp, RGW
/* Succeeded if we are here (create_account() hasn't thrown). */
}
+void rgw::auth::RemoteApplier::modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const
+{
+ // copy our identity policies into req_state
+ s->iam_identity_policies.insert(s->iam_identity_policies.end(),
+ policies.begin(), policies.end());
+}
+
/* rgw::auth::LocalApplier */
/* static declaration */
const std::string rgw::auth::LocalApplier::NO_SUBUSER;
const std::string rgw::auth::LocalApplier::NO_ACCESS_KEY;
+ACLOwner rgw::auth::LocalApplier::get_aclowner() const
+{
+ ACLOwner owner;
+ if (account) {
+ owner.id = account->id;
+ owner.display_name = account->name;
+ } else {
+ owner.id = user_info.user_id;
+ owner.display_name = user_info.display_name;
+ }
+ return owner;
+}
+
uint32_t rgw::auth::LocalApplier::get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const
{
- return rgw_perms_from_aclspec_default_strategy(user_info.user_id, aclspec, dpp);
+ // match acl grants to the specific user id
+ uint32_t mask = rgw_perms_from_aclspec_default_strategy(
+ user_info.user_id.to_str(), aclspec, dpp);
+
+ if (account) {
+ // account users also match acl grants to the account id. in aws, grantees
+ // ONLY refer to accounts. but we continue to match user grants to preserve
+ // access when moving legacy users into new accounts
+ mask |= rgw_perms_from_aclspec_default_strategy(account->id, aclspec, dpp);
+ }
+
+ return mask;
}
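
aclspec_t maps grantee strings to permission bits, so an account user can accumulate
grants addressed to either identifier; a sketch with made-up ids:

    rgw::auth::Identity::aclspec_t aclspec = {
      {"tenant$alice",         RGW_PERM_READ},   // legacy per-user grant
      {"RGW11111111111111111", RGW_PERM_WRITE},  // hypothetical account-id grant
    };
    // for an account user, the code above ORs both lookups:
    // mask == RGW_PERM_READ | RGW_PERM_WRITE
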
-bool rgw::auth::LocalApplier::is_admin_of(const rgw_user& uid) const
+bool rgw::auth::LocalApplier::is_admin_of(const rgw_owner& o) const
{
return user_info.admin || user_info.system;
}
-bool rgw::auth::LocalApplier::is_owner_of(const rgw_user& uid) const
+bool rgw::auth::LocalApplier::is_owner_of(const rgw_owner& o) const
{
- return uid == user_info.user_id;
+ return match_owner(o, user_info.user_id, account);
}
-bool rgw::auth::LocalApplier::is_identity(const idset_t& ids) const {
- for (auto& id : ids) {
- if (id.is_wildcard()) {
- return true;
- } else if (id.is_tenant() &&
- id.get_tenant() == user_info.user_id.tenant) {
- return true;
- } else if (id.is_user() &&
- (id.get_tenant() == user_info.user_id.tenant)) {
- if (id.get_id() == user_info.user_id.id) {
- return true;
- }
- std::string wildcard_subuser = user_info.user_id.id;
- wildcard_subuser.append(":*");
- if (wildcard_subuser == id.get_id()) {
- return true;
- } else if (subuser != NO_SUBUSER) {
- std::string user = user_info.user_id.id;
- user.append(":");
- user.append(subuser);
- if (user == id.get_id()) {
- return true;
- }
- }
+bool rgw::auth::LocalApplier::is_identity(const Principal& p) const {
+ if (p.is_wildcard()) {
+ return true;
+ } else if (p.is_account()) {
+ return match_account_or_tenant(account, user_info.user_id.tenant,
+ p.get_account());
+ } else if (p.is_user()) {
+ // account users can match both account- and tenant-based arns
+ if (account && p.get_account() == account->id) {
+ return match_principal(user_info.path, user_info.display_name,
+ subuser, p.get_id());
+ } else {
+ return p.get_account() == user_info.user_id.tenant
+ && match_principal(user_info.path, user_info.user_id.id,
+ subuser, p.get_id());
}
}
return false;
@@ -822,50 +1103,80 @@ void rgw::auth::LocalApplier::load_acct_info(const DoutPrefixProvider* dpp, RGWU
user_info = this->user_info;
}
+void rgw::auth::LocalApplier::modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const
+{
+ // copy our identity policies into req_state
+ s->iam_identity_policies.insert(s->iam_identity_policies.end(),
+ policies.begin(), policies.end());
+}
+
void rgw::auth::LocalApplier::write_ops_log_entry(rgw_log_entry& entry) const
{
entry.access_key_id = access_key_id;
entry.subuser = subuser;
+ if (account) {
+ entry.account_id = account->id;
+ }
+}
+
+ACLOwner rgw::auth::RoleApplier::get_aclowner() const
+{
+ ACLOwner owner;
+ if (role.account) {
+ owner.id = role.account->id;
+ owner.display_name = role.account->name;
+ } else {
+ owner.id = token_attrs.user_id;
+ owner.display_name = role.name;
+ }
+ return owner;
+}
+
+bool rgw::auth::RoleApplier::is_owner_of(const rgw_owner& o) const
+{
+ return match_owner(o, token_attrs.user_id, role.account);
}
void rgw::auth::RoleApplier::to_str(std::ostream& out) const {
out << "rgw::auth::RoleApplier(role name =" << role.name;
- for (auto& policy: role.role_policies) {
+ for (auto& policy: role.inline_policies) {
out << ", role policy =" << policy;
}
+ for (std::string_view arn : role.managed_policies) {
+ if (auto p = arn.find('/'); p != arn.npos) {
+ out << ", managed policy =" << arn.substr(p + 1);
+ } else {
+ out << ", managed policy =" << arn;
+ }
+ }
out << ", token policy =" << token_attrs.token_policy;
out << ")";
}
-bool rgw::auth::RoleApplier::is_identity(const idset_t& ids) const {
- for (auto& p : ids) {
- if (p.is_wildcard()) {
- return true;
- } else if (p.is_role()) {
- string name = p.get_id();
- string tenant = p.get_tenant();
- if (name == role.name && tenant == role.tenant) {
- return true;
- }
- } else if (p.is_assumed_role()) {
- string tenant = p.get_tenant();
- string role_session = role.name + "/" + token_attrs.role_session_name; //role/role-session
- if (role.tenant == tenant && role_session == p.get_role_session()) {
- return true;
- }
+bool rgw::auth::RoleApplier::is_identity(const Principal& p) const {
+ if (p.is_wildcard()) {
+ return true;
+ } else if (p.is_account()) {
+ return match_account_or_tenant(role.account, role.tenant,
+ p.get_account());
+ } else if (p.is_role()) {
+ std::string_view no_subuser;
+ // account roles can match both account- and tenant-based arns
+ return match_account_or_tenant(role.account, role.tenant, p.get_account())
+ && match_principal(role.path, role.name, no_subuser, p.get_id());
+ } else if (p.is_assumed_role()) {
+ string role_session = role.name + "/" + token_attrs.role_session_name; //role/role-session
+ return p.get_account() == role.tenant
+ && p.get_role_session() == role_session;
+ } else {
+ string oidc_id;
+ if (token_attrs.user_id.ns.empty()) {
+ oidc_id = token_attrs.user_id.id;
} else {
- string id = p.get_id();
- string tenant = p.get_tenant();
- string oidc_id;
- if (token_attrs.user_id.ns.empty()) {
- oidc_id = token_attrs.user_id.id;
- } else {
- oidc_id = token_attrs.user_id.ns + "$" + token_attrs.user_id.id;
- }
- if (oidc_id == id && token_attrs.user_id.tenant == tenant) {
- return true;
- }
+ oidc_id = token_attrs.user_id.ns + "$" + token_attrs.user_id.id;
}
+ return p.get_id() == oidc_id
+ && p.get_account() == token_attrs.user_id.tenant;
}
return false;
}
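
The namespaced user id is flattened with a '$' separator before comparison; for
example:

    rgw_user uid;
    uid.tenant = "t";
    uid.ns = "oidc";
    uid.id = "sub-1234";
    // oidc_id becomes "oidc$sub-1234" and must equal p.get_id(),
    // while p.get_account() must equal uid.tenant ("t")
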
@@ -876,13 +1187,34 @@ void rgw::auth::RoleApplier::load_acct_info(const DoutPrefixProvider* dpp, RGWUs
user_info.user_id = this->token_attrs.user_id;
}
+void rgw::auth::RoleApplier::write_ops_log_entry(rgw_log_entry& entry) const
+{
+ if (role.account) {
+ entry.account_id = role.account->id;
+ }
+ entry.role_id = role.id;
+}
+
void rgw::auth::RoleApplier::modify_request_state(const DoutPrefixProvider *dpp, req_state* s) const
{
- for (auto it: role.role_policies) {
+ // non-account identity policy is restricted to the current tenant
+ const std::string* policy_tenant = role.account ? nullptr : &role.tenant;
+
+ for (const auto& policy : role.inline_policies) {
+ try {
+ const rgw::IAM::Policy p(s->cct, policy_tenant, policy, false);
+ s->iam_identity_policies.push_back(std::move(p));
+ } catch (rgw::IAM::PolicyParseException& e) {
+ //Control shouldn't reach here as the policy has already been
+ //verified earlier
+ ldpp_dout(dpp, 20) << "failed to parse role policy: " << e.what() << dendl;
+ }
+ }
+ for (const auto& arn : role.managed_policies) {
try {
- bufferlist bl = bufferlist::static_from_string(it);
- const rgw::IAM::Policy p(s->cct, role.tenant, bl, false);
- s->iam_user_policies.push_back(std::move(p));
+ if (auto p = rgw::IAM::get_managed_policy(s->cct, arn); p) {
+ s->iam_identity_policies.push_back(std::move(*p));
+ }
} catch (rgw::IAM::PolicyParseException& e) {
//Control shouldn't reach here as the policy has already been
//verified earlier
@@ -893,8 +1225,7 @@ void rgw::auth::RoleApplier::modify_request_state(const DoutPrefixProvider *dpp,
if (!this->token_attrs.token_policy.empty()) {
try {
string policy = this->token_attrs.token_policy;
- bufferlist bl = bufferlist::static_from_string(policy);
- const rgw::IAM::Policy p(s->cct, role.tenant, bl, false);
+ const rgw::IAM::Policy p(s->cct, policy_tenant, policy, false);
s->session_policies.push_back(std::move(p));
} catch (rgw::IAM::PolicyParseException& e) {
//Control shouldn't reach here as the policy has already been
@@ -936,7 +1267,7 @@ rgw::auth::AnonymousEngine::authenticate(const DoutPrefixProvider* dpp, const re
rgw_get_anon_user(user_info);
auto apl = \
- apl_factory->create_apl_local(cct, s, user_info,
+ apl_factory->create_apl_local(cct, s, user_info, std::nullopt, {},
rgw::auth::LocalApplier::NO_SUBUSER,
std::nullopt, rgw::auth::LocalApplier::NO_ACCESS_KEY);
return result_t::grant(std::move(apl));
diff --git a/src/rgw/rgw_auth.h b/src/rgw/rgw_auth.h
index e2de0afb726..2029bf6ce1e 100644
--- a/src/rgw/rgw_auth.h
+++ b/src/rgw/rgw_auth.h
@@ -10,6 +10,7 @@
#include <system_error>
#include <utility>
+#include "include/expected.hpp"
#include "include/function2.hpp"
#include "rgw_common.h"
@@ -32,10 +33,12 @@ using Exception = std::system_error;
class Identity {
public:
typedef std::map<std::string, int> aclspec_t;
- using idset_t = boost::container::flat_set<Principal>;
virtual ~Identity() = default;
+ /* Return the ACLOwner for resources created by this identity. */
+ virtual ACLOwner get_aclowner() const = 0;
+
/* Translate the ACL provided in @aclspec into concrete permission set that
* can be used during the authorization phase (RGWOp::verify_permission).
* On error throws rgw::auth::Exception storing the reason.
@@ -45,15 +48,13 @@ public:
* applier that is being used. */
virtual uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const = 0;
- /* Verify whether a given identity *can be treated as* an admin of rgw_user
- * (account in Swift's terminology) specified in @uid. On error throws
- * rgw::auth::Exception storing the reason. */
- virtual bool is_admin_of(const rgw_user& uid) const = 0;
+ /* Verify whether a given identity *can be treated as* an admin of rgw_owner
+ * specified in @o. On error throws rgw::auth::Exception storing the reason. */
+ virtual bool is_admin_of(const rgw_owner& o) const = 0;
- /* Verify whether a given identity *is* the owner of the rgw_user (account
- * in the Swift's terminology) specified in @uid. On internal error throws
- * rgw::auth::Exception storing the reason. */
- virtual bool is_owner_of(const rgw_user& uid) const = 0;
+ /* Verify whether a given identity is the rgw_owner specified in @o.
+ * On internal error throws rgw::auth::Exception storing the reason. */
+ virtual bool is_owner_of(const rgw_owner& o) const = 0;
/* Return the permission mask that is used to narrow down the set of
* operations allowed for a given identity. This method reflects the idea
@@ -72,7 +73,7 @@ public:
/* Verify whether a given identity corresponds to an identity in the
provided set */
- virtual bool is_identity(const idset_t& ids) const = 0;
+ virtual bool is_identity(const Principal& p) const = 0;
/* Identity Type: RGW/ LDAP/ Keystone */
virtual uint32_t get_identity_type() const = 0;
@@ -83,7 +84,11 @@ public:
/* Subuser of Account */
virtual std::string get_subuser() const = 0;
- virtual std::string get_role_tenant() const { return ""; }
+ /* Identity's tenant namespace */
+ virtual const std::string& get_tenant() const = 0;
+
+ /* Return the identity's account info if present */
+ virtual const std::optional<RGWAccountInfo>& get_account() const = 0;
/* write any auth-specific fields that are safe to expose in the ops log */
virtual void write_ops_log_entry(rgw_log_entry& entry) const {};
@@ -96,13 +101,22 @@ inline std::ostream& operator<<(std::ostream& out,
}
-std::unique_ptr<rgw::auth::Identity>
-transform_old_authinfo(CephContext* const cct,
- const rgw_user& auth_id,
- const int perm_mask,
- const bool is_admin,
- const uint32_t type);
-std::unique_ptr<Identity> transform_old_authinfo(const req_state* const s);
+// Return an identity for the given user after loading its account and policies.
+auto transform_old_authinfo(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ sal::Driver* driver,
+ sal::User* user)
+ -> tl::expected<std::unique_ptr<Identity>, int>;
+
+// Load the user account and all user/group policies. May throw
+// PolicyParseException on malformed policy.
+int load_account_and_policies(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ sal::Driver* driver,
+ const RGWUserInfo& info,
+ const sal::Attrs& attrs,
+ std::optional<RGWAccountInfo>& account,
+ std::vector<IAM::Policy>& policies);
/* Interface for classes applying changes to request state/RADOS store
@@ -378,11 +392,13 @@ class WebIdentityApplier : public IdentityApplier {
protected:
CephContext* const cct;
rgw::sal::Driver* driver;
+ std::string role_id;
std::string role_session;
std::string role_tenant;
std::unordered_multimap<std::string, std::string> token_claims;
boost::optional<std::multimap<std::string,std::string>> role_tags;
boost::optional<std::set<std::pair<std::string, std::string>>> principal_tags;
+ std::optional<RGWAccountInfo> account;
std::string get_idp_url() const;
@@ -393,18 +409,23 @@ protected:
public:
WebIdentityApplier( CephContext* const cct,
rgw::sal::Driver* driver,
+ const std::string& role_id,
const std::string& role_session,
const std::string& role_tenant,
const std::unordered_multimap<std::string, std::string>& token_claims,
boost::optional<std::multimap<std::string,std::string>> role_tags,
- boost::optional<std::set<std::pair<std::string, std::string>>> principal_tags)
+ boost::optional<std::set<std::pair<std::string, std::string>>> principal_tags,
+ std::optional<RGWAccountInfo> account)
: cct(cct),
driver(driver),
+ role_id(role_id),
role_session(role_session),
role_tenant(role_tenant),
token_claims(token_claims),
role_tags(role_tags),
- principal_tags(principal_tags) {
+ principal_tags(principal_tags),
+ account(std::move(account))
+ {
const auto& sub = token_claims.find("sub");
if(sub != token_claims.end()) {
this->sub = sub->second;
@@ -443,20 +464,17 @@ public:
void modify_request_state(const DoutPrefixProvider *dpp, req_state* s) const override;
+ ACLOwner get_aclowner() const override;
+
uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override {
return RGW_PERM_NONE;
}
- bool is_admin_of(const rgw_user& uid) const override {
+ bool is_admin_of(const rgw_owner& o) const override {
return false;
}
- bool is_owner_of(const rgw_user& uid) const override {
- if (uid.id == this->sub && uid.tenant == role_tenant && uid.ns == "oidc") {
- return true;
- }
- return false;
- }
+ bool is_owner_of(const rgw_owner& o) const override;
uint32_t get_perm_mask() const override {
return RGW_PERM_NONE;
@@ -464,7 +482,7 @@ public:
void to_str(std::ostream& out) const override;
- bool is_identity(const idset_t& ids) const override;
+ bool is_identity(const Principal& p) const override;
void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override;
@@ -479,17 +497,26 @@ public:
std::string get_subuser() const override {
return {};
}
+ const std::string& get_tenant() const override {
+ return role_tenant;
+ }
+ const std::optional<RGWAccountInfo>& get_account() const override {
+ return account;
+ }
+ void write_ops_log_entry(rgw_log_entry& entry) const override;
struct Factory {
virtual ~Factory() {}
virtual aplptr_t create_apl_web_identity( CephContext* cct,
const req_state* s,
+ const std::string& role_id,
const std::string& role_session,
const std::string& role_tenant,
const std::unordered_multimap<std::string, std::string>& token,
boost::optional<std::multimap<std::string, std::string>>,
- boost::optional<std::set<std::pair<std::string, std::string>>> principal_tags) const = 0;
+ boost::optional<std::set<std::pair<std::string, std::string>>> principal_tags,
+ std::optional<RGWAccountInfo> account) const = 0;
};
};
@@ -594,6 +621,10 @@ protected:
const rgw::auth::ImplicitTenants& implicit_tenant_context;
const rgw::auth::ImplicitTenants::implicit_tenant_flag_bits implicit_tenant_bit;
+ // account and policies are loaded by load_acct_info()
+ mutable std::optional<RGWAccountInfo> account;
+ mutable std::vector<IAM::Policy> policies;
+
virtual void create_account(const DoutPrefixProvider* dpp,
const rgw_user& acct_user,
bool implicit_tenant,
@@ -614,18 +645,26 @@ public:
implicit_tenant_bit(implicit_tenant_bit) {
}
+ ACLOwner get_aclowner() const override;
uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override;
- bool is_admin_of(const rgw_user& uid) const override;
- bool is_owner_of(const rgw_user& uid) const override;
- bool is_identity(const idset_t& ids) const override;
+ bool is_admin_of(const rgw_owner& o) const override;
+ bool is_owner_of(const rgw_owner& o) const override;
+ bool is_identity(const Principal& p) const override;
uint32_t get_perm_mask() const override { return info.perm_mask; }
void to_str(std::ostream& out) const override;
void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; /* out */
+ void modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const override;
void write_ops_log_entry(rgw_log_entry& entry) const override;
uint32_t get_identity_type() const override { return info.acct_type; }
std::string get_acct_name() const override { return info.acct_name; }
std::string get_subuser() const override { return {}; }
+ const std::string& get_tenant() const override {
+ return info.acct_user.tenant;
+ }
+ const std::optional<RGWAccountInfo>& get_account() const override {
+ return account;
+ }
struct Factory {
virtual ~Factory() {}
@@ -649,6 +688,8 @@ class LocalApplier : public IdentityApplier {
protected:
const RGWUserInfo user_info;
+ const std::optional<RGWAccountInfo> account;
+ const std::vector<IAM::Policy> policies;
const std::string subuser;
uint32_t perm_mask;
const std::string access_key_id;
@@ -662,20 +703,24 @@ public:
LocalApplier(CephContext* const cct,
const RGWUserInfo& user_info,
+ std::optional<RGWAccountInfo> account,
+ std::vector<IAM::Policy> policies,
std::string subuser,
const std::optional<uint32_t>& perm_mask,
const std::string access_key_id)
: user_info(user_info),
+ account(std::move(account)),
+ policies(std::move(policies)),
subuser(std::move(subuser)),
perm_mask(perm_mask.value_or(RGW_PERM_INVALID)),
access_key_id(access_key_id) {
}
-
+ ACLOwner get_aclowner() const override;
uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override;
- bool is_admin_of(const rgw_user& uid) const override;
- bool is_owner_of(const rgw_user& uid) const override;
- bool is_identity(const idset_t& ids) const override;
+ bool is_admin_of(const rgw_owner& o) const override;
+ bool is_owner_of(const rgw_owner& o) const override;
+ bool is_identity(const Principal& p) const override;
uint32_t get_perm_mask() const override {
if (this->perm_mask == RGW_PERM_INVALID) {
return get_perm_mask(subuser, user_info);
@@ -685,9 +730,17 @@ public:
}
void to_str(std::ostream& out) const override;
void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; /* out */
- uint32_t get_identity_type() const override { return TYPE_RGW; }
+ void modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const override;
+ uint32_t get_identity_type() const override { return user_info.type; }
std::string get_acct_name() const override { return {}; }
std::string get_subuser() const override { return subuser; }
+ const std::string& get_tenant() const override {
+ return user_info.user_id.tenant;
+ }
+ const std::optional<RGWAccountInfo>& get_account() const override {
+ return account;
+ }
+
void write_ops_log_entry(rgw_log_entry& entry) const override;
struct Factory {
@@ -695,6 +748,8 @@ public:
virtual aplptr_t create_apl_local(CephContext* cct,
const req_state* s,
const RGWUserInfo& user_info,
+ std::optional<RGWAccountInfo> account,
+ std::vector<IAM::Policy> policies,
const std::string& subuser,
const std::optional<uint32_t>& perm_mask,
const std::string& access_key_id) const = 0;
@@ -706,8 +761,11 @@ public:
struct Role {
std::string id;
std::string name;
+ std::string path;
std::string tenant;
- std::vector<std::string> role_policies;
+ std::optional<RGWAccountInfo> account;
+ std::vector<std::string> inline_policies;
+ std::vector<std::string> managed_policies;
};
struct TokenAttrs {
rgw_user user_id;
@@ -729,16 +787,15 @@ public:
: role(role),
token_attrs(token_attrs) {}
+ ACLOwner get_aclowner() const override;
uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override {
return 0;
}
- bool is_admin_of(const rgw_user& uid) const override {
+ bool is_admin_of(const rgw_owner& o) const override {
return false;
}
- bool is_owner_of(const rgw_user& uid) const override {
- return (this->token_attrs.user_id.id == uid.id && this->token_attrs.user_id.tenant == uid.tenant && this->token_attrs.user_id.ns == uid.ns);
- }
- bool is_identity(const idset_t& ids) const override;
+ bool is_owner_of(const rgw_owner& o) const override;
+ bool is_identity(const Principal& p) const override;
uint32_t get_perm_mask() const override {
return RGW_PERM_NONE;
}
@@ -747,16 +804,21 @@ public:
uint32_t get_identity_type() const override { return TYPE_ROLE; }
std::string get_acct_name() const override { return {}; }
std::string get_subuser() const override { return {}; }
+ const std::string& get_tenant() const override { return role.tenant; }
+ const std::optional<RGWAccountInfo>& get_account() const override {
+ return role.account;
+ }
+ void write_ops_log_entry(rgw_log_entry& entry) const override;
+
void modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const override;
- std::string get_role_tenant() const override { return role.tenant; }
struct Factory {
virtual ~Factory() {}
- virtual aplptr_t create_apl_role( CephContext* cct,
- const req_state* s,
- const rgw::auth::RoleApplier::Role& role,
- const rgw::auth::RoleApplier::TokenAttrs& token_attrs) const = 0;
- };
+ virtual aplptr_t create_apl_role(CephContext* cct,
+ const req_state* s,
+ Role role,
+ TokenAttrs token_attrs) const = 0;
+ };
};
/* The anonymous abstract engine. */
@@ -788,6 +850,6 @@ protected:
uint32_t rgw_perms_from_aclspec_default_strategy(
- const rgw_user& uid,
+ const std::string& uid,
const rgw::auth::Identity::aclspec_t& aclspec,
const DoutPrefixProvider *dpp);
diff --git a/src/rgw/rgw_auth_filters.h b/src/rgw/rgw_auth_filters.h
index 9e3818bef07..a93641e8b8e 100644
--- a/src/rgw/rgw_auth_filters.h
+++ b/src/rgw/rgw_auth_filters.h
@@ -3,10 +3,10 @@
#pragma once
+#include <optional>
#include <type_traits>
#include <boost/logic/tribool.hpp>
-#include <boost/optional.hpp>
#include "rgw_service.h"
#include "rgw_common.h"
@@ -65,16 +65,20 @@ public:
: decoratee(std::forward<DecorateeT>(decoratee)) {
}
+ ACLOwner get_aclowner() const override {
+ return get_decoratee().get_aclowner();
+ }
+
uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override {
return get_decoratee().get_perms_from_aclspec(dpp, aclspec);
}
- bool is_admin_of(const rgw_user& uid) const override {
- return get_decoratee().is_admin_of(uid);
+ bool is_admin_of(const rgw_owner& o) const override {
+ return get_decoratee().is_admin_of(o);
}
- bool is_owner_of(const rgw_user& uid) const override {
- return get_decoratee().is_owner_of(uid);
+ bool is_owner_of(const rgw_owner& o) const override {
+ return get_decoratee().is_owner_of(o);
}
bool is_anonymous() const override {
@@ -97,17 +101,20 @@ public:
return get_decoratee().get_subuser();
}
- bool is_identity(
- const boost::container::flat_set<Principal>& ids) const override {
- return get_decoratee().is_identity(ids);
+ bool is_identity(const Principal& p) const override {
+ return get_decoratee().is_identity(p);
}
void to_str(std::ostream& out) const override {
get_decoratee().to_str(out);
}
- std::string get_role_tenant() const override { /* in/out */
- return get_decoratee().get_role_tenant();
+ const std::string& get_tenant() const override {
+ return get_decoratee().get_tenant();
+ }
+
+ const std::optional<RGWAccountInfo>& get_account() const override {
+ return get_decoratee().get_account();
}
void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override { /* out */
@@ -225,6 +232,7 @@ class SysReqApplier : public DecoratedApplier<T> {
rgw::sal::Driver* driver;
const RGWHTTPArgs& args;
mutable boost::tribool is_system;
+ mutable std::optional<ACLOwner> effective_owner;
public:
template <typename U>
@@ -242,12 +250,23 @@ public:
void to_str(std::ostream& out) const override;
void load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo& user_info) const override; /* out */
void modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const override; /* in/out */
+
+ ACLOwner get_aclowner() const override {
+ if (effective_owner) {
+ return *effective_owner;
+ }
+ return DecoratedApplier<T>::get_aclowner();
+ }
};
template <typename T>
void SysReqApplier<T>::to_str(std::ostream& out) const
{
- out << "rgw::auth::SysReqApplier" << " -> ";
+ out << "rgw::auth::SysReqApplier";
+ if (effective_owner) {
+ out << '(' << effective_owner->id << ')';
+ }
+ out << " -> ";
DecoratedApplier<T>::to_str(out);
}
@@ -260,17 +279,19 @@ void SysReqApplier<T>::load_acct_info(const DoutPrefixProvider* dpp, RGWUserInfo
if (is_system) {
//ldpp_dout(dpp, 20) << "system request" << dendl;
- rgw_user effective_uid(args.sys_get(RGW_SYS_PARAM_PREFIX "uid"));
- if (! effective_uid.empty()) {
- /* We aren't writing directly to user_info for consistency and security
- * reasons. rgw_get_user_info_by_uid doesn't trigger the operator=() but
- * calls ::decode instead. */
- std::unique_ptr<rgw::sal::User> user = driver->get_user(effective_uid);
- if (user->load_user(dpp, null_yield) < 0) {
- //ldpp_dout(dpp, 0) << "User lookup failed!" << dendl;
- throw -EACCES;
+ std::string str = args.sys_get(RGW_SYS_PARAM_PREFIX "uid");
+ if (!str.empty()) {
+ effective_owner.emplace();
+ effective_owner->id = parse_owner(str);
+
+ if (const auto* uid = std::get_if<rgw_user>(&effective_owner->id); uid) {
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(*uid);
+ if (user->load_user(dpp, null_yield) < 0) {
+ //ldpp_dout(dpp, 0) << "User lookup failed!" << dendl;
+ throw -EACCES;
+ }
+ effective_owner->display_name = user->get_display_name();
}
- user_info = user->get_info();
}
}
}
diff --git a/src/rgw/rgw_auth_s3.cc b/src/rgw/rgw_auth_s3.cc
index fd7fd07a54e..61c2118b672 100644
--- a/src/rgw/rgw_auth_s3.cc
+++ b/src/rgw/rgw_auth_s3.cc
@@ -468,38 +468,71 @@ static inline int parse_v4_auth_header(const req_info& info, /* in
bool is_non_s3_op(RGWOpType op_type)
{
- if (op_type == RGW_STS_GET_SESSION_TOKEN ||
- op_type == RGW_STS_ASSUME_ROLE ||
- op_type == RGW_STS_ASSUME_ROLE_WEB_IDENTITY ||
- op_type == RGW_OP_CREATE_ROLE ||
- op_type == RGW_OP_DELETE_ROLE ||
- op_type == RGW_OP_GET_ROLE ||
- op_type == RGW_OP_MODIFY_ROLE_TRUST_POLICY ||
- op_type == RGW_OP_LIST_ROLES ||
- op_type == RGW_OP_PUT_ROLE_POLICY ||
- op_type == RGW_OP_GET_ROLE_POLICY ||
- op_type == RGW_OP_LIST_ROLE_POLICIES ||
- op_type == RGW_OP_DELETE_ROLE_POLICY ||
- op_type == RGW_OP_PUT_USER_POLICY ||
- op_type == RGW_OP_GET_USER_POLICY ||
- op_type == RGW_OP_LIST_USER_POLICIES ||
- op_type == RGW_OP_DELETE_USER_POLICY ||
- op_type == RGW_OP_CREATE_OIDC_PROVIDER ||
- op_type == RGW_OP_DELETE_OIDC_PROVIDER ||
- op_type == RGW_OP_GET_OIDC_PROVIDER ||
- op_type == RGW_OP_LIST_OIDC_PROVIDERS ||
- op_type == RGW_OP_PUBSUB_TOPIC_CREATE ||
- op_type == RGW_OP_PUBSUB_TOPICS_LIST ||
- op_type == RGW_OP_PUBSUB_TOPIC_GET ||
- op_type == RGW_OP_PUBSUB_TOPIC_SET ||
- op_type == RGW_OP_PUBSUB_TOPIC_DELETE ||
- op_type == RGW_OP_TAG_ROLE ||
- op_type == RGW_OP_LIST_ROLE_TAGS ||
- op_type == RGW_OP_UNTAG_ROLE ||
- op_type == RGW_OP_UPDATE_ROLE) {
+ switch (op_type) {
+ case RGW_STS_GET_SESSION_TOKEN:
+ case RGW_STS_ASSUME_ROLE:
+ case RGW_STS_ASSUME_ROLE_WEB_IDENTITY:
+ case RGW_OP_CREATE_ROLE:
+ case RGW_OP_DELETE_ROLE:
+ case RGW_OP_GET_ROLE:
+ case RGW_OP_MODIFY_ROLE_TRUST_POLICY:
+ case RGW_OP_LIST_ROLES:
+ case RGW_OP_PUT_ROLE_POLICY:
+ case RGW_OP_GET_ROLE_POLICY:
+ case RGW_OP_LIST_ROLE_POLICIES:
+ case RGW_OP_DELETE_ROLE_POLICY:
+ case RGW_OP_ATTACH_ROLE_POLICY:
+ case RGW_OP_DETACH_ROLE_POLICY:
+ case RGW_OP_LIST_ATTACHED_ROLE_POLICIES:
+ case RGW_OP_PUT_USER_POLICY:
+ case RGW_OP_GET_USER_POLICY:
+ case RGW_OP_LIST_USER_POLICIES:
+ case RGW_OP_DELETE_USER_POLICY:
+ case RGW_OP_ATTACH_USER_POLICY:
+ case RGW_OP_DETACH_USER_POLICY:
+ case RGW_OP_LIST_ATTACHED_USER_POLICIES:
+ case RGW_OP_CREATE_OIDC_PROVIDER:
+ case RGW_OP_DELETE_OIDC_PROVIDER:
+ case RGW_OP_GET_OIDC_PROVIDER:
+ case RGW_OP_LIST_OIDC_PROVIDERS:
+ case RGW_OP_PUBSUB_TOPIC_CREATE:
+ case RGW_OP_PUBSUB_TOPICS_LIST:
+ case RGW_OP_PUBSUB_TOPIC_GET:
+ case RGW_OP_PUBSUB_TOPIC_SET:
+ case RGW_OP_PUBSUB_TOPIC_DELETE:
+ case RGW_OP_TAG_ROLE:
+ case RGW_OP_LIST_ROLE_TAGS:
+ case RGW_OP_UNTAG_ROLE:
+ case RGW_OP_UPDATE_ROLE:
+
+ case RGW_OP_CREATE_USER:
+ case RGW_OP_GET_USER:
+ case RGW_OP_UPDATE_USER:
+ case RGW_OP_DELETE_USER:
+ case RGW_OP_LIST_USERS:
+ case RGW_OP_CREATE_ACCESS_KEY:
+ case RGW_OP_UPDATE_ACCESS_KEY:
+ case RGW_OP_DELETE_ACCESS_KEY:
+ case RGW_OP_LIST_ACCESS_KEYS:
+ case RGW_OP_CREATE_GROUP:
+ case RGW_OP_GET_GROUP:
+ case RGW_OP_UPDATE_GROUP:
+ case RGW_OP_DELETE_GROUP:
+ case RGW_OP_LIST_GROUPS:
+ case RGW_OP_ADD_USER_TO_GROUP:
+ case RGW_OP_REMOVE_USER_FROM_GROUP:
+ case RGW_OP_LIST_GROUPS_FOR_USER:
+ case RGW_OP_PUT_GROUP_POLICY:
+ case RGW_OP_GET_GROUP_POLICY:
+ case RGW_OP_LIST_GROUP_POLICIES:
+ case RGW_OP_DELETE_GROUP_POLICY:
+ case RGW_OP_ATTACH_GROUP_POLICY:
+ case RGW_OP_DETACH_GROUP_POLICY:
+ case RGW_OP_LIST_ATTACHED_GROUP_POLICIES:
return true;
+ default:
+ return false;
}
- return false;
}
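
The switch collapses the old || chain into one jump table and makes new op types a
one-line addition; callers use the predicate to pick the non-S3 signing and error
paths, roughly:

    if (is_non_s3_op(s->op_type)) {
      // IAM/STS/SNS-style request: different string-to-sign and error envelope
    } else {
      // ordinary S3 bucket/object operation
    }
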
int parse_v4_credentials(const req_info& info, /* in */
diff --git a/src/rgw/rgw_auth_s3.h b/src/rgw/rgw_auth_s3.h
index b2b1238f313..e1fe5163f02 100644
--- a/src/rgw/rgw_auth_s3.h
+++ b/src/rgw/rgw_auth_s3.h
@@ -56,20 +56,23 @@ class STSAuthStrategy : public rgw::auth::Strategy,
aplptr_t create_apl_local(CephContext* const cct,
const req_state* const s,
const RGWUserInfo& user_info,
+ std::optional<RGWAccountInfo> account,
+ std::vector<IAM::Policy> policies,
const std::string& subuser,
const std::optional<uint32_t>& perm_mask,
const std::string& access_key_id) const override {
auto apl = rgw::auth::add_sysreq(cct, driver, s,
- rgw::auth::LocalApplier(cct, user_info, subuser, perm_mask, access_key_id));
+ LocalApplier(cct, user_info, std::move(account), std::move(policies),
+ subuser, perm_mask, access_key_id));
return aplptr_t(new decltype(apl)(std::move(apl)));
}
aplptr_t create_apl_role(CephContext* const cct,
const req_state* const s,
- const rgw::auth::RoleApplier::Role& role,
- const rgw::auth::RoleApplier::TokenAttrs& token_attrs) const override {
+ RoleApplier::Role role,
+ RoleApplier::TokenAttrs token_attrs) const override {
auto apl = rgw::auth::add_sysreq(cct, driver, s,
- rgw::auth::RoleApplier(cct, role, token_attrs));
+ rgw::auth::RoleApplier(cct, std::move(role), std::move(token_attrs)));
return aplptr_t(new decltype(apl)(std::move(apl)));
}
@@ -113,8 +116,8 @@ class ExternalAuthStrategy : public rgw::auth::Strategy,
rgw::auth::RemoteApplier::acl_strategy_t&& acl_alg,
const rgw::auth::RemoteApplier::AuthInfo &info) const override {
auto apl = rgw::auth::add_sysreq(cct, driver, s,
- rgw::auth::RemoteApplier(cct, driver, std::move(acl_alg), info,
- implicit_tenant_context,
+ rgw::auth::RemoteApplier(cct, driver, std::move(acl_alg),
+ info, implicit_tenant_context,
rgw::auth::ImplicitTenants::IMPLICIT_TENANTS_S3));
/* TODO(rzarzynski): replace with static_ptr. */
return aplptr_t(new decltype(apl)(std::move(apl)));
@@ -174,11 +177,14 @@ class AWSAuthStrategy : public rgw::auth::Strategy,
aplptr_t create_apl_local(CephContext* const cct,
const req_state* const s,
const RGWUserInfo& user_info,
+ std::optional<RGWAccountInfo> account,
+ std::vector<IAM::Policy> policies,
const std::string& subuser,
const std::optional<uint32_t>& perm_mask,
const std::string& access_key_id) const override {
auto apl = rgw::auth::add_sysreq(cct, driver, s,
- rgw::auth::LocalApplier(cct, user_info, subuser, perm_mask, access_key_id));
+ LocalApplier(cct, user_info, std::move(account), std::move(policies),
+ subuser, perm_mask, access_key_id));
/* TODO(rzarzynski): replace with static_ptr. */
return aplptr_t(new decltype(apl)(std::move(apl)));
}
diff --git a/src/rgw/rgw_basic_types.cc b/src/rgw/rgw_basic_types.cc
index 5a09c017f3d..f82694683a0 100644
--- a/src/rgw/rgw_basic_types.cc
+++ b/src/rgw/rgw_basic_types.cc
@@ -7,6 +7,7 @@
#include "cls/user/cls_user_types.h"
+#include "rgw_account.h"
#include "rgw_basic_types.h"
#include "rgw_bucket.h"
#include "rgw_xml.h"
@@ -170,11 +171,63 @@ ostream& operator <<(ostream& m, const Principal& p) {
return m << "*";
}
- m << "arn:aws:iam:" << p.get_tenant() << ":";
- if (p.is_tenant()) {
+ m << "arn:aws:iam:" << p.get_account() << ":";
+ if (p.is_account()) {
return m << "root";
}
return m << (p.is_user() ? "user/" : "role/") << p.get_id();
}
}
}
+
+// rgw_account_id
+void encode_json_impl(const char* name, const rgw_account_id& id, Formatter* f)
+{
+ f->dump_string(name, id);
+}
+
+void decode_json_obj(rgw_account_id& id, JSONObj* obj)
+{
+ decode_json_obj(static_cast<std::string&>(id), obj);
+}
+
+// rgw_owner variant
+rgw_owner parse_owner(const std::string& str)
+{
+ if (rgw::account::validate_id(str)) {
+ return rgw_account_id{str};
+ } else {
+ return rgw_user{str};
+ }
+}
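
Anything that passes account-id validation parses as an account; everything else falls
back to the legacy tenant$user form. For example (the account id below is made up and
assumed to satisfy rgw::account::validate_id):

    rgw_owner a = parse_owner("RGW11111111111111111");  // -> rgw_account_id
    rgw_owner u = parse_owner("tenant$alice");          // -> rgw_user{"tenant", "alice"}
    assert(to_string(u) == "tenant$alice");             // round-trips via to_string()
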
+
+std::string to_string(const rgw_owner& o)
+{
+ struct visitor {
+ std::string operator()(const rgw_account_id& a) { return a; }
+ std::string operator()(const rgw_user& u) { return u.to_str(); }
+ };
+ return std::visit(visitor{}, o);
+}
+
+std::ostream& operator<<(std::ostream& out, const rgw_owner& o)
+{
+ struct visitor {
+ std::ostream& out;
+ std::ostream& operator()(const rgw_account_id& a) { return out << a; }
+ std::ostream& operator()(const rgw_user& u) { return out << u; }
+ };
+ return std::visit(visitor{out}, o);
+}
+
+void encode_json_impl(const char *name, const rgw_owner& o, ceph::Formatter *f)
+{
+ encode_json(name, to_string(o), f);
+}
+
+void decode_json_obj(rgw_owner& o, JSONObj *obj)
+{
+ std::string str;
+ decode_json_obj(str, obj);
+ o = parse_owner(str);
+}
diff --git a/src/rgw/rgw_basic_types.h b/src/rgw/rgw_basic_types.h
index a8190aa35ad..cd56db1081b 100644
--- a/src/rgw/rgw_basic_types.h
+++ b/src/rgw/rgw_basic_types.h
@@ -141,7 +141,7 @@ extern void decode_json_obj(rgw_placement_rule& v, JSONObj *obj);
namespace rgw {
namespace auth {
class Principal {
- enum types { User, Role, Tenant, Wildcard, OidcProvider, AssumedRole };
+ enum types { User, Role, Account, Wildcard, OidcProvider, AssumedRole };
types t;
rgw_user u;
std::string idp_url;
@@ -169,8 +169,8 @@ public:
return Principal(Role, std::move(t), std::move(u));
}
- static Principal tenant(std::string&& t) {
- return Principal(Tenant, std::move(t), {});
+ static Principal account(std::string&& t) {
+ return Principal(Account, std::move(t), {});
}
static Principal oidc_provider(std::string&& idp_url) {
@@ -193,8 +193,8 @@ public:
return t == Role;
}
- bool is_tenant() const {
- return t == Tenant;
+ bool is_account() const {
+ return t == Account;
}
bool is_oidc_provider() const {
@@ -205,7 +205,7 @@ public:
return t == AssumedRole;
}
- const std::string& get_tenant() const {
+ const std::string& get_account() const {
return u.tenant;
}
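
With the rename, the former tenant slot of a principal ARN now carries an account id
or a tenant interchangeably; combined with the operator<< change earlier in this
patch, rendering behaves like:

    #include <iostream>

    rgw::auth::Principal acct = rgw::auth::Principal::account("12345");
    std::cout << acct << "\n";  // arn:aws:iam:12345:root (single-colon form used above)
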
diff --git a/src/rgw/rgw_common.cc b/src/rgw/rgw_common.cc
index 7ca010fb33a..f5b834e0672 100644
--- a/src/rgw/rgw_common.cc
+++ b/src/rgw/rgw_common.cc
@@ -10,6 +10,7 @@
#include "json_spirit/json_spirit.h"
#include "common/ceph_json.h"
#include "common/Formatter.h"
+#include "common/versioned_variant.h"
#include "rgw_op.h"
#include "rgw_common.h"
@@ -43,6 +44,7 @@ using rgw::ARN;
using rgw::IAM::Effect;
using rgw::IAM::op_to_perm;
using rgw::IAM::Policy;
+using rgw::IAM::PolicyPrincipal;
const uint32_t RGWBucketInfo::NUM_SHARDS_BLIND_BUCKET(UINT32_MAX);
@@ -80,10 +82,11 @@ rgw_http_errors rgw_http_s3_errors({
{ ERR_INVALID_WEBSITE_ROUTING_RULES_ERROR, {400, "InvalidRequest" }},
{ ERR_INVALID_ENCRYPTION_ALGORITHM, {400, "InvalidEncryptionAlgorithmError" }},
{ ERR_INVALID_RETENTION_PERIOD,{400, "InvalidRetentionPeriod"}},
- { ERR_LIMIT_EXCEEDED, {400, "LimitExceeded" }},
+ { ERR_LIMIT_EXCEEDED, {409, "LimitExceeded" }},
{ ERR_LENGTH_REQUIRED, {411, "MissingContentLength" }},
{ EACCES, {403, "AccessDenied" }},
{ EPERM, {403, "AccessDenied" }},
+ { ERR_AUTHORIZATION, {403, "AuthorizationError" }},
{ ERR_SIGNATURE_NO_MATCH, {403, "SignatureDoesNotMatch" }},
{ ERR_INVALID_ACCESS_KEY, {403, "InvalidAccessKeyId" }},
{ ERR_USER_SUSPENDED, {403, "UserSuspended" }},
@@ -94,7 +97,7 @@ rgw_http_errors rgw_http_s3_errors({
{ ERR_NO_SUCH_BUCKET, {404, "NoSuchBucket" }},
{ ERR_NO_SUCH_WEBSITE_CONFIGURATION, {404, "NoSuchWebsiteConfiguration" }},
{ ERR_NO_SUCH_UPLOAD, {404, "NoSuchUpload" }},
- { ERR_NOT_FOUND, {404, "Not Found"}},
+ { ERR_NOT_FOUND, {404, "NotFound"}},
{ ERR_NO_SUCH_LC, {404, "NoSuchLifecycleConfiguration"}},
{ ERR_NO_SUCH_BUCKET_POLICY, {404, "NoSuchBucketPolicy"}},
{ ERR_NO_SUCH_USER, {404, "NoSuchUser"}},
@@ -133,6 +136,8 @@ rgw_http_errors rgw_http_s3_errors({
{ ERR_NO_SUCH_TAG_SET, {404, "NoSuchTagSet"}},
{ ERR_NO_SUCH_BUCKET_ENCRYPTION_CONFIGURATION, {404, "ServerSideEncryptionConfigurationNotFoundError"}},
{ ERR_NO_SUCH_PUBLIC_ACCESS_BLOCK_CONFIGURATION, {404, "NoSuchPublicAccessBlockConfiguration"}},
+ { ERR_ACCOUNT_EXISTS, {409, "AccountAlreadyExists"}},
+ { ECANCELED, {409, "ConcurrentModification"}},
});
rgw_http_errors rgw_http_swift_errors({
@@ -372,6 +377,15 @@ void set_req_state_err(req_state* s, int err_no)
void dump(req_state* s)
{
+ std::optional<Formatter::ObjectSection> error_response;
+ if (s->prot_flags & RGW_REST_IAM) {
+ error_response.emplace(*s->formatter, "ErrorResponse", RGW_REST_IAM_XMLNS);
+ } else if (s->prot_flags & RGW_REST_SNS) {
+ error_response.emplace(*s->formatter, "ErrorResponse", RGW_REST_SNS_XMLNS);
+ } else if (s->prot_flags & RGW_REST_STS) {
+ error_response.emplace(*s->formatter, "ErrorResponse", RGW_REST_STS_XMLNS);
+ }
+
if (s->format != RGWFormat::HTML)
s->formatter->open_object_section("Error");
if (!s->err.err_code.empty())
@@ -383,7 +397,7 @@ void dump(req_state* s)
s->formatter->dump_string("RequestId", s->trans_id);
s->formatter->dump_string("HostId", s->host_id);
if (s->format != RGWFormat::HTML)
- s->formatter->close_section();
+ s->formatter->close_section(); // Error
}
struct str_len {
@@ -1116,8 +1130,6 @@ Effect eval_or_pass(const DoutPrefixProvider* dpp,
return policy->eval(env, id, op, resource, princ_type);
}
-}
-
Effect eval_identity_or_session_policies(const DoutPrefixProvider* dpp,
const vector<Policy>& policies,
const rgw::IAM::Environment& env,
@@ -1125,16 +1137,103 @@ Effect eval_identity_or_session_policies(const DoutPrefixProvider* dpp,
const ARN& arn) {
auto policy_res = Effect::Pass, prev_res = Effect::Pass;
for (auto& policy : policies) {
- if (policy_res = eval_or_pass(dpp, policy, env, boost::none, op, arn); policy_res == Effect::Deny)
+ if (policy_res = eval_or_pass(dpp, policy, env, boost::none, op, arn);
+ policy_res == Effect::Deny) {
+ ldpp_dout(dpp, 10) << __func__ << " Deny from " << policy << dendl;
return policy_res;
- else if (policy_res == Effect::Allow)
+ } else if (policy_res == Effect::Allow) {
+ ldpp_dout(dpp, 20) << __func__ << " Allow from " << policy << dendl;
prev_res = Effect::Allow;
- else if (policy_res == Effect::Pass && prev_res == Effect::Allow)
+ } else if (policy_res == Effect::Pass && prev_res == Effect::Allow) {
policy_res = Effect::Allow;
+ }
}
return policy_res;
}
+} // anonymous namespace
+
+// determine whether a request is allowed or denied within an account
+Effect evaluate_iam_policies(
+ const DoutPrefixProvider* dpp,
+ const rgw::IAM::Environment& env,
+ const rgw::auth::Identity& identity,
+ bool account_root, uint64_t op, const rgw::ARN& arn,
+ const boost::optional<Policy>& resource_policy,
+ const vector<Policy>& identity_policies,
+ const vector<Policy>& session_policies)
+{
+ auto identity_res = eval_identity_or_session_policies(dpp, identity_policies, env, op, arn);
+ if (identity_res == Effect::Deny) {
+ ldpp_dout(dpp, 10) << __func__ << ": explicit deny from identity-based policy" << dendl;
+ return Effect::Deny;
+ }
+
+ PolicyPrincipal princ_type = PolicyPrincipal::Other;
+ auto resource_res = eval_or_pass(dpp, resource_policy, env, identity,
+ op, arn, princ_type);
+ if (resource_res == Effect::Deny) {
+ ldpp_dout(dpp, 10) << __func__ << ": explicit deny from resource-based policy" << dendl;
+ return Effect::Deny;
+ }
+
+ //Take into account session policies, if the identity making a request is a role
+ if (!session_policies.empty()) {
+ auto session_res = eval_identity_or_session_policies(dpp, session_policies, env, op, arn);
+ if (session_res == Effect::Deny) {
+ ldpp_dout(dpp, 10) << __func__ << ": explicit deny from session policy" << dendl;
+ return Effect::Deny;
+ }
+ if (princ_type == PolicyPrincipal::Role) {
+ //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
+ if (session_res == Effect::Allow && identity_res == Effect::Allow) {
+ ldpp_dout(dpp, 10) << __func__ << ": allowed by session and identity-based policy" << dendl;
+ return Effect::Allow;
+ }
+ if (session_res == Effect::Allow && resource_res == Effect::Allow) {
+ ldpp_dout(dpp, 10) << __func__ << ": allowed by session and resource-based policy" << dendl;
+ return Effect::Allow;
+ }
+ } else if (princ_type == PolicyPrincipal::Session) {
+ //Intersection of session policy and identity policy plus bucket policy
+ if (session_res == Effect::Allow && identity_res == Effect::Allow) {
+ ldpp_dout(dpp, 10) << __func__ << ": allowed by session and identity-based policy" << dendl;
+ return Effect::Allow;
+ }
+ if (resource_res == Effect::Allow) {
+ ldpp_dout(dpp, 10) << __func__ << ": allowed by resource-based policy" << dendl;
+ return Effect::Allow;
+ }
+ } else if (princ_type == PolicyPrincipal::Other) { // there was no match in the bucket policy
+ if (session_res == Effect::Allow && identity_res == Effect::Allow) {
+ ldpp_dout(dpp, 10) << __func__ << ": allowed by session and identity-based policy" << dendl;
+ return Effect::Allow;
+ }
+ }
+ ldpp_dout(dpp, 10) << __func__ << ": implicit deny from session policy" << dendl;
+ return Effect::Pass;
+ }
+
+ // Allow from resource policy overrides implicit deny from identity
+ if (resource_res == Effect::Allow) {
+ ldpp_dout(dpp, 10) << __func__ << ": allowed by resource-based policy" << dendl;
+ return Effect::Allow;
+ }
+
+ if (identity_res == Effect::Allow) {
+ ldpp_dout(dpp, 10) << __func__ << ": allowed by identity-based policy" << dendl;
+ return Effect::Allow;
+ }
+
+ if (account_root) {
+ ldpp_dout(dpp, 10) << __func__ << ": granted to account root" << dendl;
+ return Effect::Allow;
+ }
+
+ ldpp_dout(dpp, 10) << __func__ << ": implicit deny from identity-based policy" << dendl;
+ return Effect::Pass;
+}
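
Callers translate the tri-state Effect into the older boolean checks, with Pass
falling through to ACL evaluation; the pattern used by verify_user_permission() below,
shown in isolation:

    const rgw::IAM::Effect e = evaluate_iam_policies(
        dpp, s->env, *s->identity, /*account_root=*/false, op, arn,
        bucket_policy, identity_policies, session_policies);
    if (e == rgw::IAM::Effect::Deny) return false;  // explicit deny always wins
    if (e == rgw::IAM::Effect::Allow) return true;  // some policy allowed the op
    // Effect::Pass: fall through to ACLs and other checks
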
+
bool verify_user_permission(const DoutPrefixProvider* dpp,
perm_state_base * const s,
const RGWAccessControlPolicy& user_acl,
@@ -1144,24 +1243,14 @@ bool verify_user_permission(const DoutPrefixProvider* dpp,
const uint64_t op,
bool mandatory_policy)
{
- auto identity_policy_res = eval_identity_or_session_policies(dpp, user_policies, s->env, op, res);
- if (identity_policy_res == Effect::Deny) {
+ const bool account_root = (s->identity->get_identity_type() == TYPE_ROOT);
+ const auto effect = evaluate_iam_policies(dpp, s->env, *s->identity,
+ account_root, op, res, {},
+ user_policies, session_policies);
+ if (effect == Effect::Deny) {
return false;
}
-
- if (! session_policies.empty()) {
- auto session_policy_res = eval_identity_or_session_policies(dpp, session_policies, s->env, op, res);
- if (session_policy_res == Effect::Deny) {
- return false;
- }
- //Intersection of identity policies and session policies
- if (identity_policy_res == Effect::Allow && session_policy_res == Effect::Allow) {
- return true;
- }
- return false;
- }
-
- if (identity_policy_res == Effect::Allow) {
+ if (effect == Effect::Allow) {
return true;
}
@@ -1184,14 +1273,13 @@ bool verify_user_permission_no_policy(const DoutPrefixProvider* dpp,
if (s->identity->get_identity_type() == TYPE_ROLE)
return false;
- /* S3 doesn't have a subuser, it takes user permissions */
+ /* S3 doesn't support account ACLs, so user_acl will be uninitialized. */
+ if (user_acl.get_owner().empty())
+ return true;
+
if ((perm & (int)s->perm_mask) != perm)
return false;
- /* S3 doesn't support account ACLs, so user_acl will be uninitialized. */
- if (user_acl.get_owner().id.empty())
- return true;
-
return user_acl.verify_permission(dpp, *s->identity, perm, perm);
}
@@ -1202,7 +1290,12 @@ bool verify_user_permission(const DoutPrefixProvider* dpp,
bool mandatory_policy)
{
perm_state_from_req_state ps(s);
- return verify_user_permission(dpp, &ps, s->user_acl, s->iam_user_policies, s->session_policies, res, op, mandatory_policy);
+
+ if (s->auth.identity->get_account()) {
+ // account users always require an Allow from an identity-based policy
+ mandatory_policy = true;
+ }
+ return verify_user_permission(dpp, &ps, s->user_acl, s->iam_identity_policies, s->session_policies, res, op, mandatory_policy);
}
bool verify_user_permission_no_policy(const DoutPrefixProvider* dpp,
@@ -1235,7 +1328,8 @@ bool verify_requester_payer_permission(struct perm_state_base *s)
bool verify_bucket_permission(const DoutPrefixProvider* dpp,
struct perm_state_base * const s,
- const rgw_bucket& bucket,
+ const rgw::ARN& arn,
+ bool account_root,
const RGWAccessControlPolicy& user_acl,
const RGWAccessControlPolicy& bucket_acl,
const boost::optional<Policy>& bucket_policy,
@@ -1246,55 +1340,27 @@ bool verify_bucket_permission(const DoutPrefixProvider* dpp,
if (!verify_requester_payer_permission(s))
return false;
- auto identity_policy_res = eval_identity_or_session_policies(dpp, identity_policies, s->env, op, ARN(bucket));
- if (identity_policy_res == Effect::Deny)
- return false;
-
- rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
if (bucket_policy) {
ldpp_dout(dpp, 16) << __func__ << ": policy: " << bucket_policy.get()
- << "resource: " << ARN(bucket) << dendl;
+ << " resource: " << arn << dendl;
}
- auto r = eval_or_pass(dpp, bucket_policy, s->env, *s->identity,
- op, ARN(bucket), princ_type);
- if (r == Effect::Deny)
- return false;
-
- //Take into account session policies, if the identity making a request is a role
- if (!session_policies.empty()) {
- auto session_policy_res = eval_identity_or_session_policies(dpp, session_policies, s->env, op, ARN(bucket));
- if (session_policy_res == Effect::Deny) {
- return false;
- }
- if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
- //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
- (session_policy_res == Effect::Allow && r == Effect::Allow))
- return true;
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
- //Intersection of session policy and identity policy plus bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || r == Effect::Allow)
- return true;
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
- if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow)
- return true;
- }
+ const auto effect = evaluate_iam_policies(
+ dpp, s->env, *s->identity, account_root, op, arn,
+ bucket_policy, identity_policies, session_policies);
+ if (effect == Effect::Deny) {
return false;
}
-
- if (r == Effect::Allow || identity_policy_res == Effect::Allow)
- // It looks like S3 ACLs only GRANT permissions rather than
- // denying them, so this should be safe.
+ if (effect == Effect::Allow) {
return true;
+ }
const auto perm = op_to_perm(op);
-
return verify_bucket_permission_no_policy(dpp, s, user_acl, bucket_acl, perm);
}
bool verify_bucket_permission(const DoutPrefixProvider* dpp,
req_state * const s,
- const rgw_bucket& bucket,
+ const rgw::ARN& arn,
const RGWAccessControlPolicy& user_acl,
const RGWAccessControlPolicy& bucket_acl,
const boost::optional<Policy>& bucket_policy,
@@ -1303,7 +1369,28 @@ bool verify_bucket_permission(const DoutPrefixProvider* dpp,
const uint64_t op)
{
perm_state_from_req_state ps(s);
- return verify_bucket_permission(dpp, &ps, bucket,
+
+ if (ps.identity->get_account()) {
+ const bool account_root = (ps.identity->get_identity_type() == TYPE_ROOT);
+ if (!ps.identity->is_owner_of(s->bucket_owner.id)) {
+ ldpp_dout(dpp, 4) << "cross-account request for bucket owner "
+ << s->bucket_owner.id << " != " << s->owner.id << dendl;
+ // cross-account requests evaluate the identity-based policies separately
+ // from the resource-based policies and require Allow from both
+ return verify_bucket_permission(dpp, &ps, arn, account_root, {}, {}, {},
+ user_policies, session_policies, op)
+ && verify_bucket_permission(dpp, &ps, arn, false, user_acl,
+ bucket_acl, bucket_policy, {}, {}, op);
+ } else {
+ // don't consult ACLs for same-account access; require an Allow from
+ // either an identity- or a resource-based policy
+ return verify_bucket_permission(dpp, &ps, arn, account_root, {}, {},
+ bucket_policy, user_policies,
+ session_policies, op);
+ }
+ }
+ constexpr bool account_root = false;
+ return verify_bucket_permission(dpp, &ps, arn, account_root,
user_acl, bucket_acl,
bucket_policy, user_policies,
session_policies, op);
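For the cross-account branch above to grant access, the resource side must produce its own Allow. A hypothetical bucket policy that the bucket owner could attach to satisfy that check, written as a raw string literal in the style of the managed policies added later in this patch (the account id is the placeholder used by the test instances below; the bucket name is illustrative):

  static constexpr std::string_view CrossAccountRead = R"(
  {
    "Version": "2012-10-17",
    "Statement": [ {
      "Effect": "Allow",
      "Principal": { "AWS": "arn:aws:iam::RGW12345678901234567:root" },
      "Action": [ "s3:GetObject", "s3:ListBucket" ],
      "Resource": [
        "arn:aws:s3:::example-bucket",
        "arn:aws:s3:::example-bucket/*"
      ]
    } ]
  })";

With this patch a "root" principal ARN resolves to Principal::account() rather than Principal::tenant(), so the statement matches any identity in the named account; per the conjunction above, that identity still needs an Allow from its own identity-based policies.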
@@ -1320,10 +1407,15 @@ bool verify_bucket_permission_no_policy(const DoutPrefixProvider* dpp, struct pe
if (bucket_acl.verify_permission(dpp, *s->identity, perm, perm,
s->get_referer(),
s->bucket_access_conf &&
- s->bucket_access_conf->ignore_public_acls()))
+ s->bucket_access_conf->ignore_public_acls())) {
+ ldpp_dout(dpp, 10) << __func__ << ": granted by bucket acl" << dendl;
return true;
-
- return user_acl.verify_permission(dpp, *s->identity, perm, perm);
+ }
+ if (user_acl.verify_permission(dpp, *s->identity, perm, perm)) {
+ ldpp_dout(dpp, 10) << __func__ << ": granted by user acl" << dendl;
+ return true;
+ }
+ return false;
}
bool verify_bucket_permission_no_policy(const DoutPrefixProvider* dpp, req_state * const s,
@@ -1353,94 +1445,24 @@ bool verify_bucket_permission_no_policy(const DoutPrefixProvider* dpp, req_state
perm);
}
-bool verify_bucket_permission(const DoutPrefixProvider* dpp, req_state * const s, const uint64_t op)
+bool verify_bucket_permission(const DoutPrefixProvider* dpp, req_state* s,
+ const rgw::ARN& arn, uint64_t op)
{
- if (rgw::sal::Bucket::empty(s->bucket)) {
- // request is missing a bucket name
- return false;
- }
-
- perm_state_from_req_state ps(s);
-
- return verify_bucket_permission(dpp,
- &ps,
- s->bucket->get_key(),
- s->user_acl,
- s->bucket_acl,
- s->iam_policy,
- s->iam_user_policies,
- s->session_policies,
- op);
+ return verify_bucket_permission(dpp, s, arn, s->user_acl, s->bucket_acl,
+ s->iam_policy, s->iam_identity_policies,
+ s->session_policies, op);
}
-// Authorize anyone permitted by the bucket policy, identity policies, session policies and the bucket owner
-// unless explicitly denied by the policy.
-
-int verify_bucket_owner_or_policy(req_state* const s,
- const uint64_t op)
+bool verify_bucket_permission(const DoutPrefixProvider* dpp, req_state* s, uint64_t op)
{
- auto identity_policy_res = eval_identity_or_session_policies(s, s->iam_user_policies, s->env, op, ARN(s->bucket->get_key()));
- if (identity_policy_res == Effect::Deny) {
- return -EACCES;
- }
-
- rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
- auto e = eval_or_pass(s, s->iam_policy,
- s->env, *s->auth.identity,
- op, ARN(s->bucket->get_key()), princ_type);
- if (e == Effect::Deny) {
- return -EACCES;
- }
-
- if (!s->session_policies.empty()) {
- auto session_policy_res = eval_identity_or_session_policies(s, s->session_policies, s->env, op,
- ARN(s->bucket->get_key()));
- if (session_policy_res == Effect::Deny) {
- return -EACCES;
- }
- if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
- //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
- (session_policy_res == Effect::Allow && e == Effect::Allow))
- return 0;
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
- //Intersection of session policy and identity policy plus bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow)
- return 0;
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
- if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow)
- return 0;
- }
- return -EACCES;
- }
-
- if (e == Effect::Allow ||
- identity_policy_res == Effect::Allow ||
- (e == Effect::Pass &&
- identity_policy_res == Effect::Pass &&
- s->auth.identity->is_owner_of(s->bucket_owner.id))) {
- return 0;
- } else {
- return -EACCES;
+ if (rgw::sal::Bucket::empty(s->bucket)) {
+ // request is missing a bucket name
+ return false;
}
+ return verify_bucket_permission(dpp, s, ARN(s->bucket->get_key()), op);
}
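Call sites of the removed verify_bucket_owner_or_policy() need a small mechanical change, since the replacement returns bool instead of 0/-EACCES. A hypothetical migration (the chosen op and the use of `this` as the DoutPrefixProvider are illustrative):

  - op_ret = verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketPolicy);
  + if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketPolicy)) {
  +   op_ret = -EACCES;
  + }

Behaviorally, the old bucket-owner fallback on Pass/Pass is now reached through the ACL checks in verify_bucket_permission_no_policy() rather than an is_owner_of() shortcut.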
-static inline bool check_deferred_bucket_perms(const DoutPrefixProvider* dpp,
- struct perm_state_base * const s,
- const rgw_bucket& bucket,
- const RGWAccessControlPolicy& user_acl,
- const RGWAccessControlPolicy& bucket_acl,
- const boost::optional<Policy>& bucket_policy,
- const vector<Policy>& identity_policies,
- const vector<Policy>& session_policies,
- const uint8_t deferred_check,
- const uint64_t op)
-{
- return (s->defer_to_bucket_acls == deferred_check \
- && verify_bucket_permission(dpp, s, bucket, user_acl, bucket_acl, bucket_policy, identity_policies, session_policies,op));
-}
-
static inline bool check_deferred_bucket_only_acl(const DoutPrefixProvider* dpp,
struct perm_state_base * const s,
const RGWAccessControlPolicy& user_acl,
@@ -1453,7 +1475,7 @@ static inline bool check_deferred_bucket_only_acl(const DoutPrefixProvider* dpp,
}
bool verify_object_permission(const DoutPrefixProvider* dpp, struct perm_state_base * const s,
- const rgw_obj& obj,
+ const rgw_obj& obj, bool account_root,
const RGWAccessControlPolicy& user_acl,
const RGWAccessControlPolicy& bucket_acl,
const RGWAccessControlPolicy& object_acl,
@@ -1465,80 +1487,19 @@ bool verify_object_permission(const DoutPrefixProvider* dpp, struct perm_state_b
if (!verify_requester_payer_permission(s))
return false;
- auto identity_policy_res = eval_identity_or_session_policies(dpp, identity_policies, s->env, op, ARN(obj));
- if (identity_policy_res == Effect::Deny)
- return false;
-
- rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
- auto r = eval_or_pass(dpp, bucket_policy, s->env, *s->identity, op, ARN(obj), princ_type);
- if (r == Effect::Deny)
+ const auto effect = evaluate_iam_policies(
+ dpp, s->env, *s->identity, account_root, op, ARN(obj),
+ bucket_policy, identity_policies, session_policies);
+ if (effect == Effect::Deny) {
return false;
-
- if (!session_policies.empty()) {
- auto session_policy_res = eval_identity_or_session_policies(dpp, session_policies, s->env, op, ARN(obj));
- if (session_policy_res == Effect::Deny) {
- return false;
- }
- if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
- //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
- (session_policy_res == Effect::Allow && r == Effect::Allow))
- return true;
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
- //Intersection of session policy and identity policy plus bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || r == Effect::Allow)
- return true;
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
- if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow)
- return true;
- }
- return false;
- }
-
- if (r == Effect::Allow || identity_policy_res == Effect::Allow)
- // It looks like S3 ACLs only GRANT permissions rather than
- // denying them, so this should be safe.
- return true;
-
- const auto perm = op_to_perm(op);
-
- if (check_deferred_bucket_perms(dpp, s, obj.bucket, user_acl, bucket_acl, bucket_policy,
- identity_policies, session_policies, RGW_DEFER_TO_BUCKET_ACLS_RECURSE, op) ||
- check_deferred_bucket_perms(dpp, s, obj.bucket, user_acl, bucket_acl, bucket_policy,
- identity_policies, session_policies, RGW_DEFER_TO_BUCKET_ACLS_FULL_CONTROL, rgw::IAM::s3All)) {
- return true;
}
-
- bool ret = object_acl.verify_permission(dpp, *s->identity, s->perm_mask, perm,
- nullptr, /* http_referrer */
- s->bucket_access_conf &&
- s->bucket_access_conf->ignore_public_acls());
- if (ret) {
+ if (effect == Effect::Allow) {
return true;
}
- if (!s->cct->_conf->rgw_enforce_swift_acls)
- return ret;
-
- if ((perm & (int)s->perm_mask) != perm)
- return false;
-
- int swift_perm = 0;
- if (perm & (RGW_PERM_READ | RGW_PERM_READ_ACP))
- swift_perm |= RGW_PERM_READ_OBJS;
- if (perm & RGW_PERM_WRITE)
- swift_perm |= RGW_PERM_WRITE_OBJS;
-
- if (!swift_perm)
- return false;
-
- /* we already verified the user mask above, so we pass swift_perm as the mask here,
- otherwise the mask might not cover the swift permissions bits */
- if (bucket_acl.verify_permission(dpp, *s->identity, swift_perm, swift_perm,
- s->get_referer()))
- return true;
-
- return user_acl.verify_permission(dpp, *s->identity, swift_perm, swift_perm);
+ const auto perm = op_to_perm(op);
+ return verify_object_permission_no_policy(dpp, s, user_acl, bucket_acl,
+ object_acl, perm);
}
bool verify_object_permission(const DoutPrefixProvider* dpp, req_state * const s,
@@ -1552,7 +1513,32 @@ bool verify_object_permission(const DoutPrefixProvider* dpp, req_state * const s
const uint64_t op)
{
perm_state_from_req_state ps(s);
- return verify_object_permission(dpp, &ps, obj,
+
+ if (ps.identity->get_account()) {
+ const bool account_root = (ps.identity->get_identity_type() == TYPE_ROOT);
+
+ const rgw_owner& object_owner = !object_acl.get_owner().empty() ?
+ object_acl.get_owner().id : s->bucket_owner.id;
+ if (!ps.identity->is_owner_of(object_owner)) {
+ ldpp_dout(dpp, 4) << "cross-account request for object owner "
+ << object_owner << " != " << s->owner.id << dendl;
+ // cross-account requests evaluate the identity-based policies separately
+ // from the resource-based policies and require Allow from both
+ return verify_object_permission(dpp, &ps, obj, account_root, {}, {}, {}, {},
+ identity_policies, session_policies, op)
+ && verify_object_permission(dpp, &ps, obj, false,
+ user_acl, bucket_acl, object_acl,
+ bucket_policy, {}, {}, op);
+ } else {
+ // don't consult ACLs for same-account access; require an Allow from
+ // either an identity- or a resource-based policy
+ return verify_object_permission(dpp, &ps, obj, account_root, {}, {}, {},
+ bucket_policy, identity_policies,
+ session_policies, op);
+ }
+ }
+ constexpr bool account_root = false;
+ return verify_object_permission(dpp, &ps, obj, account_root,
user_acl, bucket_acl,
object_acl, bucket_policy,
identity_policies, session_policies, op);
@@ -1575,6 +1561,7 @@ bool verify_object_permission_no_policy(const DoutPrefixProvider* dpp,
s->bucket_access_conf &&
s->bucket_access_conf->ignore_public_acls());
if (ret) {
+ ldpp_dout(dpp, 10) << __func__ << ": granted by object acl" << dendl;
return true;
}
@@ -1596,10 +1583,15 @@ bool verify_object_permission_no_policy(const DoutPrefixProvider* dpp,
/* we already verified the user mask above, so we pass swift_perm as the mask here,
otherwise the mask might not cover the swift permissions bits */
if (bucket_acl.verify_permission(dpp, *s->identity, swift_perm, swift_perm,
- s->get_referer()))
+ s->get_referer())) {
+ ldpp_dout(dpp, 10) << __func__ << ": granted by bucket acl" << dendl;
return true;
-
- return user_acl.verify_permission(dpp, *s->identity, swift_perm, swift_perm);
+ }
+ if (user_acl.verify_permission(dpp, *s->identity, swift_perm, swift_perm)) {
+ ldpp_dout(dpp, 10) << __func__ << ": granted by user acl" << dendl;
+ return true;
+ }
+ return false;
}
bool verify_object_permission_no_policy(const DoutPrefixProvider* dpp, req_state *s, int perm)
@@ -1619,16 +1611,13 @@ bool verify_object_permission_no_policy(const DoutPrefixProvider* dpp, req_state
bool verify_object_permission(const DoutPrefixProvider* dpp, req_state *s, uint64_t op)
{
- perm_state_from_req_state ps(s);
-
- return verify_object_permission(dpp,
- &ps,
+ return verify_object_permission(dpp, s,
rgw_obj(s->bucket->get_key(), s->object->get_key()),
s->user_acl,
s->bucket_acl,
s->object_acl,
s->iam_policy,
- s->iam_user_policies,
+ s->iam_identity_policies,
s->session_policies,
op);
}
@@ -2260,9 +2249,19 @@ RGWBucketInfo::~RGWBucketInfo()
}
void RGWBucketInfo::encode(bufferlist& bl) const {
- ENCODE_START(23, 4, bl);
+ // rgw_owner is now encoded at the end. if the owner is a user, duplicate the
+ // encoding of its id/tenant/ns in the existing locations for backward compat.
+ // otherwise, encode empty strings there
+ const rgw_user* user = std::get_if<rgw_user>(&owner);
+ std::string empty;
+
+ ENCODE_START(24, 4, bl);
encode(bucket, bl);
- encode(owner.id, bl);
+ if (user) {
+ encode(user->id, bl);
+ } else {
+ encode(empty, bl);
+ }
encode(flags, bl);
encode(zonegroup, bl);
uint64_t ct = real_clock::to_time_t(creation_time);
@@ -2271,7 +2270,11 @@ void RGWBucketInfo::encode(bufferlist& bl) const {
encode(has_instance_obj, bl);
encode(quota, bl);
encode(requester_pays, bl);
- encode(owner.tenant, bl);
+ if (user) {
+ encode(user->tenant, bl);
+ } else {
+ encode(empty, bl);
+ }
encode(has_website, bl);
if (has_website) {
encode(website_conf, bl);
@@ -2293,17 +2296,24 @@ void RGWBucketInfo::encode(bufferlist& bl) const {
encode(*sync_policy, bl);
}
encode(layout, bl);
- encode(owner.ns, bl);
+ if (user) {
+ encode(user->ns, bl);
+ } else {
+ encode(empty, bl);
+ }
+ ceph::versioned_variant::encode(owner, bl); // v24
+
ENCODE_FINISH(bl);
}
void RGWBucketInfo::decode(bufferlist::const_iterator& bl) {
- DECODE_START_LEGACY_COMPAT_LEN_32(23, 4, 4, bl);
+ rgw_user user;
+ DECODE_START_LEGACY_COMPAT_LEN_32(24, 4, 4, bl);
decode(bucket, bl);
if (struct_v >= 2) {
string s;
decode(s, bl);
- owner.from_str(s);
+ user.from_str(s);
}
if (struct_v >= 3)
decode(flags, bl);
@@ -2329,7 +2339,7 @@ void RGWBucketInfo::decode(bufferlist::const_iterator& bl) {
if (struct_v >= 12)
decode(requester_pays, bl);
if (struct_v >= 13)
- decode(owner.tenant, bl);
+ decode(user.tenant, bl);
if (struct_v >= 14) {
decode(has_website, bl);
if (has_website) {
@@ -2373,7 +2383,12 @@ void RGWBucketInfo::decode(bufferlist::const_iterator& bl) {
decode(layout, bl);
}
if (struct_v >= 23) {
- decode(owner.ns, bl);
+ decode(user.ns, bl);
+ }
+ if (struct_v >= 24) {
+ ceph::versioned_variant::decode(owner, bl);
+ } else {
+ owner = std::move(user); // user was decoded piecewise above
}
if (layout.logs.empty() &&
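RGWBucketInfo (like RGWBucketEntryPoint below) keeps old decoders working by leaving the legacy rgw_user fields in place and appending the variant at the new struct version. The same pattern on a hypothetical minimal struct, assuming the usual encoding macros and the helpers from common/versioned_variant.h:

  struct Example {
    rgw_owner owner; // variant of rgw_user and rgw_account_id

    void encode(bufferlist& bl) const {
      const rgw_user* user = std::get_if<rgw_user>(&owner);
      ENCODE_START(2, 1, bl);
      // legacy slot: old decoders still find a user id string here
      encode(user ? user->id : std::string{}, bl);
      ceph::versioned_variant::encode(owner, bl); // authoritative since v2
      ENCODE_FINISH(bl);
    }

    void decode(bufferlist::const_iterator& bl) {
      DECODE_START(2, bl);
      std::string legacy_id;
      decode(legacy_id, bl);
      if (struct_v >= 2) {
        ceph::versioned_variant::decode(owner, bl);
      } else {
        owner = rgw_user{"", legacy_id}; // legacy encodings are always users
      }
      DECODE_FINISH(bl);
    }
  };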
@@ -2492,7 +2507,7 @@ void RGWBucketInfo::dump(Formatter *f) const
encode_json("bucket", bucket, f);
utime_t ut(creation_time);
encode_json("creation_time", ut, f);
- encode_json("owner", owner.to_str(), f);
+ encode_json("owner", owner, f);
encode_json("flags", flags, f);
encode_json("zonegroup", zonegroup, f);
encode_json("placement_rule", placement_rule, f);
@@ -2565,6 +2580,11 @@ void RGWUserInfo::generate_test_instances(list<RGWUserInfo*>& o)
i->user_id = "user_id";
i->display_name = "display_name";
i->user_email = "user@email";
+ i->account_id = "RGW12345678901234567";
+ i->path = "/";
+ i->create_date = ceph::real_time{std::chrono::hours(1)};
+ i->tags.emplace("key", "value");
+ i->group_ids.insert("group");
RGWAccessKey k1, k2;
k1.id = "id1";
k1.key = "key1";
@@ -2788,12 +2808,20 @@ void RGWUserInfo::dump(Formatter *f) const
case TYPE_NONE:
user_source_type = "none";
break;
+ case TYPE_ROOT:
+ user_source_type = "root";
+ break;
default:
user_source_type = "none";
break;
}
encode_json("type", user_source_type, f);
encode_json("mfa_ids", mfa_ids, f);
+ encode_json("account_id", account_id, f);
+ encode_json("path", path, f);
+ encode_json("create_date", create_date, f);
+ encode_json("tags", tags, f);
+ encode_json("group_ids", group_ids, f);
}
void RGWUserInfo::decode_json(JSONObj *obj)
@@ -2841,10 +2869,17 @@ void RGWUserInfo::decode_json(JSONObj *obj)
type = TYPE_KEYSTONE;
} else if (user_source_type == "ldap") {
type = TYPE_LDAP;
+ } else if (user_source_type == "root") {
+ type = TYPE_ROOT;
} else if (user_source_type == "none") {
type = TYPE_NONE;
}
JSONDecoder::decode_json("mfa_ids", mfa_ids, obj);
+ JSONDecoder::decode_json("account_id", account_id, obj);
+ JSONDecoder::decode_json("path", path, obj);
+ JSONDecoder::decode_json("create_date", create_date, obj);
+ JSONDecoder::decode_json("tags", tags, obj);
+ JSONDecoder::decode_json("group_ids", group_ids, obj);
}
@@ -2917,6 +2952,7 @@ void RGWAccessKey::dump(Formatter *f) const
encode_json("secret_key", key, f);
encode_json("subuser", subuser, f);
encode_json("active", active, f);
+ encode_json("create_date", create_date, f);
}
void RGWAccessKey::dump_plain(Formatter *f) const
@@ -2938,6 +2974,7 @@ void RGWAccessKey::dump(Formatter *f, const string& user, bool swift) const
}
encode_json("secret_key", key, f);
encode_json("active", active, f);
+ encode_json("create_date", create_date, f);
}
void RGWAccessKey::decode_json(JSONObj *obj) {
@@ -2952,6 +2989,7 @@ void RGWAccessKey::decode_json(JSONObj *obj) {
}
}
JSONDecoder::decode_json("active", active, obj);
+ JSONDecoder::decode_json("create_date", create_date, obj);
}
void RGWAccessKey::decode_json(JSONObj *obj, bool swift) {
@@ -2969,6 +3007,82 @@ void RGWAccessKey::decode_json(JSONObj *obj, bool swift) {
}
JSONDecoder::decode_json("secret_key", key, obj, true);
JSONDecoder::decode_json("active", active, obj);
+ JSONDecoder::decode_json("create_date", create_date, obj);
+}
+
+
+void RGWAccountInfo::dump(Formatter * const f) const
+{
+ encode_json("id", id, f);
+ encode_json("tenant", tenant, f);
+ encode_json("name", name, f);
+ encode_json("email", email, f);
+ encode_json("quota", quota, f);
+ encode_json("max_users", max_users, f);
+ encode_json("max_roles", max_roles, f);
+ encode_json("max_groups", max_groups, f);
+ encode_json("max_buckets", max_buckets, f);
+ encode_json("max_access_keys", max_access_keys, f);
+}
+
+void RGWAccountInfo::decode_json(JSONObj* obj)
+{
+ JSONDecoder::decode_json("id", id, obj);
+ JSONDecoder::decode_json("tenant", tenant, obj);
+ JSONDecoder::decode_json("name", name, obj);
+ JSONDecoder::decode_json("email", email, obj);
+ JSONDecoder::decode_json("quota", quota, obj);
+ JSONDecoder::decode_json("max_users", max_users, obj);
+ JSONDecoder::decode_json("max_roles", max_roles, obj);
+ JSONDecoder::decode_json("max_groups", max_groups, obj);
+ JSONDecoder::decode_json("max_buckets", max_buckets, obj);
+ JSONDecoder::decode_json("max_access_keys", max_access_keys, obj);
+}
+
+void RGWAccountInfo::generate_test_instances(std::list<RGWAccountInfo*>& o)
+{
+ o.push_back(new RGWAccountInfo);
+ auto p = new RGWAccountInfo;
+ p->id = "account1";
+ p->tenant = "tenant1";
+ p->name = "name1";
+ p->email = "email@example.com";
+ p->max_users = 10;
+ p->max_roles = 10;
+ p->max_groups = 10;
+ p->max_buckets = 10;
+ p->max_access_keys = 10;
+ o.push_back(p);
+}
+
+void RGWGroupInfo::dump(Formatter * const f) const
+{
+ encode_json("id", id, f);
+ encode_json("tenant", tenant, f);
+ encode_json("name", name, f);
+ encode_json("path", path, f);
+ encode_json("account_id", account_id, f);
+}
+
+void RGWGroupInfo::decode_json(JSONObj* obj)
+{
+ JSONDecoder::decode_json("id", id, obj);
+ JSONDecoder::decode_json("tenant", tenant, obj);
+ JSONDecoder::decode_json("name", name, obj);
+ JSONDecoder::decode_json("path", path, obj);
+ JSONDecoder::decode_json("account_id", account_id, obj);
+}
+
+void RGWGroupInfo::generate_test_instances(std::list<RGWGroupInfo*>& o)
+{
+ o.push_back(new RGWGroupInfo);
+ auto p = new RGWGroupInfo;
+ p->id = "id";
+ p->tenant = "tenant";
+ p->name = "name";
+ p->path = "/path/";
+ p->account_id = "account";
+ o.push_back(p);
}
void RGWStorageStats::dump(Formatter *f) const
diff --git a/src/rgw/rgw_common.h b/src/rgw/rgw_common.h
index a70b043318d..5e44eeed89b 100644
--- a/src/rgw/rgw_common.h
+++ b/src/rgw/rgw_common.h
@@ -22,10 +22,13 @@
#include <unordered_map>
#include <fmt/format.h>
+#include <boost/container/flat_map.hpp>
+#include <boost/container/flat_set.hpp>
#include "common/ceph_crypto.h"
#include "common/random_string.h"
#include "common/tracer.h"
+#include "common/versioned_variant.h"
#include "rgw_acl.h"
#include "rgw_bucket_layout.h"
#include "rgw_cors.h"
@@ -150,6 +153,7 @@ using ceph::crypto::MD5;
/* IAM Policy */
#define RGW_ATTR_IAM_POLICY RGW_ATTR_PREFIX "iam-policy"
#define RGW_ATTR_USER_POLICY RGW_ATTR_PREFIX "user-policy"
+#define RGW_ATTR_MANAGED_POLICY RGW_ATTR_PREFIX "managed-policy"
#define RGW_ATTR_PUBLIC_ACCESS RGW_ATTR_PREFIX "public-access"
/* RGW File Attributes */
@@ -212,7 +216,16 @@ static inline const char* to_mime_type(const RGWFormat f)
#define RGW_REST_WEBSITE 0x8
#define RGW_REST_STS 0x10
#define RGW_REST_IAM 0x20
-#define RGW_REST_SNS 0x30
+#define RGW_REST_SNS 0x40
+
+inline constexpr const char* RGW_REST_IAM_XMLNS =
+ "https://iam.amazonaws.com/doc/2010-05-08/";
+
+inline constexpr const char* RGW_REST_SNS_XMLNS =
+ "https://sns.amazonaws.com/doc/2010-03-31/";
+
+inline constexpr const char* RGW_REST_STS_XMLNS =
+ "https://sts.amazonaws.com/doc/2011-06-15/";
#define RGW_SUSPENDED_USER_AUID (uint64_t)-2
@@ -312,6 +325,7 @@ static inline const char* to_mime_type(const RGWFormat f)
#define ERR_INVALID_OBJECT_STATE 2222
#define ERR_PRESIGNED_URL_EXPIRED 2223
#define ERR_PRESIGNED_URL_DISABLED 2224
+#define ERR_AUTHORIZATION 2225 // SNS 403 AuthorizationError
#define ERR_BUSY_RESHARDING 2300
#define ERR_NO_SUCH_ENTITY 2301
@@ -322,6 +336,7 @@ static inline const char* to_mime_type(const RGWFormat f)
#define ERR_INVALID_IDENTITY_TOKEN 2401
#define ERR_NO_SUCH_TAG_SET 2402
+#define ERR_ACCOUNT_EXISTS 2403
#ifndef UINT32_MAX
#define UINT32_MAX (0xffffffffu)
@@ -514,6 +529,7 @@ enum RGWIdentityType
TYPE_LDAP=3,
TYPE_ROLE=4,
TYPE_WEB=5,
+ TYPE_ROOT=6, // account root user
};
void encode_json(const char *name, const rgw_placement_rule& val, ceph::Formatter *f);
@@ -571,21 +587,24 @@ struct RGWUserInfo
int32_t max_buckets;
uint32_t op_mask;
RGWUserCaps caps;
- __u8 admin;
- __u8 system;
+ __u8 admin = 0;
+ __u8 system = 0;
rgw_placement_rule default_placement;
std::list<std::string> placement_tags;
std::map<int, std::string> temp_url_keys;
RGWQuota quota;
uint32_t type;
std::set<std::string> mfa_ids;
+ rgw_account_id account_id;
+ std::string path = "/";
+ ceph::real_time create_date;
+ std::multimap<std::string, std::string> tags;
+ boost::container::flat_set<std::string, std::less<>> group_ids;
RGWUserInfo()
: suspended(0),
max_buckets(RGW_DEFAULT_MAX_BUCKETS),
op_mask(RGW_OP_TYPE_ALL),
- admin(0),
- system(0),
type(TYPE_NONE) {
}
@@ -601,7 +620,7 @@ struct RGWUserInfo
}
void encode(bufferlist& bl) const {
- ENCODE_START(22, 9, bl);
+ ENCODE_START(23, 9, bl);
encode((uint64_t)0, bl); // old auid
std::string access_key;
std::string secret_key;
@@ -648,10 +667,15 @@ struct RGWUserInfo
encode(assumed_role_arn, bl);
}
encode(user_id.ns, bl);
+ encode(account_id, bl);
+ encode(path, bl);
+ encode(create_date, bl);
+ encode(tags, bl);
+ encode(group_ids, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
- DECODE_START_LEGACY_COMPAT_LEN_32(22, 9, 9, bl);
+ DECODE_START_LEGACY_COMPAT_LEN_32(23, 9, 9, bl);
if (struct_v >= 2) {
uint64_t old_auid;
decode(old_auid, bl);
@@ -738,6 +762,15 @@ struct RGWUserInfo
} else {
user_id.ns.clear();
}
+ if (struct_v >= 23) {
+ decode(account_id, bl);
+ decode(path, bl);
+ decode(create_date, bl);
+ decode(tags, bl);
+ decode(group_ids, bl);
+ } else {
+ path = "/";
+ }
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
@@ -747,6 +780,99 @@ struct RGWUserInfo
};
WRITE_CLASS_ENCODER(RGWUserInfo)
+// user account metadata
+struct RGWAccountInfo {
+ rgw_account_id id;
+ std::string tenant;
+ std::string name;
+ std::string email;
+ RGWQuotaInfo quota;
+
+ static constexpr int32_t DEFAULT_USER_LIMIT = 1000;
+ int32_t max_users = DEFAULT_USER_LIMIT;
+
+ static constexpr int32_t DEFAULT_ROLE_LIMIT = 1000;
+ int32_t max_roles = DEFAULT_ROLE_LIMIT;
+
+ static constexpr int32_t DEFAULT_GROUP_LIMIT = 1000;
+ int32_t max_groups = DEFAULT_GROUP_LIMIT;
+
+ static constexpr int32_t DEFAULT_BUCKET_LIMIT = 1000;
+ int32_t max_buckets = DEFAULT_BUCKET_LIMIT;
+
+ static constexpr int32_t DEFAULT_ACCESS_KEY_LIMIT = 4;
+ int32_t max_access_keys = DEFAULT_ACCESS_KEY_LIMIT;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(id, bl);
+ encode(tenant, bl);
+ encode(name, bl);
+ encode(email, bl);
+ encode(quota, bl);
+ encode(max_users, bl);
+ encode(max_roles, bl);
+ encode(max_groups, bl);
+ encode(max_buckets, bl);
+ encode(max_access_keys, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(id, bl);
+ decode(tenant, bl);
+ decode(name, bl);
+ decode(email, bl);
+ decode(quota, bl);
+ decode(max_users, bl);
+ decode(max_roles, bl);
+ decode(max_groups, bl);
+ decode(max_buckets, bl);
+ decode(max_access_keys, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter* f) const;
+ void decode_json(JSONObj* obj);
+ static void generate_test_instances(std::list<RGWAccountInfo*>& o);
+};
+WRITE_CLASS_ENCODER(RGWAccountInfo)
+
+// user group metadata
+struct RGWGroupInfo {
+ std::string id;
+ std::string tenant;
+ std::string name;
+ std::string path;
+ rgw_account_id account_id;
+
+ void encode(bufferlist& bl) const {
+ ENCODE_START(1, 1, bl);
+ encode(id, bl);
+ encode(tenant, bl);
+ encode(name, bl);
+ encode(path, bl);
+ encode(account_id, bl);
+ ENCODE_FINISH(bl);
+ }
+
+ void decode(bufferlist::const_iterator& bl) {
+ DECODE_START(1, bl);
+ decode(id, bl);
+ decode(tenant, bl);
+ decode(name, bl);
+ decode(path, bl);
+ decode(account_id, bl);
+ DECODE_FINISH(bl);
+ }
+
+ void dump(Formatter* f) const;
+ void decode_json(JSONObj* obj);
+ static void generate_test_instances(std::list<RGWGroupInfo*>& o);
+};
+WRITE_CLASS_ENCODER(RGWGroupInfo)
+
/// `RGWObjVersionTracker`
/// ======================
///
@@ -898,7 +1024,7 @@ class RGWSI_Zone;
struct RGWBucketInfo {
rgw_bucket bucket;
- rgw_user owner;
+ rgw_owner owner;
uint32_t flags{0};
std::string zonegroup;
ceph::real_time creation_time;
@@ -973,7 +1099,7 @@ WRITE_CLASS_ENCODER(RGWBucketInfo)
struct RGWBucketEntryPoint
{
rgw_bucket bucket;
- rgw_user owner;
+ rgw_owner owner;
ceph::real_time creation_time;
bool linked;
@@ -983,13 +1109,19 @@ struct RGWBucketEntryPoint
RGWBucketEntryPoint() : linked(false), has_bucket_info(false) {}
void encode(bufferlist& bl) const {
+ const rgw_user* user = std::get_if<rgw_user>(&owner);
ENCODE_START(10, 8, bl);
encode(bucket, bl);
- encode(owner.id, bl);
+ if (user) {
+ encode(user->id, bl);
+ } else {
+ encode(std::string{}, bl); // empty user id
+ }
encode(linked, bl);
uint64_t ctime = (uint64_t)real_clock::to_time_t(creation_time);
encode(ctime, bl);
- encode(owner, bl);
+ // 'rgw_user owner' converted to 'rgw_owner'
+ ceph::converted_variant::encode(owner, bl);
encode(creation_time, bl);
ENCODE_FINISH(bl);
}
@@ -1004,7 +1136,8 @@ struct RGWBucketEntryPoint
}
has_bucket_info = false;
decode(bucket, bl);
- decode(owner.id, bl);
+ std::string user_id;
+ decode(user_id, bl);
decode(linked, bl);
uint64_t ctime;
decode(ctime, bl);
@@ -1012,7 +1145,9 @@ struct RGWBucketEntryPoint
creation_time = real_clock::from_time_t((time_t)ctime);
}
if (struct_v >= 9) {
- decode(owner, bl);
+ ceph::converted_variant::decode(owner, bl);
+ } else {
+ owner = rgw_user{"", user_id};
}
if (struct_v >= 10) {
decode(creation_time, bl);
@@ -1138,6 +1273,7 @@ struct req_state : DoutPrefixProvider {
std::string src_bucket_name;
std::unique_ptr<rgw::sal::Object> src_object;
ACLOwner bucket_owner;
+ // Resource owner for the authenticated identity, initialized in authorize()
ACLOwner owner;
std::string zonegroup_name;
@@ -1196,7 +1332,7 @@ struct req_state : DoutPrefixProvider {
rgw::IAM::Environment env;
boost::optional<rgw::IAM::Policy> iam_policy;
boost::optional<PublicAccessBlockConfiguration> bucket_access_conf;
- std::vector<rgw::IAM::Policy> iam_user_policies;
+ std::vector<rgw::IAM::Policy> iam_identity_policies;
/* Is the request made by a user marked as a system one?
* Being a system user also implies admin status. */
@@ -1627,13 +1763,16 @@ bool verify_object_permission_no_policy(const DoutPrefixProvider* dpp,
const RGWAccessControlPolicy& object_acl,
const int perm);
-/** Check if the req_state's user has the necessary permissions
- * to do the requested action */
-rgw::IAM::Effect eval_identity_or_session_policies(const DoutPrefixProvider* dpp,
- const std::vector<rgw::IAM::Policy>& user_policies,
- const rgw::IAM::Environment& env,
- const uint64_t op,
- const rgw::ARN& arn);
+// determine whether a request is allowed or denied within an account
+rgw::IAM::Effect evaluate_iam_policies(
+ const DoutPrefixProvider* dpp,
+ const rgw::IAM::Environment& env,
+ const rgw::auth::Identity& identity,
+ bool account_root, uint64_t op, const rgw::ARN& arn,
+ const boost::optional<rgw::IAM::Policy>& resource_policy,
+ const std::vector<rgw::IAM::Policy>& identity_policies,
+ const std::vector<rgw::IAM::Policy>& session_policies);
+
bool verify_user_permission(const DoutPrefixProvider* dpp,
req_state * const s,
const RGWAccessControlPolicy& user_acl,
@@ -1657,14 +1796,17 @@ bool verify_user_permission_no_policy(const DoutPrefixProvider* dpp,
bool verify_bucket_permission(
const DoutPrefixProvider* dpp,
req_state * const s,
- const rgw_bucket& bucket,
+ const rgw::ARN& arn,
const RGWAccessControlPolicy& user_acl,
const RGWAccessControlPolicy& bucket_acl,
const boost::optional<rgw::IAM::Policy>& bucket_policy,
const std::vector<rgw::IAM::Policy>& identity_policies,
const std::vector<rgw::IAM::Policy>& session_policies,
const uint64_t op);
-bool verify_bucket_permission(const DoutPrefixProvider* dpp, req_state * const s, const uint64_t op);
+bool verify_bucket_permission(const DoutPrefixProvider* dpp, req_state* s,
+ const rgw::ARN& arn, uint64_t op);
+bool verify_bucket_permission(const DoutPrefixProvider* dpp,
+ req_state* s, uint64_t op);
bool verify_bucket_permission_no_policy(
const DoutPrefixProvider* dpp,
req_state * const s,
@@ -1674,8 +1816,6 @@ bool verify_bucket_permission_no_policy(
bool verify_bucket_permission_no_policy(const DoutPrefixProvider* dpp,
req_state * const s,
const int perm);
-int verify_bucket_owner_or_policy(req_state* const s,
- const uint64_t op);
extern bool verify_object_permission(
const DoutPrefixProvider* dpp,
req_state * const s,
diff --git a/src/rgw/rgw_crypt.cc b/src/rgw/rgw_crypt.cc
index 085fda0a44b..58874bf22b9 100644
--- a/src/rgw/rgw_crypt.cc
+++ b/src/rgw/rgw_crypt.cc
@@ -13,6 +13,7 @@
#include <rgw/rgw_b64.h>
#include <rgw/rgw_rest_s3.h>
#include "include/ceph_assert.h"
+#include "include/function2.hpp"
#include "crypto/crypto_accel.h"
#include "crypto/crypto_plugin.h"
#include "rgw/rgw_kms.h"
@@ -964,7 +965,13 @@ std::string expand_key_name(req_state *s, const std::string_view&t)
continue;
}
if (t.compare(i+1, 8, "owner_id") == 0) {
- r.append(s->bucket->get_info().owner.id);
+ r.append(std::visit(fu2::overload(
+ [] (const rgw_user& user_id) -> const std::string& {
+ return user_id.id;
+ },
+ [] (const rgw_account_id& account_id) -> const std::string& {
+ return account_id;
+ }), s->bucket->get_info().owner));
i += 9;
continue;
}
diff --git a/src/rgw/rgw_data_access.cc b/src/rgw/rgw_data_access.cc
index 76cadc9c8bf..06e13c3890c 100644
--- a/src/rgw/rgw_data_access.cc
+++ b/src/rgw/rgw_data_access.cc
@@ -125,7 +125,7 @@ int RGWDataAccess::Object::put(bufferlist& data,
std::string req_id = driver->zone_unique_id(driver->get_new_req_id());
std::unique_ptr<rgw::sal::Writer> processor;
- processor = driver->get_atomic_writer(dpp, y, obj.get(), owner.id,
+ processor = driver->get_atomic_writer(dpp, y, obj.get(), owner,
nullptr, olh_epoch, req_id);
int ret = processor->prepare(y);
diff --git a/src/rgw/rgw_file.cc b/src/rgw/rgw_file.cc
index 3424d4b04d7..66e883e7257 100644
--- a/src/rgw/rgw_file.cc
+++ b/src/rgw/rgw_file.cc
@@ -1872,7 +1872,7 @@ namespace rgw {
}
}
processor = get_driver()->get_atomic_writer(this, state->yield, state->object.get(),
- state->bucket_owner.id,
+ state->bucket_owner,
&state->dest_placement, 0, state->req_id);
op_ret = processor->prepare(state->yield);
diff --git a/src/rgw/rgw_iam_managed_policy.cc b/src/rgw/rgw_iam_managed_policy.cc
new file mode 100644
index 00000000000..4e7f48cfe5f
--- /dev/null
+++ b/src/rgw/rgw_iam_managed_policy.cc
@@ -0,0 +1,191 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "rgw_iam_managed_policy.h"
+#include "rgw_iam_policy.h"
+
+namespace rgw::IAM {
+
+// Type: AWS managed policy
+// Creation time: February 06, 2015, 18:40 UTC
+// Edited time: June 21, 2019, 19:40 UTC
+// ARN: arn:aws:iam::aws:policy/IAMFullAccess
+// Policy version: v2 (default)
+static constexpr std::string_view IAMFullAccess = R"(
+{
+ "Version" : "2012-10-17",
+ "Statement" : [
+ {
+ "Effect" : "Allow",
+ "Action" : [
+ "iam:*",
+ "organizations:DescribeAccount",
+ "organizations:DescribeOrganization",
+ "organizations:DescribeOrganizationalUnit",
+ "organizations:DescribePolicy",
+ "organizations:ListChildren",
+ "organizations:ListParents",
+ "organizations:ListPoliciesForTarget",
+ "organizations:ListRoots",
+ "organizations:ListPolicies",
+ "organizations:ListTargetsForPolicy"
+ ],
+ "Resource" : "*"
+ }
+ ]
+})";
+
+// Type: AWS managed policy
+// Creation time: February 06, 2015, 18:40 UTC
+// Edited time: January 25, 2018, 19:11 UTC
+// ARN: arn:aws:iam::aws:policy/IAMReadOnlyAccess
+// Policy version: v4 (default)
+static constexpr std::string_view IAMReadOnlyAccess = R"(
+{
+ "Version" : "2012-10-17",
+ "Statement" : [
+ {
+ "Effect" : "Allow",
+ "Action" : [
+ "iam:GenerateCredentialReport",
+ "iam:GenerateServiceLastAccessedDetails",
+ "iam:Get*",
+ "iam:List*",
+ "iam:SimulateCustomPolicy",
+ "iam:SimulatePrincipalPolicy"
+ ],
+ "Resource" : "*"
+ }
+ ]
+})";
+
+// Type: AWS managed policy
+// Creation time: February 06, 2015, 18:41 UTC
+// Edited time: February 06, 2015, 18:41 UTC
+// ARN: arn:aws:iam::aws:policy/AmazonSNSFullAccess
+// Policy version: v1 (default)
+static constexpr std::string_view AmazonSNSFullAccess = R"(
+{
+ "Version" : "2012-10-17",
+ "Statement" : [
+ {
+ "Action" : [
+ "sns:*"
+ ],
+ "Effect" : "Allow",
+ "Resource" : "*"
+ }
+ ]
+})";
+
+// Type: AWS managed policy
+// Creation time: February 06, 2015, 18:41 UTC
+// Edited time: February 06, 2015, 18:41 UTC
+// ARN: arn:aws:iam::aws:policy/AmazonSNSReadOnlyAccess
+// Policy version: v1 (default)
+static constexpr std::string_view AmazonSNSReadOnlyAccess = R"(
+{
+ "Version" : "2012-10-17",
+ "Statement" : [
+ {
+ "Effect" : "Allow",
+ "Action" : [
+ "sns:GetTopicAttributes",
+ "sns:List*"
+ ],
+ "Resource" : "*"
+ }
+ ]
+})";
+
+// Type: AWS managed policy
+// Creation time: February 06, 2015, 18:40 UTC
+// Edited time: September 27, 2021, 20:16 UTC
+// ARN: arn:aws:iam::aws:policy/AmazonS3FullAccess
+// Policy version: v2 (default)
+static constexpr std::string_view AmazonS3FullAccess = R"(
+{
+ "Version" : "2012-10-17",
+ "Statement" : [
+ {
+ "Effect" : "Allow",
+ "Action" : [
+ "s3:*",
+ "s3-object-lambda:*"
+ ],
+ "Resource" : "*"
+ }
+ ]
+})";
+
+// Type: AWS managed policy
+// Creation time: February 06, 2015, 18:40 UTC
+// Edited time: August 10, 2023, 21:31 UTC
+// ARN: arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess
+// Policy version: v3 (default)
+static constexpr std::string_view AmazonS3ReadOnlyAccess = R"(
+{
+ "Version" : "2012-10-17",
+ "Statement" : [
+ {
+ "Effect" : "Allow",
+ "Action" : [
+ "s3:Get*",
+ "s3:List*",
+ "s3:Describe*",
+ "s3-object-lambda:Get*",
+ "s3-object-lambda:List*"
+ ],
+ "Resource" : "*"
+ }
+ ]
+})";
+
+auto get_managed_policy(CephContext* cct, std::string_view arn)
+ -> std::optional<Policy>
+{
+ const std::string* tenant = nullptr;
+ constexpr bool reject = false; // reject_invalid_principals
+ if (arn == "arn:aws:iam::aws:policy/IAMFullAccess") {
+ return Policy{cct, tenant, std::string{IAMFullAccess}, reject};
+ } else if (arn == "arn:aws:iam::aws:policy/IAMReadOnlyAccess") {
+ return Policy{cct, tenant, std::string{IAMReadOnlyAccess}, reject};
+ } else if (arn == "arn:aws:iam::aws:policy/AmazonSNSFullAccess") {
+ return Policy{cct, tenant, std::string{AmazonSNSFullAccess}, reject};
+ } else if (arn == "arn:aws:iam::aws:policy/AmazonSNSReadOnlyAccess") {
+ return Policy{cct, tenant, std::string{AmazonSNSReadOnlyAccess}, reject};
+ } else if (arn == "arn:aws:iam::aws:policy/AmazonS3FullAccess") {
+ return Policy{cct, tenant, std::string{AmazonS3FullAccess}, reject};
+ } else if (arn == "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess") {
+ return Policy{cct, tenant, std::string{AmazonS3ReadOnlyAccess}, reject};
+ }
+ return {};
+}
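A minimal usage sketch (hypothetical caller; `cct` and `identity_policies` are assumed to be in scope): unknown ARNs yield std::nullopt rather than an error, so callers can treat a missing entry as "not a managed policy".

  std::optional<rgw::IAM::Policy> p = rgw::IAM::get_managed_policy(
      cct, "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess");
  if (p) {
    identity_policies.push_back(std::move(*p)); // std::vector<rgw::IAM::Policy>
  }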
+
+void encode(const ManagedPolicies& m, bufferlist& bl, uint64_t f)
+{
+ ENCODE_START(1, 1, bl);
+ encode(m.arns, bl);
+ ENCODE_FINISH(bl);
+}
+
+void decode(ManagedPolicies& m, bufferlist::const_iterator& bl)
+{
+ DECODE_START(1, bl);
+ decode(m.arns, bl);
+ DECODE_FINISH(bl);
+}
+
+} // namespace rgw::IAM
diff --git a/src/rgw/rgw_iam_managed_policy.h b/src/rgw/rgw_iam_managed_policy.h
new file mode 100644
index 00000000000..37b519e535b
--- /dev/null
+++ b/src/rgw/rgw_iam_managed_policy.h
@@ -0,0 +1,39 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#pragma once
+
+#include <optional>
+#include <string>
+#include <boost/container/flat_set.hpp>
+#include "common/ceph_context.h"
+#include "include/buffer_fwd.h"
+
+namespace rgw::IAM {
+
+struct Policy;
+
+/// Return a managed policy by ARN.
+auto get_managed_policy(CephContext* cct, std::string_view arn)
+ -> std::optional<Policy>;
+
+/// A serializable container for managed policy ARNs.
+struct ManagedPolicies {
+ boost::container::flat_set<std::string> arns;
+};
+void encode(const ManagedPolicies&, bufferlist&, uint64_t f=0);
+void decode(ManagedPolicies&, bufferlist::const_iterator&);
+
+} // namespace rgw::IAM
diff --git a/src/rgw/rgw_iam_policy.cc b/src/rgw/rgw_iam_policy.cc
index 813b78f161e..bef4b587a66 100644
--- a/src/rgw/rgw_iam_policy.cc
+++ b/src/rgw/rgw_iam_policy.cc
@@ -81,6 +81,7 @@ static const actpair actpairs[] =
{ "s3:GetBucketLocation", s3GetBucketLocation },
{ "s3:GetBucketLogging", s3GetBucketLogging },
{ "s3:GetBucketNotification", s3GetBucketNotification },
+ { "s3:GetBucketOwnershipControls", s3GetBucketOwnershipControls },
{ "s3:GetBucketPolicy", s3GetBucketPolicy },
{ "s3:GetBucketPolicyStatus", s3GetBucketPolicyStatus },
{ "s3:GetBucketPublicAccessBlock", s3GetBucketPublicAccessBlock },
@@ -113,6 +114,7 @@ static const actpair actpairs[] =
{ "s3:PutBucketEncryption", s3PutBucketEncryption },
{ "s3:PutBucketLogging", s3PutBucketLogging },
{ "s3:PutBucketNotification", s3PutBucketNotification },
+ { "s3:PutBucketOwnershipControls", s3PutBucketOwnershipControls },
{ "s3:PutBucketPolicy", s3PutBucketPolicy },
{ "s3:PutBucketRequestPayment", s3PutBucketRequestPayment },
{ "s3:PutBucketTagging", s3PutBucketTagging },
@@ -132,10 +134,16 @@ static const actpair actpairs[] =
{ "s3:PutPublicAccessBlock", s3PutPublicAccessBlock },
{ "s3:PutReplicationConfiguration", s3PutReplicationConfiguration },
{ "s3:RestoreObject", s3RestoreObject },
+ { "s3:DescribeJob", s3DescribeJob },
+ { "s3-object-lambda:GetObject", s3objectlambdaGetObject },
+ { "s3-object-lambda:ListBucket", s3objectlambdaListBucket },
{ "iam:PutUserPolicy", iamPutUserPolicy },
{ "iam:GetUserPolicy", iamGetUserPolicy },
{ "iam:DeleteUserPolicy", iamDeleteUserPolicy },
{ "iam:ListUserPolicies", iamListUserPolicies },
+ { "iam:AttachUserPolicy", iamAttachUserPolicy },
+ { "iam:DetachUserPolicy", iamDetachUserPolicy },
+ { "iam:ListAttachedUserPolicies", iamListAttachedUserPolicies },
{ "iam:CreateRole", iamCreateRole},
{ "iam:DeleteRole", iamDeleteRole},
{ "iam:GetRole", iamGetRole},
@@ -145,6 +153,9 @@ static const actpair actpairs[] =
{ "iam:GetRolePolicy", iamGetRolePolicy},
{ "iam:ListRolePolicies", iamListRolePolicies},
{ "iam:DeleteRolePolicy", iamDeleteRolePolicy},
+ { "iam:AttachRolePolicy", iamAttachRolePolicy },
+ { "iam:DetachRolePolicy", iamDetachRolePolicy },
+ { "iam:ListAttachedRolePolicies", iamListAttachedRolePolicies },
{ "iam:CreateOIDCProvider", iamCreateOIDCProvider},
{ "iam:DeleteOIDCProvider", iamDeleteOIDCProvider},
{ "iam:GetOIDCProvider", iamGetOIDCProvider},
@@ -153,6 +164,34 @@ static const actpair actpairs[] =
{ "iam:ListRoleTags", iamListRoleTags},
{ "iam:UntagRole", iamUntagRole},
{ "iam:UpdateRole", iamUpdateRole},
+ { "iam:CreateUser", iamCreateUser},
+ { "iam:GetUser", iamGetUser},
+ { "iam:UpdateUser", iamUpdateUser},
+ { "iam:DeleteUser", iamDeleteUser},
+ { "iam:ListUsers", iamListUsers},
+ { "iam:CreateAccessKey", iamCreateAccessKey},
+ { "iam:UpdateAccessKey", iamUpdateAccessKey},
+ { "iam:DeleteAccessKey", iamDeleteAccessKey},
+ { "iam:ListAccessKeys", iamListAccessKeys},
+ { "iam:CreateGroup", iamCreateGroup},
+ { "iam:GetGroup", iamGetGroup},
+ { "iam:UpdateGroup", iamUpdateGroup},
+ { "iam:DeleteGroup", iamDeleteGroup},
+ { "iam:ListGroups", iamListGroups},
+ { "iam:AddUserToGroup", iamAddUserToGroup},
+ { "iam:RemoveUserFromGroup", iamRemoveUserFromGroup},
+ { "iam:ListGroupsForUser", iamListGroupsForUser},
+ { "iam:PutGroupPolicy", iamPutGroupPolicy },
+ { "iam:GetGroupPolicy", iamGetGroupPolicy },
+ { "iam:ListGroupPolicies", iamListGroupPolicies },
+ { "iam:DeleteGroupPolicy", iamDeleteGroupPolicy },
+ { "iam:AttachGroupPolicy", iamAttachGroupPolicy },
+ { "iam:DetachGroupPolicy", iamDetachGroupPolicy },
+ { "iam:ListAttachedGroupPolicies", iamListAttachedGroupPolicies },
+ { "iam:GenerateCredentialReport", iamGenerateCredentialReport},
+ { "iam:GenerateServiceLastAccessedDetails", iamGenerateServiceLastAccessedDetails},
+ { "iam:SimulateCustomPolicy", iamSimulateCustomPolicy},
+ { "iam:SimulatePrincipalPolicy", iamSimulatePrincipalPolicy},
{ "sts:AssumeRole", stsAssumeRole},
{ "sts:AssumeRoleWithWebIdentity", stsAssumeRoleWithWebIdentity},
{ "sts:GetSessionToken", stsGetSessionToken},
@@ -162,6 +201,17 @@ static const actpair actpairs[] =
{ "sns:Publish", snsPublish},
{ "sns:SetTopicAttributes", snsSetTopicAttributes},
{ "sns:CreateTopic", snsCreateTopic},
+ { "sns:ListTopics", snsListTopics},
+ { "organizations:DescribeAccount", organizationsDescribeAccount},
+ { "organizations:DescribeOrganization", organizationsDescribeOrganization},
+ { "organizations:DescribeOrganizationalUnit", organizationsDescribeOrganizationalUnit},
+ { "organizations:DescribePolicy", organizationsDescribePolicy},
+ { "organizations:ListChildren", organizationsListChildren},
+ { "organizations:ListParents", organizationsListParents},
+ { "organizations:ListPoliciesForTarget", organizationsListPoliciesForTarget},
+ { "organizations:ListRoots", organizationsListRoots},
+ { "organizations:ListPolicies", organizationsListPolicies},
+ { "organizations:ListTargetsForPolicy", organizationsListTargetsForPolicy},
};
struct PolicyParser;
@@ -214,7 +264,7 @@ struct PolicyParser : public BaseReaderHandler<UTF8<>, PolicyParser> {
keyword_hash tokens;
std::vector<ParseState> s;
CephContext* cct;
- const string& tenant;
+ const string* tenant = nullptr;
Policy& policy;
uint32_t v = 0;
@@ -322,7 +372,7 @@ struct PolicyParser : public BaseReaderHandler<UTF8<>, PolicyParser> {
v = 0;
}
- PolicyParser(CephContext* cct, const string& tenant, Policy& policy,
+ PolicyParser(CephContext* cct, const string* tenant, Policy& policy,
bool reject_invalid_principals)
: cct(cct), tenant(tenant), policy(policy),
reject_invalid_principals(reject_invalid_principals) {}
@@ -482,7 +532,7 @@ boost::optional<Principal> ParseState::parse_principal(string&& s,
// AWS and Federated ARNs
if (auto a = ARN::parse(s)) {
if (a->resource == "root") {
- return Principal::tenant(std::move(a->account));
+ return Principal::account(std::move(a->account));
}
static const char rx_str[] = "([^/]*)/(.*)";
@@ -515,7 +565,7 @@ boost::optional<Principal> ParseState::parse_principal(string&& s,
// Since tenants are simply prefixes, there's no really good
// way to see if one exists or not. So we return the thing and
// let them try to match against it.
- return Principal::tenant(std::move(s));
+ return Principal::account(std::move(s));
}
if (errmsg)
*errmsg =
@@ -595,6 +645,12 @@ bool ParseState::do_string(CephContext* cct, const char* s, size_t l) {
if ((t->notaction & s3AllValue) == s3AllValue) {
t->notaction[s3All] = 1;
}
+ if ((t->action & s3objectlambdaAllValue) == s3objectlambdaAllValue) {
+ t->action[s3objectlambdaAll] = 1;
+ }
+ if ((t->notaction & s3objectlambdaAllValue) == s3objectlambdaAllValue) {
+ t->notaction[s3objectlambdaAll] = 1;
+ }
if ((t->action & iamAllValue) == iamAllValue) {
t->action[iamAll] = 1;
}
@@ -613,6 +669,12 @@ bool ParseState::do_string(CephContext* cct, const char* s, size_t l) {
if ((t->notaction & snsAllValue) == snsAllValue) {
t->notaction[snsAll] = 1;
}
+ if ((t->action & organizationsAllValue) == organizationsAllValue) {
+ t->action[organizationsAll] = 1;
+ }
+ if ((t->notaction & organizationsAllValue) == organizationsAllValue) {
+ t->notaction[organizationsAll] = 1;
+ }
}
}
} else if (w->id == TokenID::Resource || w->id == TokenID::NotResource) {
@@ -626,16 +688,16 @@ bool ParseState::do_string(CephContext* cct, const char* s, size_t l) {
return false;
}
// You can't specify resources for someone ELSE'S account.
- if (a->account.empty() || a->account == pp->tenant ||
- a->account == "*") {
- if (a->account.empty() || a->account == "*")
- a->account = pp->tenant;
+ if (a->account.empty() || pp->tenant == nullptr ||
+ a->account == *pp->tenant || a->account == "*") {
+ if (pp->tenant && (a->account.empty() || a->account == "*"))
+ a->account = *pp->tenant;
(w->id == TokenID::Resource ? t->resource : t->notresource)
.emplace(std::move(*a));
} else {
annotate(fmt::format("Policy owned by tenant `{}` cannot grant access to "
"resource owned by tenant `{}`.",
- pp->tenant, a->account));
+ *pp->tenant, a->account));
return false;
}
} else if (w->kind == TokenKind::cond_key) {
@@ -1155,6 +1217,15 @@ Effect Statement::eval(const Environment& e,
return Effect::Pass;
}
+static bool is_identity(const auth::Identity& ida,
+ const flat_set<auth::Principal>& princ)
+{
+ return std::any_of(princ.begin(), princ.end(),
+ [&ida] (const auth::Principal& p) {
+ return ida.is_identity(p);
+ });
+}
+
Effect Statement::eval_principal(const Environment& e,
boost::optional<const rgw::auth::Identity&> ida, boost::optional<PolicyPrincipal&> princ_type) const {
if (princ_type) {
@@ -1164,15 +1235,13 @@ Effect Statement::eval_principal(const Environment& e,
if (princ.empty() && noprinc.empty()) {
return Effect::Deny;
}
- if (ida->get_identity_type() != TYPE_ROLE && !princ.empty() && !ida->is_identity(princ)) {
+ if (ida->get_identity_type() != TYPE_ROLE && !princ.empty() && !is_identity(*ida, princ)) {
return Effect::Deny;
}
if (ida->get_identity_type() == TYPE_ROLE && !princ.empty()) {
bool princ_matched = false;
for (auto p : princ) { // Check each principal to determine the type of the one that has matched
- boost::container::flat_set<Principal> id;
- id.insert(p);
- if (ida->is_identity(id)) {
+ if (ida->is_identity(p)) {
if (p.is_assumed_role() || p.is_user()) {
if (princ_type) *princ_type = PolicyPrincipal::Session;
} else {
@@ -1184,7 +1253,7 @@ Effect Statement::eval_principal(const Environment& e,
if (!princ_matched) {
return Effect::Deny;
}
- } else if (!noprinc.empty() && ida->is_identity(noprinc)) {
+ } else if (!noprinc.empty() && is_identity(*ida, noprinc)) {
return Effect::Deny;
}
}
@@ -1274,6 +1343,12 @@ const char* action_bit_string(uint64_t action) {
case s3PutBucketAcl:
return "s3:PutBucketAcl";
+ case s3GetBucketOwnershipControls:
+ return "s3:GetBucketOwnershipControls";
+
+ case s3PutBucketOwnershipControls:
+ return "s3:PutBucketOwnershipControls";
+
case s3GetBucketCORS:
return "s3:GetBucketCORS";
@@ -1391,6 +1466,15 @@ const char* action_bit_string(uint64_t action) {
case s3BypassGovernanceRetention:
return "s3:BypassGovernanceRetention";
+ case s3DescribeJob:
+ return "s3:DescribeJob";
+
+ case s3objectlambdaGetObject:
+ return "s3-object-lambda:GetObject";
+
+ case s3objectlambdaListBucket:
+ return "s3-object-lambda:ListBucket";
+
case iamPutUserPolicy:
return "iam:PutUserPolicy";
@@ -1403,6 +1487,15 @@ const char* action_bit_string(uint64_t action) {
case iamDeleteUserPolicy:
return "iam:DeleteUserPolicy";
+ case iamAttachUserPolicy:
+ return "iam:AttachUserPolicy";
+
+ case iamDetachUserPolicy:
+ return "iam:DetachUserPolicy";
+
+ case iamListAttachedUserPolicies:
+ return "iam:ListAttachedUserPolicies";
+
case iamCreateRole:
return "iam:CreateRole";
@@ -1430,6 +1523,15 @@ const char* action_bit_string(uint64_t action) {
case iamDeleteRolePolicy:
return "iam:DeleteRolePolicy";
+ case iamAttachRolePolicy:
+ return "iam:AttachRolePolicy";
+
+ case iamDetachRolePolicy:
+ return "iam:DetachRolePolicy";
+
+ case iamListAttachedRolePolicies:
+ return "iam:ListAttachedRolePolicies";
+
case iamCreateOIDCProvider:
return "iam:CreateOIDCProvider";
@@ -1454,6 +1556,90 @@ const char* action_bit_string(uint64_t action) {
case iamUpdateRole:
return "iam:UpdateRole";
+ case iamCreateUser:
+ return "iam:CreateUser";
+
+ case iamGetUser:
+ return "iam:GetUser";
+
+ case iamUpdateUser:
+ return "iam:UpdateUser";
+
+ case iamDeleteUser:
+ return "iam:DeleteUser";
+
+ case iamListUsers:
+ return "iam:ListUsers";
+
+ case iamCreateAccessKey:
+ return "iam:CreateAccessKey";
+
+ case iamUpdateAccessKey:
+ return "iam:UpdateAccessKey";
+
+ case iamDeleteAccessKey:
+ return "iam:DeleteAccessKey";
+
+ case iamListAccessKeys:
+ return "iam:ListAccessKeys";
+
+ case iamCreateGroup:
+ return "iam:CreateGroup";
+
+ case iamGetGroup:
+ return "iam:GetGroup";
+
+ case iamUpdateGroup:
+ return "iam:UpdateGroup";
+
+ case iamDeleteGroup:
+ return "iam:DeleteGroup";
+
+ case iamListGroups:
+ return "iam:ListGroups";
+
+ case iamAddUserToGroup:
+ return "iam:AddUserToGroup";
+
+ case iamRemoveUserFromGroup:
+ return "iam:RemoveUserFromGroup";
+
+ case iamListGroupsForUser:
+ return "iam:ListGroupsForUser";
+
+ case iamPutGroupPolicy:
+ return "iam:PutGroupPolicy";
+
+ case iamGetGroupPolicy:
+ return "iam:GetGroupPolicy";
+
+ case iamListGroupPolicies:
+ return "iam:ListGroupPolicies";
+
+ case iamDeleteGroupPolicy:
+ return "iam:DeleteGroupPolicy";
+
+ case iamAttachGroupPolicy:
+ return "iam:AttachGroupPolicy";
+
+ case iamDetachGroupPolicy:
+ return "iam:DetachGroupPolicy";
+
+ case iamListAttachedGroupPolicies:
+ return "iam:ListAttachedGroupPolicies";
+
+ case iamGenerateCredentialReport:
+ return "iam:GenerateCredentialReport";
+
+ case iamGenerateServiceLastAccessedDetails:
+ return "iam:GenerateServiceLastAccessedDetails";
+
+ case iamSimulateCustomPolicy:
+ return "iam:SimulateCustomPolicy";
+
+ case iamSimulatePrincipalPolicy:
+ return "iam:SimulatePrincipalPolicy";
+
case stsAssumeRole:
return "sts:AssumeRole";
@@ -1480,6 +1666,39 @@ const char* action_bit_string(uint64_t action) {
case snsCreateTopic:
return "sns:CreateTopic";
+
+ case snsListTopics:
+ return "sns:ListTopics";
+
+ case organizationsDescribeAccount:
+ return "organizations:DescribeAccount";
+
+ case organizationsDescribeOrganization:
+ return "organizations:DescribeOrganization";
+
+ case organizationsDescribeOrganizationalUnit:
+ return "organizations:DescribeOrganizationalUnit";
+
+ case organizationsDescribePolicy:
+ return "organizations:DescribePolicy";
+
+ case organizationsListChildren:
+ return "organizations:ListChildren";
+
+ case organizationsListParents:
+ return "organizations:ListParents";
+
+ case organizationsListPoliciesForTarget:
+ return "organizations:ListPoliciesForTarget";
+
+ case organizationsListRoots:
+ return "organizations:ListRoots";
+
+ case organizationsListPolicies:
+ return "organizations:ListPolicies";
+
+ case organizationsListTargetsForPolicy:
+ return "organizations:ListTargetsForPolicy";
}
return "s3Invalid";
}
@@ -1578,10 +1797,10 @@ ostream& operator <<(ostream& m, const Statement& s) {
return m << " }";
}
-Policy::Policy(CephContext* cct, const string& tenant,
- const bufferlist& _text,
+Policy::Policy(CephContext* cct, const string* tenant,
+ std::string _text,
bool reject_invalid_principals)
- : text(_text.to_str()) {
+ : text(std::move(_text)) {
StringStream ss(text.data());
PolicyParser pp(cct, tenant, *this, reject_invalid_principals);
auto pr = Reader{}.Parse<kParseNumbersAsStringsFlag |
diff --git a/src/rgw/rgw_iam_policy.h b/src/rgw/rgw_iam_policy.h
index 5d6f334c176..262aeb69149 100644
--- a/src/rgw/rgw_iam_policy.h
+++ b/src/rgw/rgw_iam_policy.h
@@ -40,116 +40,171 @@ class Identity;
namespace rgw {
namespace IAM {
-static constexpr std::uint64_t s3GetObject = 0;
-static constexpr std::uint64_t s3GetObjectVersion = 1;
-static constexpr std::uint64_t s3PutObject = 2;
-static constexpr std::uint64_t s3GetObjectAcl = 3;
-static constexpr std::uint64_t s3GetObjectVersionAcl = 4;
-static constexpr std::uint64_t s3PutObjectAcl = 5;
-static constexpr std::uint64_t s3PutObjectVersionAcl = 6;
-static constexpr std::uint64_t s3DeleteObject = 7;
-static constexpr std::uint64_t s3DeleteObjectVersion = 8;
-static constexpr std::uint64_t s3ListMultipartUploadParts = 9;
-static constexpr std::uint64_t s3AbortMultipartUpload = 10;
-static constexpr std::uint64_t s3GetObjectTorrent = 11;
-static constexpr std::uint64_t s3GetObjectVersionTorrent = 12;
-static constexpr std::uint64_t s3RestoreObject = 13;
-static constexpr std::uint64_t s3CreateBucket = 14;
-static constexpr std::uint64_t s3DeleteBucket = 15;
-static constexpr std::uint64_t s3ListBucket = 16;
-static constexpr std::uint64_t s3ListBucketVersions = 17;
-static constexpr std::uint64_t s3ListAllMyBuckets = 18;
-static constexpr std::uint64_t s3ListBucketMultipartUploads = 19;
-static constexpr std::uint64_t s3GetAccelerateConfiguration = 20;
-static constexpr std::uint64_t s3PutAccelerateConfiguration = 21;
-static constexpr std::uint64_t s3GetBucketAcl = 22;
-static constexpr std::uint64_t s3PutBucketAcl = 23;
-static constexpr std::uint64_t s3GetBucketCORS = 24;
-static constexpr std::uint64_t s3PutBucketCORS = 25;
-static constexpr std::uint64_t s3GetBucketVersioning = 26;
-static constexpr std::uint64_t s3PutBucketVersioning = 27;
-static constexpr std::uint64_t s3GetBucketRequestPayment = 28;
-static constexpr std::uint64_t s3PutBucketRequestPayment = 29;
-static constexpr std::uint64_t s3GetBucketLocation = 30;
-static constexpr std::uint64_t s3GetBucketPolicy = 31;
-static constexpr std::uint64_t s3DeleteBucketPolicy = 32;
-static constexpr std::uint64_t s3PutBucketPolicy = 33;
-static constexpr std::uint64_t s3GetBucketNotification = 34;
-static constexpr std::uint64_t s3PutBucketNotification = 35;
-static constexpr std::uint64_t s3GetBucketLogging = 36;
-static constexpr std::uint64_t s3PutBucketLogging = 37;
-static constexpr std::uint64_t s3GetBucketTagging = 38;
-static constexpr std::uint64_t s3PutBucketTagging = 39;
-static constexpr std::uint64_t s3GetBucketWebsite = 40;
-static constexpr std::uint64_t s3PutBucketWebsite = 41;
-static constexpr std::uint64_t s3DeleteBucketWebsite = 42;
-static constexpr std::uint64_t s3GetLifecycleConfiguration = 43;
-static constexpr std::uint64_t s3PutLifecycleConfiguration = 44;
-static constexpr std::uint64_t s3PutReplicationConfiguration = 45;
-static constexpr std::uint64_t s3GetReplicationConfiguration = 46;
-static constexpr std::uint64_t s3DeleteReplicationConfiguration = 47;
-static constexpr std::uint64_t s3GetObjectTagging = 48;
-static constexpr std::uint64_t s3PutObjectTagging = 49;
-static constexpr std::uint64_t s3DeleteObjectTagging = 50;
-static constexpr std::uint64_t s3GetObjectVersionTagging = 51;
-static constexpr std::uint64_t s3PutObjectVersionTagging = 52;
-static constexpr std::uint64_t s3DeleteObjectVersionTagging = 53;
-static constexpr std::uint64_t s3PutBucketObjectLockConfiguration = 54;
-static constexpr std::uint64_t s3GetBucketObjectLockConfiguration = 55;
-static constexpr std::uint64_t s3PutObjectRetention = 56;
-static constexpr std::uint64_t s3GetObjectRetention = 57;
-static constexpr std::uint64_t s3PutObjectLegalHold = 58;
-static constexpr std::uint64_t s3GetObjectLegalHold = 59;
-static constexpr std::uint64_t s3BypassGovernanceRetention = 60;
-static constexpr std::uint64_t s3GetBucketPolicyStatus = 61;
-static constexpr std::uint64_t s3PutPublicAccessBlock = 62;
-static constexpr std::uint64_t s3GetPublicAccessBlock = 63;
-static constexpr std::uint64_t s3DeletePublicAccessBlock = 64;
-static constexpr std::uint64_t s3GetBucketPublicAccessBlock = 65;
-static constexpr std::uint64_t s3PutBucketPublicAccessBlock = 66;
-static constexpr std::uint64_t s3DeleteBucketPublicAccessBlock = 67;
-static constexpr std::uint64_t s3GetBucketEncryption = 68;
-static constexpr std::uint64_t s3PutBucketEncryption = 69;
-static constexpr std::uint64_t s3All = 70;
-
-static constexpr std::uint64_t iamPutUserPolicy = s3All + 1;
-static constexpr std::uint64_t iamGetUserPolicy = s3All + 2;
-static constexpr std::uint64_t iamDeleteUserPolicy = s3All + 3;
-static constexpr std::uint64_t iamListUserPolicies = s3All + 4;
-static constexpr std::uint64_t iamCreateRole = s3All + 5;
-static constexpr std::uint64_t iamDeleteRole = s3All + 6;
-static constexpr std::uint64_t iamModifyRoleTrustPolicy = s3All + 7;
-static constexpr std::uint64_t iamGetRole = s3All + 8;
-static constexpr std::uint64_t iamListRoles = s3All + 9;
-static constexpr std::uint64_t iamPutRolePolicy = s3All + 10;
-static constexpr std::uint64_t iamGetRolePolicy = s3All + 11;
-static constexpr std::uint64_t iamListRolePolicies = s3All + 12;
-static constexpr std::uint64_t iamDeleteRolePolicy = s3All + 13;
-static constexpr std::uint64_t iamCreateOIDCProvider = s3All + 14;
-static constexpr std::uint64_t iamDeleteOIDCProvider = s3All + 15;
-static constexpr std::uint64_t iamGetOIDCProvider = s3All + 16;
-static constexpr std::uint64_t iamListOIDCProviders = s3All + 17;
-static constexpr std::uint64_t iamTagRole = s3All + 18;
-static constexpr std::uint64_t iamListRoleTags = s3All + 19;
-static constexpr std::uint64_t iamUntagRole = s3All + 20;
-static constexpr std::uint64_t iamUpdateRole = s3All + 21;
-static constexpr std::uint64_t iamAll = s3All + 22;
-
-static constexpr std::uint64_t stsAssumeRole = iamAll + 1;
-static constexpr std::uint64_t stsAssumeRoleWithWebIdentity = iamAll + 2;
-static constexpr std::uint64_t stsGetSessionToken = iamAll + 3;
-static constexpr std::uint64_t stsTagSession = iamAll + 4;
-static constexpr std::uint64_t stsAll = iamAll + 5;
-
-static constexpr std::uint64_t snsGetTopicAttributes = stsAll + 1;
-static constexpr std::uint64_t snsDeleteTopic = stsAll + 2;
-static constexpr std::uint64_t snsPublish = stsAll + 3;
-static constexpr std::uint64_t snsSetTopicAttributes = stsAll + 4;
-static constexpr std::uint64_t snsCreateTopic = stsAll + 5;
-static constexpr std::uint64_t snsAll = stsAll + 6;
-
-static constexpr std::uint64_t s3Count = s3All;
-static constexpr std::uint64_t allCount = snsAll + 1;
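+// action bits, one per enumerator; each service group ends in a sentinel
+// (s3All, iamAll, ...) used below to build its range mask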
+enum {
+ s3GetObject,
+ s3GetObjectVersion,
+ s3PutObject,
+ s3GetObjectAcl,
+ s3GetObjectVersionAcl,
+ s3PutObjectAcl,
+ s3PutObjectVersionAcl,
+ s3DeleteObject,
+ s3DeleteObjectVersion,
+ s3ListMultipartUploadParts,
+ s3AbortMultipartUpload,
+ s3GetObjectTorrent,
+ s3GetObjectVersionTorrent,
+ s3RestoreObject,
+ s3CreateBucket,
+ s3DeleteBucket,
+ s3ListBucket,
+ s3ListBucketVersions,
+ s3ListAllMyBuckets,
+ s3ListBucketMultipartUploads,
+ s3GetAccelerateConfiguration,
+ s3PutAccelerateConfiguration,
+ s3GetBucketAcl,
+ s3PutBucketAcl,
+ s3GetBucketOwnershipControls,
+ s3PutBucketOwnershipControls,
+ s3GetBucketCORS,
+ s3PutBucketCORS,
+ s3GetBucketVersioning,
+ s3PutBucketVersioning,
+ s3GetBucketRequestPayment,
+ s3PutBucketRequestPayment,
+ s3GetBucketLocation,
+ s3GetBucketPolicy,
+ s3DeleteBucketPolicy,
+ s3PutBucketPolicy,
+ s3GetBucketNotification,
+ s3PutBucketNotification,
+ s3GetBucketLogging,
+ s3PutBucketLogging,
+ s3GetBucketTagging,
+ s3PutBucketTagging,
+ s3GetBucketWebsite,
+ s3PutBucketWebsite,
+ s3DeleteBucketWebsite,
+ s3GetLifecycleConfiguration,
+ s3PutLifecycleConfiguration,
+ s3PutReplicationConfiguration,
+ s3GetReplicationConfiguration,
+ s3DeleteReplicationConfiguration,
+ s3GetObjectTagging,
+ s3PutObjectTagging,
+ s3DeleteObjectTagging,
+ s3GetObjectVersionTagging,
+ s3PutObjectVersionTagging,
+ s3DeleteObjectVersionTagging,
+ s3PutBucketObjectLockConfiguration,
+ s3GetBucketObjectLockConfiguration,
+ s3PutObjectRetention,
+ s3GetObjectRetention,
+ s3PutObjectLegalHold,
+ s3GetObjectLegalHold,
+ s3BypassGovernanceRetention,
+ s3GetBucketPolicyStatus,
+ s3PutPublicAccessBlock,
+ s3GetPublicAccessBlock,
+ s3DeletePublicAccessBlock,
+ s3GetBucketPublicAccessBlock,
+ s3PutBucketPublicAccessBlock,
+ s3DeleteBucketPublicAccessBlock,
+ s3GetBucketEncryption,
+ s3PutBucketEncryption,
+ s3DescribeJob,
+ s3All,
+
+ s3objectlambdaGetObject,
+ s3objectlambdaListBucket,
+ s3objectlambdaAll,
+
+ iamPutUserPolicy,
+ iamGetUserPolicy,
+ iamDeleteUserPolicy,
+ iamListUserPolicies,
+ iamAttachUserPolicy,
+ iamDetachUserPolicy,
+ iamListAttachedUserPolicies,
+ iamCreateRole,
+ iamDeleteRole,
+ iamModifyRoleTrustPolicy,
+ iamGetRole,
+ iamListRoles,
+ iamPutRolePolicy,
+ iamGetRolePolicy,
+ iamListRolePolicies,
+ iamDeleteRolePolicy,
+ iamAttachRolePolicy,
+ iamDetachRolePolicy,
+ iamListAttachedRolePolicies,
+ iamCreateOIDCProvider,
+ iamDeleteOIDCProvider,
+ iamGetOIDCProvider,
+ iamListOIDCProviders,
+ iamTagRole,
+ iamListRoleTags,
+ iamUntagRole,
+ iamUpdateRole,
+ iamCreateUser,
+ iamGetUser,
+ iamUpdateUser,
+ iamDeleteUser,
+ iamListUsers,
+ iamCreateAccessKey,
+ iamUpdateAccessKey,
+ iamDeleteAccessKey,
+ iamListAccessKeys,
+ iamCreateGroup,
+ iamGetGroup,
+ iamUpdateGroup,
+ iamDeleteGroup,
+ iamListGroups,
+ iamAddUserToGroup,
+ iamRemoveUserFromGroup,
+ iamListGroupsForUser,
+ iamPutGroupPolicy,
+ iamGetGroupPolicy,
+ iamListGroupPolicies,
+ iamDeleteGroupPolicy,
+ iamAttachGroupPolicy,
+ iamDetachGroupPolicy,
+ iamListAttachedGroupPolicies,
+ iamGenerateCredentialReport,
+ iamGenerateServiceLastAccessedDetails,
+ iamSimulateCustomPolicy,
+ iamSimulatePrincipalPolicy,
+ iamAll,
+
+ stsAssumeRole,
+ stsAssumeRoleWithWebIdentity,
+ stsGetSessionToken,
+ stsTagSession,
+ stsAll,
+
+ snsGetTopicAttributes,
+ snsDeleteTopic,
+ snsPublish,
+ snsSetTopicAttributes,
+ snsCreateTopic,
+ snsListTopics,
+ snsAll,
+
+ organizationsDescribeAccount,
+ organizationsDescribeOrganization,
+ organizationsDescribeOrganizationalUnit,
+ organizationsDescribePolicy,
+ organizationsListChildren,
+ organizationsListParents,
+ organizationsListPoliciesForTarget,
+ organizationsListRoots,
+ organizationsListPolicies,
+ organizationsListTargetsForPolicy,
+ organizationsAll,
+
+ allCount
+};
using Action_t = std::bitset<allCount>;
using NotAction_t = Action_t;
@@ -169,9 +224,11 @@ constexpr std::bitset<N> set_cont_bits(size_t start, size_t end)
static const Action_t None(0);
static const Action_t s3AllValue = set_cont_bits<allCount>(0,s3All);
-static const Action_t iamAllValue = set_cont_bits<allCount>(s3All+1,iamAll);
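+// each *AllValue mask covers the contiguous run of action bits belonging to one service group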
+static const Action_t s3objectlambdaAllValue = set_cont_bits<allCount>(s3All+1,s3objectlambdaAll);
+static const Action_t iamAllValue = set_cont_bits<allCount>(s3objectlambdaAll+1,iamAll);
static const Action_t stsAllValue = set_cont_bits<allCount>(iamAll+1,stsAll);
-static const Action_t snsAllValue = set_cont_bits<allCount>(stsAll + 1, snsAll);
+static const Action_t snsAllValue = set_cont_bits<allCount>(stsAll+1, snsAll);
+static const Action_t organizationsAllValue = set_cont_bits<allCount>(snsAll+1,organizationsAll);
static const Action_t allValue = set_cont_bits<allCount>(0,allCount);
namespace {
@@ -533,8 +590,8 @@ struct Policy {
// when executing operations that *set* a bucket policy, but should
// be false when reading a stored bucket policy so as not to break
// backwards compatibility.
- Policy(CephContext* cct, const std::string& tenant,
- const bufferlist& text,
+ Policy(CephContext* cct, const std::string* tenant,
+ std::string text,
bool reject_invalid_principals);
Effect eval(const Environment& e,
diff --git a/src/rgw/rgw_lc.cc b/src/rgw/rgw_lc.cc
index f8b4c9cc96c..7ae42cdfb70 100644
--- a/src/rgw/rgw_lc.cc
+++ b/src/rgw/rgw_lc.cc
@@ -597,7 +597,7 @@ static int remove_expired_obj(const DoutPrefixProvider* dpp,
= obj->get_bucket()->get_info().versioning_status();
del_op->params.obj_owner.id = rgw_user{meta.owner};
del_op->params.obj_owner.display_name = meta.owner_display_name;
- del_op->params.bucket_owner.id = bucket_info.owner;
+ del_op->params.bucket_owner = bucket_info.owner;
del_op->params.unmod_since = meta.mtime;
// notification supported only for RADOS driver for now
diff --git a/src/rgw/rgw_lib.cc b/src/rgw/rgw_lib.cc
index 5a8fc14a804..665a03b6279 100644
--- a/src/rgw/rgw_lib.cc
+++ b/src/rgw/rgw_lib.cc
@@ -247,7 +247,14 @@ namespace rgw {
/* FIXME: remove this after switching all handlers to the new
* authentication infrastructure. */
if (! s->auth.identity) {
- s->auth.identity = rgw::auth::transform_old_authinfo(s);
+ auto result = rgw::auth::transform_old_authinfo(
+ op, null_yield, env.driver, s->user.get());
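+ // the call returns an expected-style result: error() carries the failure, value() the identity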
+ if (!result) {
+ ret = result.error();
+ abort_req(s, op, ret);
+ goto done;
+ }
+ s->auth.identity = std::move(result).value();
}
ldpp_dout(s, 2) << "reading op permissions" << dendl;
@@ -377,7 +384,14 @@ namespace rgw {
/* FIXME: remove this after switching all handlers to the new authentication
* infrastructure. */
if (! s->auth.identity) {
- s->auth.identity = rgw::auth::transform_old_authinfo(s);
+ auto result = rgw::auth::transform_old_authinfo(
+ op, null_yield, env.driver, s->user.get());
+ if (!result) {
+ ret = result.error();
+ abort_req(s, op, ret);
+ goto done;
+ }
+ s->auth.identity = std::move(result).value();
}
ldpp_dout(s, 2) << "reading op permissions" << dendl;
@@ -563,9 +577,10 @@ namespace rgw {
if (ret < 0) {
derr << "ERROR: failed reading user info: uid=" << uid << " ret="
<< ret << dendl;
+ return ret;
}
user_info = user->get_info();
- return ret;
+ return 0;
}
int RGWLibRequest::read_permissions(RGWOp* op, optional_yield y) {
diff --git a/src/rgw/rgw_lib.h b/src/rgw/rgw_lib.h
index 30234eebcdd..643e0c2c2d2 100644
--- a/src/rgw/rgw_lib.h
+++ b/src/rgw/rgw_lib.h
@@ -4,6 +4,7 @@
#pragma once
#include <mutex>
+#include <optional>
#include "rgw_common.h"
#include "rgw_client_io.h"
#include "rgw_rest.h"
diff --git a/src/rgw/rgw_log.cc b/src/rgw/rgw_log.cc
index e3c463fcecc..5d7e3678f8f 100644
--- a/src/rgw/rgw_log.cc
+++ b/src/rgw/rgw_log.cc
@@ -200,21 +200,21 @@ static void log_usage(req_state *s, const string& op_name)
if (!usage_logger)
return;
- rgw_user user;
- rgw_user payer;
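+ // owners may now be users or accounts, so the usage log tracks them as plain strings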
+ std::string user;
+ std::string payer;
string bucket_name;
bucket_name = s->bucket_name;
if (!bucket_name.empty()) {
bucket_name = s->bucket_name;
- user = s->bucket_owner.id;
+ user = to_string(s->bucket_owner.id);
if (!rgw::sal::Bucket::empty(s->bucket.get()) &&
s->bucket->get_info().requester_pays) {
- payer = s->user->get_id();
+ payer = s->user->get_id().to_str();
}
} else {
- user = s->user->get_id();
+ user = to_string(s->owner.id);
}
bool error = s->err.is_err();
@@ -222,9 +222,7 @@ static void log_usage(req_state *s, const string& op_name)
bucket_name = "-"; /* bucket not found, use the invalid '-' as bucket name */
}
- string u = user.to_str();
- string p = payer.to_str();
- rgw_usage_log_entry entry(u, p, bucket_name);
+ rgw_usage_log_entry entry(user, payer, bucket_name);
uint64_t bytes_sent = ACCOUNTING_IO(s)->get_bytes_sent();
uint64_t bytes_received = ACCOUNTING_IO(s)->get_bytes_received();
@@ -261,7 +259,7 @@ void rgw_format_ops_log_entry(struct rgw_log_entry& entry, Formatter *formatter)
t.localtime(formatter->dump_stream("time_local"));
}
formatter->dump_string("remote_addr", entry.remote_addr);
- string obj_owner = entry.object_owner.to_str();
+ string obj_owner = to_string(entry.object_owner);
if (obj_owner.length())
formatter->dump_string("object_owner", obj_owner);
formatter->dump_string("user", entry.user);
@@ -305,6 +303,9 @@ void rgw_format_ops_log_entry(struct rgw_log_entry& entry, Formatter *formatter)
case TYPE_ROLE:
formatter->dump_string("authentication_type","STS");
break;
+ case TYPE_ROOT:
+ formatter->dump_string("authentication_type", "Local Account Root");
+ break;
default:
break;
}
@@ -679,8 +680,8 @@ int rgw_log_op(RGWREST* const rest, req_state *s, const RGWOp* op, OpsLogSink *o
void rgw_log_entry::generate_test_instances(list<rgw_log_entry*>& o)
{
rgw_log_entry *e = new rgw_log_entry;
- e->object_owner = "object_owner";
- e->bucket_owner = "bucket_owner";
+ e->object_owner = parse_owner("object_owner");
+ e->bucket_owner = parse_owner("bucket_owner");
e->bucket = "bucket";
e->remote_addr = "1.2.3.4";
e->user = "user";
@@ -696,14 +697,16 @@ void rgw_log_entry::generate_test_instances(list<rgw_log_entry*>& o)
e->bucket_id = "10";
e->trans_id = "trans_id";
e->identity_type = TYPE_RGW;
+ e->account_id = "account_id";
+ e->role_id = "role_id";
o.push_back(e);
o.push_back(new rgw_log_entry);
}
void rgw_log_entry::dump(Formatter *f) const
{
- f->dump_string("object_owner", object_owner.to_str());
- f->dump_string("bucket_owner", bucket_owner.to_str());
+ f->dump_string("object_owner", to_string(object_owner));
+ f->dump_string("bucket_owner", to_string(bucket_owner));
f->dump_string("bucket", bucket);
f->dump_stream("time") << time;
f->dump_string("remote_addr", remote_addr);
@@ -722,4 +725,10 @@ void rgw_log_entry::dump(Formatter *f) const
f->dump_string("bucket_id", bucket_id);
f->dump_string("trans_id", trans_id);
f->dump_unsigned("identity_type", identity_type);
+ if (!account_id.empty()) {
+ f->dump_string("account_id", account_id);
+ }
+ if (!role_id.empty()) {
+ f->dump_string("role_id", role_id);
+ }
}
diff --git a/src/rgw/rgw_log.h b/src/rgw/rgw_log.h
index 1dd79273e6a..828124aa758 100644
--- a/src/rgw/rgw_log.h
+++ b/src/rgw/rgw_log.h
@@ -6,6 +6,7 @@
#include <boost/container/flat_map.hpp>
#include "rgw_common.h"
#include "common/OutputDataSocket.h"
+#include "common/versioned_variant.h"
#include <vector>
#include <fstream>
#include "rgw_sal_fwd.h"
@@ -75,8 +76,8 @@ struct rgw_log_entry {
using headers_map = boost::container::flat_map<std::string, std::string>;
using Clock = req_state::Clock;
- rgw_user object_owner;
- rgw_user bucket_owner;
+ rgw_owner object_owner;
+ rgw_owner bucket_owner;
std::string bucket;
Clock::time_point time;
std::string remote_addr;
@@ -101,11 +102,16 @@ struct rgw_log_entry {
std::string subuser;
bool temp_url {false};
delete_multi_obj_op_meta delete_multi_obj_meta;
+ rgw_account_id account_id;
+ std::string role_id;
void encode(bufferlist &bl) const {
- ENCODE_START(14, 5, bl);
- encode(object_owner.id, bl);
- encode(bucket_owner.id, bl);
+ ENCODE_START(15, 5, bl);
+ // legacy object/bucket owner id fields, left empty; the full owners are encoded below (since v8)
+ std::string empty_owner_id;
+ encode(empty_owner_id, bl);
+ encode(empty_owner_id, bl);
+
encode(bucket, bl);
encode(time, bl);
encode(remote_addr, bl);
@@ -123,8 +129,9 @@ struct rgw_log_entry {
encode(bytes_received, bl);
encode(bucket_id, bl);
encode(obj, bl);
- encode(object_owner, bl);
- encode(bucket_owner, bl);
+ // transparently converted from rgw_user to rgw_owner
+ ceph::converted_variant::encode(object_owner, bl);
+ ceph::converted_variant::encode(bucket_owner, bl);
encode(x_headers, bl);
encode(trans_id, bl);
encode(token_claims, bl);
@@ -133,13 +140,17 @@ struct rgw_log_entry {
encode(subuser, bl);
encode(temp_url, bl);
encode(delete_multi_obj_meta, bl);
+ encode(account_id, bl);
+ encode(role_id, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator &p) {
- DECODE_START_LEGACY_COMPAT_LEN(14, 5, 5, p);
- decode(object_owner.id, p);
+ DECODE_START_LEGACY_COMPAT_LEN(15, 5, 5, p);
+ std::string object_owner_id;
+ std::string bucket_owner_id;
+ decode(object_owner_id, p);
if (struct_v > 3)
- decode(bucket_owner.id, p);
+ decode(bucket_owner_id, p);
decode(bucket, p);
decode(time, p);
decode(remote_addr, p);
@@ -176,8 +187,12 @@ struct rgw_log_entry {
decode(obj, p);
}
if (struct_v >= 8) {
- decode(object_owner, p);
- decode(bucket_owner, p);
+ // transparently converted from rgw_user to rgw_owner
+ ceph::converted_variant::decode(object_owner, p);
+ ceph::converted_variant::decode(bucket_owner, p);
+ } else {
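+ // entries older than v8 stored only string ids; re-parse them into rgw_owner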
+ object_owner = parse_owner(object_owner_id);
+ bucket_owner = parse_owner(bucket_owner_id);
}
if (struct_v >= 9) {
decode(x_headers, p);
@@ -199,6 +214,10 @@ struct rgw_log_entry {
if (struct_v >= 14) {
decode(delete_multi_obj_meta, p);
}
+ if (struct_v >= 15) {
+ decode(account_id, p);
+ decode(role_id, p);
+ }
DECODE_FINISH(p);
}
void dump(ceph::Formatter *f) const;
diff --git a/src/rgw/rgw_lua_request.cc b/src/rgw/rgw_lua_request.cc
index a36aad666f4..3caad296545 100644
--- a/src/rgw/rgw_lua_request.cc
+++ b/src/rgw/rgw_lua_request.cc
@@ -262,7 +262,7 @@ struct OwnerMetaTable : public EmptyMetaTable {
if (strcasecmp(index, "DisplayName") == 0) {
pushstring(L, owner->display_name);
} else if (strcasecmp(index, "User") == 0) {
- create_metatable<UserMetaTable>(L, name, index, false, &owner->id);
+ pushstring(L, to_string(owner->id));
} else {
return error_unknown_field(L, index, name);
}
@@ -303,8 +303,19 @@ struct BucketMetaTable : public EmptyMetaTable {
} else if (strcasecmp(index, "PlacementRule") == 0) {
create_metatable<PlacementRuleMetaTable>(L, name, index, false, &(bucket->get_info().placement_rule));
} else if (strcasecmp(index, "User") == 0) {
- create_metatable<UserMetaTable>(L, name, index, false,
- const_cast<rgw_user*>(&bucket->get_owner()));
+ const rgw_owner& owner = bucket->get_owner();
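+ // rgw_owner is a variant; expose "User" only when the bucket is owned by an rgw_user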
+ if (const rgw_user* u = std::get_if<rgw_user>(&owner); u) {
+ create_metatable<UserMetaTable>(L, name, index, false, const_cast<rgw_user*>(u));
+ } else {
+ lua_pushnil(L);
+ }
+ } else if (strcasecmp(index, "Account") == 0) {
+ const rgw_owner& owner = bucket->get_owner();
+ if (const rgw_account_id* a = std::get_if<rgw_account_id>(&owner); a) {
+ pushstring(L, *a);
+ } else {
+ lua_pushnil(L);
+ }
} else {
return error_unknown_field(L, index, name);
}
@@ -365,8 +376,7 @@ struct GrantMetaTable : public EmptyMetaTable {
lua_pushinteger(L, grant->get_type().get_type());
} else if (strcasecmp(index, "User") == 0) {
if (const auto user = grant->get_user(); user) {
- create_metatable<UserMetaTable>(L, name, index, false,
- const_cast<rgw_user*>(&user->id));
+ pushstring(L, to_string(user->id));
} else {
lua_pushnil(L);
}
@@ -733,7 +743,7 @@ struct RequestMetaTable : public EmptyMetaTable {
create_metatable<PolicyMetaTable>(L, name, index, false, s->iam_policy.get_ptr());
}
} else if (strcasecmp(index, "UserPolicies") == 0) {
- create_metatable<PoliciesMetaTable>(L, name, index, false, &(s->iam_user_policies));
+ create_metatable<PoliciesMetaTable>(L, name, index, false, &(s->iam_identity_policies));
} else if (strcasecmp(index, "RGWId") == 0) {
pushstring(L, s->host_id);
} else if (strcasecmp(index, "HTTP") == 0) {
diff --git a/src/rgw/rgw_main.cc b/src/rgw/rgw_main.cc
index be211e28c92..6d0dab8245a 100644
--- a/src/rgw/rgw_main.cc
+++ b/src/rgw/rgw_main.cc
@@ -134,6 +134,7 @@ int main(int argc, char *argv[])
register_async_signal_handler(SIGTERM, rgw::signal::handle_sigterm);
register_async_signal_handler(SIGINT, rgw::signal::handle_sigterm);
register_async_signal_handler(SIGUSR1, rgw::signal::handle_sigterm);
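+ // ignore SIGXFSZ so a write exceeding the file-size limit doesn't terminate the daemon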
+ register_async_signal_handler(SIGXFSZ, rgw::signal::sig_handler_noop);
sighandler_alrm = signal(SIGALRM, godown_alarm);
main.init_perfcounters();
@@ -184,6 +185,7 @@ int main(int argc, char *argv[])
unregister_async_signal_handler(SIGTERM, rgw::signal::handle_sigterm);
unregister_async_signal_handler(SIGINT, rgw::signal::handle_sigterm);
unregister_async_signal_handler(SIGUSR1, rgw::signal::handle_sigterm);
+ unregister_async_signal_handler(SIGXFSZ, rgw::signal::sig_handler_noop);
shutdown_async_signal_handler();
};
diff --git a/src/rgw/rgw_oidc_provider.cc b/src/rgw/rgw_oidc_provider.cc
index ddf9d863a5f..734c9a8788e 100644
--- a/src/rgw/rgw_oidc_provider.cc
+++ b/src/rgw/rgw_oidc_provider.cc
@@ -1,182 +1,41 @@
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
-#include <errno.h>
-#include <ctime>
-#include <regex>
-
-#include "common/errno.h"
-#include "common/Formatter.h"
-#include "common/ceph_json.h"
-#include "common/ceph_time.h"
-#include "rgw_rados.h"
-#include "rgw_zone.h"
-
-#include "include/types.h"
-#include "rgw_string.h"
-
-#include "rgw_common.h"
-#include "rgw_tools.h"
#include "rgw_oidc_provider.h"
-#include "services/svc_zone.h"
-#include "services/svc_sys_obj.h"
-
#define dout_subsys ceph_subsys_rgw
-using namespace std;
-
-namespace rgw { namespace sal {
-
-const string RGWOIDCProvider::oidc_url_oid_prefix = "oidc_url.";
-const string RGWOIDCProvider::oidc_arn_prefix = "arn:aws:iam::";
-
-int RGWOIDCProvider::get_tenant_url_from_arn(string& tenant, string& url)
-{
- auto provider_arn = rgw::ARN::parse(arn);
- if (!provider_arn) {
- return -EINVAL;
- }
- url = provider_arn->resource;
- tenant = provider_arn->account;
- auto pos = url.find("oidc-provider/");
- if (pos != std::string::npos) {
- url.erase(pos, 14);
- }
- return 0;
-}
-
-int RGWOIDCProvider::create(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
-{
- int ret;
-
- if (! validate_input(dpp)) {
- return -EINVAL;
- }
-
- string idp_url = url_remove_prefix(provider_url);
-
- /* check to see the name is not used */
- ret = read_url(dpp, idp_url, tenant, y);
- if (exclusive && ret == 0) {
- ldpp_dout(dpp, 0) << "ERROR: url " << provider_url << " already in use"
- << id << dendl;
- return -EEXIST;
- } else if ( ret < 0 && ret != -ENOENT) {
- ldpp_dout(dpp, 0) << "failed reading provider url " << provider_url << ": "
- << cpp_strerror(-ret) << dendl;
- return ret;
- }
-
- //arn
- arn = oidc_arn_prefix + tenant + ":oidc-provider/" + idp_url;
-
- // Creation time
- real_clock::time_point t = real_clock::now();
-
- struct timeval tv;
- real_clock::to_timeval(t, tv);
-
- char buf[30];
- struct tm result;
- gmtime_r(&tv.tv_sec, &result);
- strftime(buf,30,"%Y-%m-%dT%H:%M:%S", &result);
- sprintf(buf + strlen(buf),".%dZ",(int)tv.tv_usec/1000);
- creation_date.assign(buf, strlen(buf));
-
- ret = store_url(dpp, idp_url, exclusive, y);
- if (ret < 0) {
- ldpp_dout(dpp, 0) << "ERROR: storing role info in OIDC pool: "
- << provider_url << ": " << cpp_strerror(-ret) << dendl;
- return ret;
- }
-
- return 0;
-}
-
-int RGWOIDCProvider::get(const DoutPrefixProvider *dpp, optional_yield y)
-{
- string url, tenant;
- auto ret = get_tenant_url_from_arn(tenant, url);
- if (ret < 0) {
- ldpp_dout(dpp, 0) << "ERROR: failed to parse arn" << dendl;
- return -EINVAL;
- }
-
- if (this->tenant != tenant) {
- ldpp_dout(dpp, 0) << "ERROR: tenant in arn doesn't match that of user " << this->tenant << ", "
- << tenant << ": " << dendl;
- return -EINVAL;
- }
-
- ret = read_url(dpp, url, tenant, y);
- if (ret < 0) {
- return ret;
- }
-
- return 0;
-}
-
-void RGWOIDCProvider::dump(Formatter *f) const
+void RGWOIDCProviderInfo::dump(Formatter *f) const
{
- encode_json("OpenIDConnectProviderArn", arn, f);
+ encode_json("id", id, f);
+ encode_json("provider_url", provider_url, f);
+ encode_json("arn", arn, f);
+ encode_json("creation_date", creation_date, f);
+ encode_json("tenant", tenant, f);
+ encode_json("client_ids", client_ids, f);
+ encode_json("thumbprints", thumbprints, f);
}
-void RGWOIDCProvider::dump_all(Formatter *f) const
+void RGWOIDCProviderInfo::decode_json(JSONObj *obj)
{
- f->open_object_section("ClientIDList");
- for (auto it : client_ids) {
- encode_json("member", it, f);
- }
- f->close_section();
- encode_json("CreateDate", creation_date, f);
- f->open_object_section("ThumbprintList");
- for (auto it : thumbprints) {
- encode_json("member", it, f);
- }
- f->close_section();
- encode_json("Url", provider_url, f);
+ JSONDecoder::decode_json("id", id, obj);
+ JSONDecoder::decode_json("provider_url", provider_url, obj);
+ JSONDecoder::decode_json("arn", arn, obj);
+ JSONDecoder::decode_json("creation_date", creation_date, obj);
+ JSONDecoder::decode_json("tenant", tenant, obj);
+ JSONDecoder::decode_json("client_ids", client_ids, obj);
+ JSONDecoder::decode_json("thumbprints", thumbprints, obj);
}
-void RGWOIDCProvider::decode_json(JSONObj *obj)
+void RGWOIDCProviderInfo::generate_test_instances(std::list<RGWOIDCProviderInfo*>& l)
{
- JSONDecoder::decode_json("OpenIDConnectProviderArn", arn, obj);
+ auto p = new RGWOIDCProviderInfo;
+ p->id = "id";
+ p->provider_url = "server.example.com";
+ p->arn = "arn:aws:iam::acct:oidc-provider/server.example.com";
+ p->creation_date = "someday";
+ p->client_ids = {"a", "b"};
+ p->thumbprints = {"c", "d"};
+ l.push_back(p);
+ l.push_back(new RGWOIDCProviderInfo);
}
-
-bool RGWOIDCProvider::validate_input(const DoutPrefixProvider *dpp)
-{
- if (provider_url.length() > MAX_OIDC_URL_LEN) {
- ldpp_dout(dpp, 0) << "ERROR: Invalid length of url " << dendl;
- return false;
- }
- if (client_ids.size() > MAX_OIDC_NUM_CLIENT_IDS) {
- ldpp_dout(dpp, 0) << "ERROR: Invalid number of client ids " << dendl;
- return false;
- }
-
- for (auto& it : client_ids) {
- if (it.length() > MAX_OIDC_CLIENT_ID_LEN) {
- return false;
- }
- }
-
- if (thumbprints.size() > MAX_OIDC_NUM_THUMBPRINTS) {
- ldpp_dout(dpp, 0) << "ERROR: Invalid number of thumbprints " << thumbprints.size() << dendl;
- return false;
- }
-
- for (auto& it : thumbprints) {
- if (it.length() > MAX_OIDC_THUMBPRINT_LEN) {
- return false;
- }
- }
-
- return true;
-}
-
-const string& RGWOIDCProvider::get_url_oid_prefix()
-{
- return oidc_url_oid_prefix;
-}
-
-} } // namespace rgw::sal
diff --git a/src/rgw/rgw_oidc_provider.h b/src/rgw/rgw_oidc_provider.h
index f317bcf9e36..f56ec15cb50 100644
--- a/src/rgw/rgw_oidc_provider.h
+++ b/src/rgw/rgw_oidc_provider.h
@@ -3,80 +3,22 @@
#pragma once
+#include <list>
#include <string>
+#include <vector>
-#include "common/ceph_context.h"
#include "common/ceph_json.h"
-#include "rgw/rgw_sal.h"
-
-namespace rgw { namespace sal {
-
-class RGWOIDCProvider
+struct RGWOIDCProviderInfo
{
-public:
- static const std::string oidc_url_oid_prefix;
- static const std::string oidc_arn_prefix;
- static constexpr int MAX_OIDC_NUM_CLIENT_IDS = 100;
- static constexpr int MAX_OIDC_CLIENT_ID_LEN = 255;
- static constexpr int MAX_OIDC_NUM_THUMBPRINTS = 5;
- static constexpr int MAX_OIDC_THUMBPRINT_LEN = 40;
- static constexpr int MAX_OIDC_URL_LEN = 255;
-
-protected:
std::string id;
std::string provider_url;
std::string arn;
std::string creation_date;
- std::string tenant;
+ std::string tenant; // tenant-name or account-id
std::vector<std::string> client_ids;
std::vector<std::string> thumbprints;
- int get_tenant_url_from_arn(std::string& tenant, std::string& url);
- virtual int store_url(const DoutPrefixProvider *dpp, const std::string& url, bool exclusive, optional_yield y) = 0;
- virtual int read_url(const DoutPrefixProvider *dpp, const std::string& url, const std::string& tenant, optional_yield y) = 0;
- bool validate_input(const DoutPrefixProvider *dpp);
-
-public:
- void set_arn(std::string _arn) {
- arn = _arn;
- }
- void set_url(std::string _provider_url) {
- provider_url = _provider_url;
- }
- void set_tenant(std::string _tenant) {
- tenant = _tenant;
- }
- void set_client_ids(std::vector<std::string>& _client_ids) {
- client_ids = std::move(_client_ids);
- }
- void set_thumbprints(std::vector<std::string>& _thumbprints) {
- thumbprints = std::move(_thumbprints);
- }
-
- RGWOIDCProvider(std::string provider_url,
- std::string tenant,
- std::vector<std::string> client_ids,
- std::vector<std::string> thumbprints)
- : provider_url(std::move(provider_url)),
- tenant(std::move(tenant)),
- client_ids(std::move(client_ids)),
- thumbprints(std::move(thumbprints)) {
- }
-
- RGWOIDCProvider( std::string arn,
- std::string tenant)
- : arn(std::move(arn)),
- tenant(std::move(tenant)) {
- }
-
- RGWOIDCProvider(std::string tenant)
- : tenant(std::move(tenant)) {}
-
- RGWOIDCProvider() {}
-
- virtual ~RGWOIDCProvider() = default;
-
void encode(bufferlist& bl) const {
ENCODE_START(3, 1, bl);
encode(id, bl);
@@ -90,7 +32,7 @@ public:
}
void decode(bufferlist::const_iterator& bl) {
- DECODE_START(2, bl);
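+ // accept up to v3, matching ENCODE_START(3, 1, bl) above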
+ DECODE_START(3, bl);
decode(id, bl);
decode(provider_url, bl);
decode(arn, bl);
@@ -101,21 +43,8 @@ public:
DECODE_FINISH(bl);
}
- const std::string& get_provider_url() const { return provider_url; }
- const std::string& get_arn() const { return arn; }
- const std::string& get_create_date() const { return creation_date; }
- const std::vector<std::string>& get_client_ids() const { return client_ids;}
- const std::vector<std::string>& get_thumbprints() const { return thumbprints; }
-
- int create(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
- virtual int delete_obj(const DoutPrefixProvider *dpp, optional_yield y) = 0;
- int get(const DoutPrefixProvider *dpp, optional_yield y);
void dump(Formatter *f) const;
- void dump_all(Formatter *f) const;
void decode_json(JSONObj *obj);
-
- static const std::string& get_url_oid_prefix();
+ static void generate_test_instances(std::list<RGWOIDCProviderInfo*>& l);
};
-WRITE_CLASS_ENCODER(RGWOIDCProvider)
-
-} } // namespace rgw::sal
+WRITE_CLASS_ENCODER(RGWOIDCProviderInfo)
diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc
index 92b01e08c0b..481e14b5416 100644
--- a/src/rgw/rgw_op.cc
+++ b/src/rgw/rgw_op.cc
@@ -56,6 +56,7 @@
#include "rgw_torrent.h"
#include "rgw_lua_data_filter.h"
#include "rgw_lua.h"
+#include "rgw_iam_managed_policy.h"
#include "services/svc_zone.h"
#include "services/svc_quota.h"
@@ -110,7 +111,7 @@ static constexpr auto S3_RUNTIME_RESOURCE_VAL = "${s3:ResourceTag";
int rgw_forward_request_to_master(const DoutPrefixProvider* dpp,
const rgw::SiteConfig& site,
- const rgw_user& uid,
+ const rgw_owner& effective_owner,
bufferlist* indata, JSONParser* jp,
req_info& req, optional_yield y)
{
@@ -143,8 +144,8 @@ int rgw_forward_request_to_master(const DoutPrefixProvider* dpp,
creds, zg->second.id, zg->second.api_name};
bufferlist outdata;
constexpr size_t max_response_size = 128 * 1024; // we expect a very small response
- int ret = conn.forward(dpp, uid, req, nullptr, max_response_size,
- indata, &outdata, y);
+ int ret = conn.forward(dpp, effective_owner, req, nullptr,
+ max_response_size, indata, &outdata, y);
if (ret < 0) {
return ret;
}
@@ -265,7 +266,7 @@ static int get_user_policy_from_attr(const DoutPrefixProvider *dpp,
int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp,
CephContext *cct,
rgw::sal::Driver* driver,
- const rgw_user& bucket_owner,
+ const rgw_owner& bucket_owner,
map<string, bufferlist>& bucket_attrs,
RGWAccessControlPolicy& policy,
optional_yield y)
@@ -278,13 +279,7 @@ int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp,
return ret;
} else {
ldpp_dout(dpp, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
- std::unique_ptr<rgw::sal::User> user = driver->get_user(bucket_owner);
- /* object exists, but policy is broken */
- int r = user->load_user(dpp, y);
- if (r < 0)
- return r;
-
- policy.create_default(user->get_id(), user->get_display_name());
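+ // generate the default ACL without loading the owning user; the display name is left empty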
+ policy.create_default(bucket_owner, "");
}
return 0;
}
@@ -292,8 +287,7 @@ int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider *dpp,
static int get_obj_policy_from_attr(const DoutPrefixProvider *dpp,
CephContext *cct,
rgw::sal::Driver* driver,
- RGWBucketInfo& bucket_info,
- map<string, bufferlist>& bucket_attrs,
+ const ACLOwner& bucket_owner,
RGWAccessControlPolicy& policy,
string *storage_class,
rgw::sal::Object* obj,
@@ -312,12 +306,8 @@ static int get_obj_policy_from_attr(const DoutPrefixProvider *dpp,
} else if (ret == -ENODATA) {
/* object exists, but policy is broken */
ldpp_dout(dpp, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
- std::unique_ptr<rgw::sal::User> user = driver->get_user(bucket_info.owner);
- ret = user->load_user(dpp, y);
- if (ret < 0)
- return ret;
-
- policy.create_default(bucket_info.owner, user->get_display_name());
+ policy.create_default(bucket_owner.id, bucket_owner.display_name);
+ ret = 0;
}
if (storage_class) {
@@ -334,12 +324,15 @@ static int get_obj_policy_from_attr(const DoutPrefixProvider *dpp,
}
-static boost::optional<Policy> get_iam_policy_from_attr(CephContext* cct,
- map<string, bufferlist>& attrs,
- const string& tenant) {
- auto i = attrs.find(RGW_ATTR_IAM_POLICY);
- if (i != attrs.end()) {
- return Policy(cct, tenant, i->second, false);
+static boost::optional<Policy>
+get_iam_policy_from_attr(CephContext* cct,
+ const map<string, bufferlist>& attrs)
+{
+ if (auto i = attrs.find(RGW_ATTR_IAM_POLICY); i != attrs.end()) {
+ // resource policy is not restricted to the current tenant
+ const std::string* policy_tenant = nullptr;
+
+ return Policy(cct, policy_tenant, i->second.to_str(), false);
} else {
return none;
}
@@ -362,23 +355,6 @@ get_public_access_conf_from_attr(const map<string, bufferlist>& attrs)
return boost::none;
}
-vector<Policy> get_iam_user_policy_from_attr(CephContext* cct,
- map<string, bufferlist>& attrs,
- const string& tenant) {
- vector<Policy> policies;
- if (auto it = attrs.find(RGW_ATTR_USER_POLICY); it != attrs.end()) {
- bufferlist out_bl = attrs[RGW_ATTR_USER_POLICY];
- map<string, string> policy_map;
- decode(policy_map, out_bl);
- for (auto& it : policy_map) {
- bufferlist bl = bufferlist::static_from_string(it.second);
- Policy p(cct, tenant, bl, false);
- policies.push_back(std::move(p));
- }
- }
- return policies;
-}
-
static int read_bucket_policy(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
req_state *s,
@@ -441,57 +417,73 @@ static int read_obj_policy(const DoutPrefixProvider *dpp,
mpobj->set_in_extra_data(true);
object = mpobj.get();
}
- policy = get_iam_policy_from_attr(s->cct, bucket_attrs, bucket->get_tenant());
+ policy = get_iam_policy_from_attr(s->cct, bucket_attrs);
- int ret = get_obj_policy_from_attr(dpp, s->cct, driver, bucket_info,
- bucket_attrs, acl, storage_class, object,
- s->yield);
+ int ret = get_obj_policy_from_attr(dpp, s->cct, driver, s->bucket_owner,
+ acl, storage_class, object, s->yield);
if (ret == -ENOENT) {
- /* object does not exist checking the bucket's ACL to make sure
- that we send a proper error code */
+ // the object doesn't exist, but we can't expose that information to clients
+ // that don't have permission to list the bucket and learn that for
+ // themselves. in that case, return -EACCES instead
RGWAccessControlPolicy bucket_policy;
ret = rgw_op_get_bucket_policy_from_attr(dpp, s->cct, driver, bucket_info.owner,
bucket_attrs, bucket_policy, y);
if (ret < 0) {
return ret;
}
- const rgw_user& bucket_owner = bucket_policy.get_owner().id;
- if (bucket_owner != s->user->get_id() &&
- ! s->auth.identity->is_admin_of(bucket_owner)) {
- auto r = eval_identity_or_session_policies(dpp, s->iam_user_policies, s->env,
- rgw::IAM::s3ListBucket, ARN(bucket->get_key()));
- if (r == Effect::Allow)
- return -ENOENT;
- if (r == Effect::Deny)
- return -EACCES;
- if (policy) {
- ARN b_arn(bucket->get_key());
- r = policy->eval(s->env, *s->auth.identity, rgw::IAM::s3ListBucket, b_arn);
- if (r == Effect::Allow)
- return -ENOENT;
- if (r == Effect::Deny)
- return -EACCES;
- }
- if (! s->session_policies.empty()) {
- r = eval_identity_or_session_policies(dpp, s->session_policies, s->env,
- rgw::IAM::s3ListBucket, ARN(bucket->get_key()));
- if (r == Effect::Allow)
- return -ENOENT;
- if (r == Effect::Deny)
- return -EACCES;
- }
- if (! bucket_policy.verify_permission(s, *s->auth.identity, s->perm_mask, RGW_PERM_READ))
- ret = -EACCES;
- else
- ret = -ENOENT;
+
+ if (s->auth.identity->is_admin_of(bucket_policy.get_owner().id)) {
+ return -ENOENT;
+ }
+
+ if (verify_bucket_permission(dpp, s, bucket->get_key(), s->user_acl,
+ bucket_policy, policy, s->iam_identity_policies,
+ s->session_policies, rgw::IAM::s3ListBucket)) {
+ return -ENOENT;
} else {
- ret = -ENOENT;
+ return -EACCES;
}
}
return ret;
}
+// try to read swift account acls from the owning user
+static int get_swift_owner_account_acl(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ rgw::sal::Driver* driver,
+ const ACLOwner& owner,
+ RGWAccessControlPolicy& policy)
+{
+ // only rgw_user owners support swift acls
+ const rgw_user* uid = std::get_if<rgw_user>(&owner.id);
+ if (uid == nullptr) {
+ return 0;
+ }
+ if (uid->empty()) {
+ return 0;
+ }
+
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(*uid);
+ int ret = user->read_attrs(dpp, y);
+ if (!ret) {
+ ret = get_user_policy_from_attr(dpp, dpp->get_cct(),
+ user->get_attrs(), policy);
+ }
+ if (-ENOENT == ret) {
+ /* In already existing clusters users won't have an ACL. In such cases,
+ * assuming that only the account owner has the rights seems
+ * reasonable, and allows us to keep a single verification logic.
+ * NOTE: there is a small compatibility kludge for the global, empty tenant:
+ * 1. if we try to reach an existing bucket, its owner is considered
+ * the account owner.
+ * 2. otherwise the account owner is the identity stored in s->owner. */
+ policy.create_default(owner.id, owner.display_name);
+ ret = 0;
+ }
+ return ret;
+}
+
/**
* Get the AccessControlPolicy for an user, bucket or object off of disk.
* s: The req_state to draw information from.
@@ -524,13 +516,8 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* d
}
}
- struct {
- rgw_user uid;
- std::string display_name;
- } acct_acl_user = {
- s->user->get_id(),
- s->user->get_display_name(),
- };
+ // ACLOwner for swift's s->user_acl; may be retargeted to s->bucket_owner below
+ const ACLOwner* acct_acl_user = &s->owner;
if (!s->bucket_name.empty()) {
s->bucket_exists = true;
@@ -560,12 +547,9 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* d
ret = read_bucket_policy(dpp, driver, s, s->bucket->get_info(),
s->bucket->get_attrs(),
s->bucket_acl, s->bucket->get_key(), y);
- acct_acl_user = {
- s->bucket->get_info().owner,
- s->bucket_acl.get_owner().display_name,
- };
s->bucket_owner = s->bucket_acl.get_owner();
+ acct_acl_user = &s->bucket_owner;
s->zonegroup_endpoint = rgw::get_zonegroup_endpoint(zonegroup);
s->zonegroup_name = zonegroup.get_name();
@@ -602,56 +586,16 @@ int rgw_build_bucket_policies(const DoutPrefixProvider *dpp, rgw::sal::Driver* d
/* handle user ACL only for those APIs which support it */
if (s->dialect == "swift" && !s->user->get_id().empty()) {
- std::unique_ptr<rgw::sal::User> acl_user = driver->get_user(acct_acl_user.uid);
-
- ret = acl_user->read_attrs(dpp, y);
- if (!ret) {
- ret = get_user_policy_from_attr(dpp, s->cct, acl_user->get_attrs(), s->user_acl);
- }
- if (-ENOENT == ret) {
- /* In already existing clusters users won't have ACL. In such case
- * assuming that only account owner has the rights seems to be
- * reasonable. That allows to have only one verification logic.
- * NOTE: there is small compatibility kludge for global, empty tenant:
- * 1. if we try to reach an existing bucket, its owner is considered
- * as account owner.
- * 2. otherwise account owner is identity stored in s->user->user_id. */
- s->user_acl.create_default(acct_acl_user.uid,
- acct_acl_user.display_name);
- ret = 0;
- } else if (ret < 0) {
+ ret = get_swift_owner_account_acl(dpp, y, driver, *acct_acl_user, s->user_acl);
+ if (ret < 0) {
ldpp_dout(dpp, 0) << "NOTICE: couldn't get user attrs for handling ACL "
"(user_id=" << s->user->get_id() << ", ret=" << ret << ")" << dendl;
return ret;
}
}
- // We don't need user policies in case of STS token returned by AssumeRole,
- // hence the check for user type
- if (! s->user->get_id().empty() && s->auth.identity->get_identity_type() != TYPE_ROLE) {
- try {
- ret = s->user->read_attrs(dpp, y);
- if (ret == 0) {
- auto user_policies = get_iam_user_policy_from_attr(s->cct,
- s->user->get_attrs(),
- s->user->get_tenant());
- s->iam_user_policies.insert(s->iam_user_policies.end(),
- std::make_move_iterator(user_policies.begin()),
- std::make_move_iterator(user_policies.end()));
- } else {
- if (ret == -ENOENT)
- ret = 0;
- else ret = -EACCES;
- }
- } catch (const std::exception& e) {
- ldpp_dout(dpp, -1) << "Error reading IAM User Policy: " << e.what() << dendl;
- if (!s->system_request) {
- ret = -EACCES;
- }
- }
- }
try {
- s->iam_policy = get_iam_policy_from_attr(s->cct, s->bucket_attrs, s->bucket_tenant);
+ s->iam_policy = get_iam_policy_from_attr(s->cct, s->bucket_attrs);
} catch (const std::exception& e) {
ldpp_dout(dpp, 0) << "Error reading IAM Policy: " << e.what() << dendl;
@@ -876,7 +820,7 @@ static std::tuple<bool, bool> rgw_check_policy_condition(const DoutPrefixProvide
}
static std::tuple<bool, bool> rgw_check_policy_condition(const DoutPrefixProvider *dpp, req_state* s, bool check_obj_exist_tag=true) {
- return rgw_check_policy_condition(dpp, s->iam_policy, s->iam_user_policies, s->session_policies, check_obj_exist_tag);
+ return rgw_check_policy_condition(dpp, s->iam_policy, s->iam_identity_policies, s->session_policies, check_obj_exist_tag);
}
static void rgw_add_grant_to_iam_environment(rgw::IAM::Environment& e, req_state *s){
@@ -1236,7 +1180,11 @@ int RGWPutBucketTags::verify_permission(optional_yield y) {
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketTagging);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketTagging)) {
+ return -EACCES;
+ }
+
+ return 0;
}
void RGWPutBucketTags::execute(optional_yield y)
@@ -1246,7 +1194,7 @@ void RGWPutBucketTags::execute(optional_yield y)
if (op_ret < 0)
return;
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -1272,12 +1220,16 @@ int RGWDeleteBucketTags::verify_permission(optional_yield y)
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketTagging);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketTagging)) {
+ return -EACCES;
+ }
+
+ return 0;
}
void RGWDeleteBucketTags::execute(optional_yield y)
{
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
nullptr, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -1324,7 +1276,12 @@ int RGWPutBucketReplication::verify_permission(optional_yield y) {
auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutReplicationConfiguration);
+
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutReplicationConfiguration)) {
+ return -EACCES;
+ }
+
+ return 0;
}
void RGWPutBucketReplication::execute(optional_yield y) {
@@ -1333,7 +1290,7 @@ void RGWPutBucketReplication::execute(optional_yield y) {
if (op_ret < 0)
return;
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -1370,12 +1327,16 @@ int RGWDeleteBucketReplication::verify_permission(optional_yield y)
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteReplicationConfiguration);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3DeleteReplicationConfiguration)) {
+ return -EACCES;
+ }
+
+ return 0;
}
void RGWDeleteBucketReplication::execute(optional_yield y)
{
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
nullptr, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -1426,6 +1387,34 @@ int RGWOp::do_aws4_auth_completion()
return 0;
}
+static int get_owner_quota_info(DoutPrefixProvider* dpp,
+ optional_yield y,
+ rgw::sal::Driver* driver,
+ const rgw_owner& owner,
+ RGWQuota& quotas)
+{
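+ // dispatch on the owner variant: users carry a full RGWQuota, accounts only a user-level quota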
+ return std::visit(fu2::overload(
+ [&] (const rgw_user& uid) {
+ auto user = driver->get_user(uid);
+ int r = user->load_user(dpp, y);
+ if (r >= 0) {
+ quotas = user->get_info().quota;
+ }
+ return r;
+ },
+ [&] (const rgw_account_id& account_id) {
+ RGWAccountInfo info;
+ rgw::sal::Attrs attrs; // ignored
+ RGWObjVersionTracker objv; // ignored
+ int r = driver->load_account_by_id(dpp, y, account_id, info, attrs, objv);
+ if (r >= 0) {
+ // no bucket quota
+ quotas.user_quota = info.quota;
+ }
+ return r;
+ }), owner);
+}
+
int RGWOp::init_quota()
{
/* no quota enforcement for system requests */
@@ -1442,30 +1431,25 @@ int RGWOp::init_quota()
return 0;
}
- std::unique_ptr<rgw::sal::User> owner_user =
- driver->get_user(s->bucket->get_info().owner);
- rgw::sal::User* user;
+ RGWQuota user_quotas;
- if (s->user->get_id() == s->bucket_owner.id) {
- user = s->user.get();
- } else {
- int r = owner_user->load_user(this, s->yield);
- if (r < 0)
- return r;
- user = owner_user.get();
-
+ // consult the bucket owner's quota
+ int r = get_owner_quota_info(this, s->yield, driver,
+ s->bucket_owner.id, user_quotas);
+ if (r < 0) {
+ return r;
}
driver->get_quota(quota);
if (s->bucket->get_info().quota.enabled) {
quota.bucket_quota = s->bucket->get_info().quota;
- } else if (user->get_info().quota.bucket_quota.enabled) {
- quota.bucket_quota = user->get_info().quota.bucket_quota;
+ } else if (user_quotas.bucket_quota.enabled) {
+ quota.bucket_quota = user_quotas.bucket_quota;
}
- if (user->get_info().quota.user_quota.enabled) {
- quota.user_quota = user->get_info().quota.user_quota;
+ if (user_quotas.user_quota.enabled) {
+ quota.user_quota = user_quotas.user_quota;
}
return 0;
@@ -1724,7 +1708,7 @@ int RGWGetObj::read_user_manifest_part(rgw::sal::Bucket* bucket,
ldpp_dout(this, 2) << "overriding permissions due to admin operation" << dendl;
} else if (!verify_object_permission(this, s, part->get_obj(), s->user_acl,
bucket_acl, obj_policy, bucket_policy,
- s->iam_user_policies, s->session_policies, action)) {
+ s->iam_identity_policies, s->session_policies, action)) {
return -EPERM;
}
if (ent.meta.size == 0) {
@@ -1955,6 +1939,7 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y)
return -EINVAL;
}
+ const std::string& auth_tenant = s->auth.identity->get_tenant();
const std::string bucket_name = url_decode(prefix_view.substr(0, pos));
const std::string obj_prefix = url_decode(prefix_view.substr(pos + 1));
@@ -1969,7 +1954,7 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y)
if (bucket_name.compare(s->bucket->get_name()) != 0) {
map<string, bufferlist> bucket_attrs;
- r = driver->load_bucket(this, rgw_bucket(s->user->get_tenant(), bucket_name),
+ r = driver->load_bucket(this, rgw_bucket(auth_tenant, bucket_name),
&ubucket, y);
if (r < 0) {
ldpp_dout(this, 0) << "could not get bucket info for bucket="
@@ -1982,7 +1967,7 @@ int RGWGetObj::handle_user_manifest(const char *prefix, optional_yield y)
ldpp_dout(this, 0) << "failed to read bucket policy" << dendl;
return r;
}
- _bucket_policy = get_iam_policy_from_attr(s->cct, bucket_attrs, s->user->get_tenant());
+ _bucket_policy = get_iam_policy_from_attr(s->cct, bucket_attrs);
bucket_policy = &_bucket_policy;
pbucket = ubucket.get();
} else {
@@ -2054,6 +2039,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y)
vector<RGWAccessControlPolicy> allocated_acls;
map<string, pair<RGWAccessControlPolicy *, boost::optional<Policy>>> policies;
map<string, std::unique_ptr<rgw::sal::Bucket>> buckets;
+ const std::string& auth_tenant = s->auth.identity->get_tenant();
map<uint64_t, rgw_slo_part> slo_parts;
@@ -2099,8 +2085,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y)
RGWAccessControlPolicy& _bucket_acl = allocated_acls.emplace_back();
std::unique_ptr<rgw::sal::Bucket> tmp_bucket;
- int r = driver->load_bucket(this, rgw_bucket(s->user->get_tenant(),
- bucket_name),
+ int r = driver->load_bucket(this, rgw_bucket(auth_tenant, bucket_name),
&tmp_bucket, y);
if (r < 0) {
ldpp_dout(this, 0) << "could not get bucket info for bucket="
@@ -2117,7 +2102,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl, optional_yield y)
return r;
}
auto _bucket_policy = get_iam_policy_from_attr(
- s->cct, tmp_bucket->get_attrs(), tmp_bucket->get_tenant());
+ s->cct, tmp_bucket->get_attrs());
bucket_policy = _bucket_policy.get_ptr();
buckets[bucket_name].swap(tmp_bucket);
policies[bucket_name] = make_pair(bucket_acl, _bucket_policy);
@@ -2509,13 +2494,7 @@ int RGWListBuckets::verify_permission(optional_yield y)
rgw::Partition partition = rgw::Partition::aws;
rgw::Service service = rgw::Service::s3;
- string tenant;
- if (s->auth.identity->get_identity_type() == TYPE_ROLE) {
- tenant = s->auth.identity->get_role_tenant();
- } else {
- tenant = s->user->get_tenant();
- }
-
+ const std::string& tenant = s->auth.identity->get_tenant();
if (!verify_user_permission(this, s, ARN(partition, service, "", tenant, "*"), rgw::IAM::s3ListAllMyBuckets, false)) {
return -EACCES;
}
@@ -2582,13 +2561,20 @@ void RGWListBuckets::execute(optional_yield y)
read_count = max_buckets;
}
- op_ret = s->user->list_buckets(this, marker, end_marker, read_count, should_get_stats(), listing, y);
+ if (s->auth.identity->is_anonymous()) {
+ ldpp_dout(this, 20) << "skipping list_buckets() for anonymous user" << dendl;
+ marker.clear();
+ break;
+ }
+
+ op_ret = driver->list_buckets(this, s->owner.id, s->auth.identity->get_tenant(),
+ marker, end_marker, read_count, should_get_stats(), listing, y);
if (op_ret < 0) {
/* hmm.. something wrong here.. the user was authenticated, so it
should exist */
- ldpp_dout(this, 10) << "WARNING: failed on rgw_get_user_buckets uid="
- << s->user->get_id() << dendl;
+ ldpp_dout(this, 10) << "WARNING: failed on list_buckets owner="
+ << s->owner.id << dendl;
break;
}
@@ -2666,7 +2652,8 @@ void RGWGetUsage::execute(optional_yield y)
}
}
- op_ret = rgw_user_sync_all_stats(this, driver, s->user.get(), y);
+ op_ret = rgw_sync_all_stats(this, y, driver, s->user->get_id(),
+ s->user->get_tenant());
if (op_ret < 0) {
ldpp_dout(this, 0) << "ERROR: failed to sync user stats" << dendl;
return;
@@ -2678,7 +2665,10 @@ void RGWGetUsage::execute(optional_yield y)
return;
}
- op_ret = s->user->read_stats(this, y, &stats);
+ ceph::real_time synced; // ignored
+ ceph::real_time updated; // ignored
+ op_ret = driver->load_stats(this, y, s->user->get_id(),
+ stats, synced, updated);
if (op_ret < 0) {
ldpp_dout(this, 0) << "ERROR: can't read user header" << dendl;
return;
@@ -2711,13 +2701,14 @@ void RGWStatAccount::execute(optional_yield y)
rgw::sal::BucketList listing;
do {
- op_ret = s->user->list_buckets(this, listing.next_marker, string(),
- max_buckets, true, listing, y);
+ op_ret = driver->list_buckets(this, s->owner.id, s->auth.identity->get_tenant(),
+ listing.next_marker, string(),
+ max_buckets, true, listing, y);
if (op_ret < 0) {
/* hmm.. something wrong here.. the user was authenticated, so it
should exist */
- ldpp_dout(this, 10) << "WARNING: failed on list_buckets uid="
- << s->user->get_id() << " ret=" << op_ret << dendl;
+ ldpp_dout(this, 10) << "WARNING: failed on list_buckets owner="
+ << s->owner.id << " ret=" << op_ret << dendl;
return;
}
@@ -2744,7 +2735,11 @@ int RGWGetBucketVersioning::verify_permission(optional_yield y)
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketVersioning);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketVersioning)) {
+ return -EACCES;
+ }
+
+ return 0;
}
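
This five-line shape repeats for each bucket-level op below: the old helper returned an errno directly, while verify_bucket_permission() returns a bool that every caller maps to -EACCES. The adaptation in isolation, with a stub in place of the real check:

#include <cerrno>
#include <iostream>

bool verify_bucket_permission_stub(int /*action*/) { return false; } // stand-in

int verify_permission(int action) {
  if (!verify_bucket_permission_stub(action)) {
    return -EACCES;
  }
  return 0;
}

int main() { std::cout << verify_permission(0) << "\n"; } // prints -13 on Linux
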
void RGWGetBucketVersioning::pre_exec()
@@ -2770,7 +2765,11 @@ int RGWSetBucketVersioning::verify_permission(optional_yield y)
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketVersioning);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketVersioning)) {
+ return -EACCES;
+ }
+
+ return 0;
}
void RGWSetBucketVersioning::pre_exec()
@@ -2820,7 +2819,7 @@ void RGWSetBucketVersioning::execute(optional_yield y)
}
}
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -2869,7 +2868,11 @@ int RGWGetBucketWebsite::verify_permission(optional_yield y)
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketWebsite);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketWebsite)) {
+ return -EACCES;
+ }
+
+ return 0;
}
void RGWGetBucketWebsite::pre_exec()
@@ -2890,7 +2893,11 @@ int RGWSetBucketWebsite::verify_permission(optional_yield y)
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketWebsite);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketWebsite)) {
+ return -EACCES;
+ }
+
+ return 0;
}
void RGWSetBucketWebsite::pre_exec()
@@ -2910,7 +2917,7 @@ void RGWSetBucketWebsite::execute(optional_yield y)
return;
}
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl;
@@ -2937,7 +2944,11 @@ int RGWDeleteBucketWebsite::verify_permission(optional_yield y)
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3DeleteBucketWebsite);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3DeleteBucketWebsite)) {
+ return -EACCES;
+ }
+
+ return 0;
}
void RGWDeleteBucketWebsite::pre_exec()
@@ -2952,7 +2963,7 @@ void RGWDeleteBucketWebsite::execute(optional_yield y)
return;
}
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
nullptr, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "NOTICE: forward_to_master failed on bucket=" << s->bucket->get_name()
@@ -3118,7 +3129,11 @@ int RGWGetBucketLogging::verify_permission(optional_yield y)
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLogging);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketLogging)) {
+ return -EACCES;
+ }
+
+ return 0;
}
int RGWGetBucketLocation::verify_permission(optional_yield y)
@@ -3127,26 +3142,66 @@ int RGWGetBucketLocation::verify_permission(optional_yield y)
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketLocation);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketLocation)) {
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+static int get_account_max_buckets(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ rgw::sal::Driver* driver,
+ const rgw_account_id& id,
+ int32_t& max_buckets)
+{
+ RGWAccountInfo info;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+
+ int ret = driver->load_account_by_id(dpp, y, id, info, attrs, objv);
+ if (ret < 0) {
+ ldpp_dout(dpp, 4) << "failed to load account owner: " << cpp_strerror(ret) << dendl;
+ return ret;
+ }
+
+ max_buckets = info.max_buckets;
+ return 0;
}
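
The std::get_if<rgw_account_id> dispatch just below implies the owner id is now a variant over user and account identities, with account owners taking the load_account_by_id() path for their quota. A minimal model under that assumption (types and numbers are stand-ins):

#include <cstdint>
#include <iostream>
#include <string>
#include <variant>

struct UserId { std::string id; };
using AccountId = std::string;
using OwnerId = std::variant<UserId, AccountId>;

int32_t max_buckets_for(const OwnerId& owner) {
  if (std::get_if<AccountId>(&owner)) {
    return 500;   // stand-in for the account-info lookup
  }
  return 1000;    // stand-in for the per-user setting
}

int main() {
  OwnerId o = AccountId{"RGW11111111111111111"};
  std::cout << max_buckets_for(o) << "\n"; // 500
}
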
// list the user's buckets to check whether they're at their maximum
-static int check_user_max_buckets(const DoutPrefixProvider* dpp,
- rgw::sal::User& user, optional_yield y)
+static int check_owner_max_buckets(const DoutPrefixProvider* dpp,
+ rgw::sal::Driver* driver, req_state* s,
+ optional_yield y)
{
- int32_t remaining = user.get_max_buckets();
+ int32_t remaining = 0;
+
+ const rgw_account_id* account = std::get_if<rgw_account_id>(&s->owner.id);
+ if (account) {
+ int ret = get_account_max_buckets(dpp, y, driver, *account, remaining);
+ if (ret < 0) {
+ return ret;
+ }
+ } else {
+ remaining = s->user->get_max_buckets();
+ }
+
+ if (remaining < 0) {
+ return -EPERM;
+ }
if (!remaining) { // unlimited
return 0;
}
- uint64_t max_buckets = dpp->get_cct()->_conf->rgw_list_buckets_max_chunk;
+ const uint64_t chunk_size = dpp->get_cct()->_conf->rgw_list_buckets_max_chunk;
+ const std::string& tenant = s->auth.identity->get_tenant();
rgw::sal::BucketList listing;
do {
- size_t to_read = std::max<size_t>(max_buckets, remaining);
+ size_t to_read = std::max<size_t>(chunk_size, remaining);
- int ret = user.list_buckets(dpp, listing.next_marker, string(),
- to_read, false, listing, y);
+ int ret = driver->list_buckets(dpp, s->owner.id, tenant, listing.next_marker,
+ "", to_read, false, listing, y);
if (ret < 0) {
return ret;
}
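
The loop above pages through the owner's buckets in chunks, debiting the allowance as it goes; in this scheme a negative quota means "always denied" and zero means "unlimited". A toy version of that flow (list_page() is a stand-in for driver->list_buckets(), and the over-quota step is paraphrased since it falls below this hunk):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct Page { size_t count; bool more; };

Page list_page(const std::vector<std::string>& all, size_t& pos, size_t to_read) {
  size_t n = std::min(to_read, all.size() - pos);
  pos += n;
  return {n, pos < all.size()};
}

int check(int32_t remaining, size_t chunk, const std::vector<std::string>& all) {
  if (remaining < 0) return -1;  // negative quota: denied outright
  if (remaining == 0) return 0;  // zero: unlimited
  size_t pos = 0;
  bool more = true;
  while (more) {
    Page p = list_page(all, pos, std::max<size_t>(chunk, remaining));
    if (p.count >= static_cast<size_t>(remaining)) return -2; // at the limit
    remaining -= p.count;
    more = p.more;
  }
  return 0;
}

int main() {
  std::vector<std::string> buckets = {"a", "b", "c"};
  std::cout << check(3, 1000, buckets) << "\n"; // -2: one more would exceed 3
}
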
@@ -3177,7 +3232,7 @@ int RGWCreateBucket::verify_permission(optional_yield y)
return -EACCES;
}
- if (s->user->get_tenant() != s->bucket_tenant) {
+ if (s->auth.identity->get_tenant() != s->bucket_tenant) {
//AssumeRole is meant for cross account access
if (s->auth.identity->get_identity_type() != TYPE_ROLE) {
ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant"
@@ -3188,11 +3243,7 @@ int RGWCreateBucket::verify_permission(optional_yield y)
}
}
- if (s->user->get_max_buckets() < 0) {
- return -EPERM;
- }
-
- return check_user_max_buckets(this, *s->user, y);
+ return check_owner_max_buckets(this, driver, s, y);
}
void RGWCreateBucket::pre_exec()
@@ -3522,9 +3573,8 @@ void RGWCreateBucket::execute(optional_yield y)
}
}
- s->bucket_owner.id = s->user->get_id();
- s->bucket_owner.display_name = s->user->get_display_name();
- createparams.owner = s->user->get_id();
+ s->bucket_owner = policy.get_owner();
+ createparams.owner = s->bucket_owner.id;
buffer::list aclbl;
policy.encode(aclbl);
@@ -3564,7 +3614,7 @@ void RGWCreateBucket::execute(optional_yield y)
// apply bucket creation on the master zone first
bufferlist in_data;
JSONParser jp;
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&in_data, &jp, s->info, y);
if (op_ret < 0) {
return;
@@ -3605,7 +3655,7 @@ void RGWCreateBucket::execute(optional_yield y)
op_ret = s->bucket->load_bucket(this, y);
if (op_ret < 0) {
return;
- } else if (s->bucket->get_owner() != s->user->get_id()) {
+ } else if (!s->auth.identity->is_owner_of(s->bucket->get_owner())) {
/* New bucket doesn't belong to the account we're operating on. */
op_ret = -EEXIST;
return;
@@ -3699,7 +3749,7 @@ void RGWDeleteBucket::execute(optional_yield y)
}
}
- op_ret = s->bucket->sync_user_stats(this, y, nullptr);
+ op_ret = s->bucket->sync_owner_stats(this, y, nullptr);
if ( op_ret < 0) {
ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
}
@@ -3709,7 +3759,7 @@ void RGWDeleteBucket::execute(optional_yield y)
return;
}
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
nullptr, nullptr, s->info, y);
if (op_ret < 0) {
if (op_ret == -ENOENT) {
@@ -3773,7 +3823,7 @@ int RGWPutObj::init_processing(optional_yield y) {
pos = copy_source_bucket_name.find(":");
if (pos == std::string::npos) {
// if tenant is not specified in x-amz-copy-source, use tenant of the requester
- copy_source_tenant_name = s->user->get_tenant();
+ copy_source_tenant_name = s->auth.identity->get_tenant();
} else {
copy_source_tenant_name = copy_source_bucket_name.substr(0, pos);
copy_source_bucket_name = copy_source_bucket_name.substr(pos + 1, copy_source_bucket_name.size());
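
The tenant split above in miniature: an explicit "tenant:bucket" prefix in x-amz-copy-source wins, otherwise the requester's tenant applies.

#include <iostream>
#include <string>
#include <utility>

std::pair<std::string, std::string> split_source(const std::string& src,
                                                 const std::string& requester_tenant) {
  auto pos = src.find(':');
  if (pos == std::string::npos) return {requester_tenant, src};
  return {src.substr(0, pos), src.substr(pos + 1)};
}

int main() {
  auto [t1, b1] = split_source("photos", "tenant-a");
  auto [t2, b2] = split_source("tenant-b:photos", "tenant-a");
  std::cout << t1 << "/" << b1 << " " << t2 << "/" << b2 << "\n";
  // prints: tenant-a/photos tenant-b/photos
}
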
@@ -3829,8 +3879,22 @@ int RGWPutObj::init_processing(optional_yield y) {
return ret;
}
}
-
} /* copy_source */
+
+  // reject public canned acls (note: compare() returns 0 on equality)
+  if (s->bucket_access_conf && s->bucket_access_conf->block_public_acls() &&
+      (s->canned_acl.compare("public-read") == 0 ||
+       s->canned_acl.compare("public-read-write") == 0 ||
+       s->canned_acl.compare("authenticated-read") == 0)) {
+    return -EACCES;
+  }
+
+ ret = get_params(y);
+ if (ret < 0) {
+ ldpp_dout(this, 20) << "get_params() returned ret=" << ret << dendl;
+ return ret;
+ }
+
return RGWOp::init_processing(y);
}
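
The explicit == 0 in the canned-acl check matters because std::string::compare returns 0 on equality, so a bare compare() in a boolean context is true exactly when the strings differ:

#include <iostream>
#include <string>

int main() {
  std::string acl = "private";
  std::cout << std::boolalpha
            << (acl.compare("public-read") != 0) << "\n"  // true: different
            << (acl.compare("private") == 0) << "\n";     // true: equal
}
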
@@ -3854,129 +3918,54 @@ int RGWPutObj::verify_permission(optional_yield y)
return ret;
}
- /* admin request overrides permission checks */
- if (! s->auth.identity->is_admin_of(cs_acl.get_owner().id)) {
- if (policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
- //add source object tags for permission evaluation
- auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, policy, s->iam_user_policies, s->session_policies);
- if (has_s3_existing_tag || has_s3_resource_tag)
- rgw_iam_add_objtags(this, s, cs_object.get(), has_s3_existing_tag, has_s3_resource_tag);
- auto usr_policy_res = Effect::Pass;
- rgw::ARN obj_arn(cs_object->get_obj());
- for (auto& user_policy : s->iam_user_policies) {
- if (usr_policy_res = user_policy.eval(s->env, boost::none,
- cs_object->get_instance().empty() ?
- rgw::IAM::s3GetObject :
- rgw::IAM::s3GetObjectVersion,
- obj_arn); usr_policy_res == Effect::Deny)
- return -EACCES;
- else if (usr_policy_res == Effect::Allow)
- break;
- }
- rgw::IAM::Effect e = Effect::Pass;
- if (policy) {
- rgw::ARN obj_arn(cs_object->get_obj());
- e = policy->eval(s->env, *s->auth.identity,
- cs_object->get_instance().empty() ?
- rgw::IAM::s3GetObject :
- rgw::IAM::s3GetObjectVersion,
- obj_arn);
- }
- if (e == Effect::Deny) {
- return -EACCES;
- } else if (usr_policy_res == Effect::Pass && e == Effect::Pass &&
- !cs_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
- RGW_PERM_READ)) {
- return -EACCES;
- }
- rgw_iam_remove_objtags(this, s, cs_object.get(), has_s3_existing_tag, has_s3_resource_tag);
- } else if (!cs_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
- RGW_PERM_READ)) {
- return -EACCES;
- }
+ RGWAccessControlPolicy cs_bucket_acl;
+ ret = rgw_op_get_bucket_policy_from_attr(this, s->cct, driver,
+ copy_source_bucket_info.owner,
+ cs_attrs, cs_bucket_acl, y);
+ if (ret < 0) {
+ return ret;
}
- }
- if (s->bucket_access_conf && s->bucket_access_conf->block_public_acls()) {
- if (s->canned_acl.compare("public-read") ||
- s->canned_acl.compare("public-read-write") ||
- s->canned_acl.compare("authenticated-read"))
- return -EACCES;
- }
-
- int ret = get_params(y);
- if (ret < 0) {
- ldpp_dout(this, 20) << "get_params() returned ret=" << ret << dendl;
- return ret;
- }
-
- if (s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
- rgw_add_grant_to_iam_environment(s->env, s);
+ // add source object tags for permission evaluation
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, policy, s->iam_identity_policies, s->session_policies);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, cs_object.get(), has_s3_existing_tag, has_s3_resource_tag);
- rgw_add_to_iam_environment(s->env, "s3:x-amz-acl", s->canned_acl);
+ const auto action = cs_object->get_instance().empty() ?
+ rgw::IAM::s3GetObject :
+ rgw::IAM::s3GetObjectVersion;
- if (obj_tags != nullptr && obj_tags->count() > 0){
- auto tags = obj_tags->get_tags();
- for (const auto& kv: tags){
- rgw_add_to_iam_environment(s->env, "s3:RequestObjectTag/"+kv.first, kv.second);
- }
+ if (!verify_object_permission(this, s, cs_object->get_obj(),
+ s->user_acl, cs_bucket_acl, cs_acl,
+ policy, s->iam_identity_policies,
+ s->session_policies, action)) {
+ return -EACCES;
}
- // add server-side encryption headers
- rgw_iam_add_crypt_attrs(s->env, s->info.crypt_attribute_map);
-
- // Add bucket tags for authorization
- auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
- if (has_s3_resource_tag)
- rgw_iam_add_buckettags(this, s);
+ rgw_iam_remove_objtags(this, s, cs_object.get(), has_s3_existing_tag, has_s3_resource_tag);
+ }
- auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env,
- rgw::IAM::s3PutObject,
- s->object->get_obj());
- if (identity_policy_res == Effect::Deny)
- return -EACCES;
+ rgw_add_grant_to_iam_environment(s->env, s);
- rgw::IAM::Effect e = Effect::Pass;
- rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
- if (s->iam_policy) {
- ARN obj_arn(s->object->get_obj());
- e = s->iam_policy->eval(s->env, *s->auth.identity,
- rgw::IAM::s3PutObject,
- obj_arn,
- princ_type);
- }
- if (e == Effect::Deny) {
- return -EACCES;
- }
+ rgw_add_to_iam_environment(s->env, "s3:x-amz-acl", s->canned_acl);
- if (!s->session_policies.empty()) {
- auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env,
- rgw::IAM::s3PutObject,
- s->object->get_obj());
- if (session_policy_res == Effect::Deny) {
- return -EACCES;
- }
- if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
- //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
- (session_policy_res == Effect::Allow && e == Effect::Allow))
- return 0;
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
- //Intersection of session policy and identity policy plus bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow)
- return 0;
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
- if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow)
- return 0;
- }
- return -EACCES;
- }
- if (e == Effect::Allow || identity_policy_res == Effect::Allow) {
- return 0;
+ if (obj_tags != nullptr && obj_tags->count() > 0){
+ auto tags = obj_tags->get_tags();
+ for (const auto& kv: tags){
+ rgw_add_to_iam_environment(s->env, "s3:RequestObjectTag/"+kv.first, kv.second);
}
}
- if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
+ // add server-side encryption headers
+ rgw_iam_add_crypt_attrs(s->env, s->info.crypt_attribute_map);
+
+ // Add bucket tags for authorization
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ if (!verify_bucket_permission(this, s, ARN(s->object->get_obj()),
+ rgw::IAM::s3PutObject)) {
return -EACCES;
}
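
The boilerplate deleted above spelled out the identity/bucket/session policy intersection at every call site; verify_bucket_permission() now centralizes it. A compact model of that decision table, derived from the removed code (stand-in types, not the rgw IAM evaluator):

#include <iostream>

enum class Effect { Allow, Deny, Pass };
enum class Principal { Role, Session, Other };

bool allowed(Effect identity, Effect bucket, bool has_session, Effect session,
             Principal princ, bool acl_allows) {
  if (identity == Effect::Deny || bucket == Effect::Deny) return false;
  if (has_session) {
    if (session == Effect::Deny) return false;
    switch (princ) {
      case Principal::Role:    // session ∩ identity, or session ∩ bucket
        return session == Effect::Allow &&
               (identity == Effect::Allow || bucket == Effect::Allow);
      case Principal::Session: // session ∩ identity, or bucket alone
        return (session == Effect::Allow && identity == Effect::Allow) ||
               bucket == Effect::Allow;
      case Principal::Other:   // bucket policy had no match
        return session == Effect::Allow && identity == Effect::Allow;
    }
    return false;
  }
  if (identity == Effect::Allow || bucket == Effect::Allow) return true;
  return acl_allows; // everything passed: fall back to the ACL check
}

int main() {
  std::cout << std::boolalpha
            << allowed(Effect::Allow, Effect::Pass, false, Effect::Pass,
                       Principal::Other, false) << "\n"; // true
}
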
@@ -4211,7 +4200,8 @@ void RGWPutObj::execute(optional_yield y)
/* Handle object versioning of Swift API. */
if (! multipart) {
- op_ret = s->object->swift_versioning_copy(this, s->yield);
+ op_ret = s->object->swift_versioning_copy(s->owner, s->user->get_id(),
+ this, s->yield);
if (op_ret < 0) {
return;
}
@@ -4258,7 +4248,7 @@ void RGWPutObj::execute(optional_yield y)
pdest_placement = &s->dest_placement;
ldpp_dout(this, 20) << "dest_placement for part=" << *pdest_placement << dendl;
processor = upload->get_writer(this, s->yield, s->object.get(),
- s->user->get_id(), pdest_placement,
+ s->owner, pdest_placement,
multipart_part_num, multipart_part_str);
} else if(append) {
if (s->bucket->versioned()) {
@@ -4266,7 +4256,7 @@ void RGWPutObj::execute(optional_yield y)
return;
}
processor = driver->get_append_writer(this, s->yield, s->object.get(),
- s->bucket_owner.id,
+ s->owner,
pdest_placement, s->req_id, position,
&cur_accounted_size);
} else {
@@ -4279,7 +4269,7 @@ void RGWPutObj::execute(optional_yield y)
}
}
processor = driver->get_atomic_writer(this, s->yield, s->object.get(),
- s->bucket_owner.id,
+ s->owner,
pdest_placement, olh_epoch, s->req_id);
}
@@ -4554,8 +4544,34 @@ void RGWPutObj::execute(optional_yield y)
}
}
+int RGWPostObj::init_processing(optional_yield y)
+{
+ /* Read in the data from the POST form. */
+ int ret = get_params(y);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = verify_params();
+ if (ret < 0) {
+ return ret;
+ }
+
+ return RGWOp::init_processing(y);
+}
+
int RGWPostObj::verify_permission(optional_yield y)
{
+ // add server-side encryption headers
+ rgw_iam_add_crypt_attrs(s->env, s->info.crypt_attribute_map);
+
+ ldpp_dout(this, 20) << "user policy count=" << s->iam_identity_policies.size() << dendl;
+
+ if (!verify_bucket_permission(this, s, ARN(s->object->get_obj()),
+ rgw::IAM::s3PutObject)) {
+ return -EACCES;
+ }
+
return 0;
}
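
The new init_processing() override only makes sense if it runs before verify_permission(), so the POST form parameters are populated by the time permissions are evaluated. A hypothetical driver loop illustrating that ordering (names are illustrative; the actual dispatch lives elsewhere in rgw):

#include <iostream>

struct Op {
  virtual int init_processing() { return 0; } // reads request params now
  virtual int verify_permission() = 0;
  virtual void pre_exec() {}
  virtual void execute() = 0;
  virtual ~Op() = default;
};

int run(Op& op) {
  if (int r = op.init_processing(); r < 0) return r; // bad params fail early
  if (int r = op.verify_permission(); r < 0) return r;
  op.pre_exec();
  op.execute();
  return 0;
}

struct PostObj : Op {
  int verify_permission() override { return 0; }
  void execute() override { std::cout << "execute\n"; }
};

int main() { PostObj op; return run(op); }
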
@@ -4570,82 +4586,6 @@ void RGWPostObj::execute(optional_yield y)
CompressorRef plugin;
char supplied_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
- /* Read in the data from the POST form. */
- op_ret = get_params(y);
- if (op_ret < 0) {
- return;
- }
-
- op_ret = verify_params();
- if (op_ret < 0) {
- return;
- }
-
- // add server-side encryption headers
- rgw_iam_add_crypt_attrs(s->env, s->info.crypt_attribute_map);
-
- if (s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
- auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env,
- rgw::IAM::s3PutObject,
- s->object->get_obj());
- if (identity_policy_res == Effect::Deny) {
- op_ret = -EACCES;
- return;
- }
-
- rgw::IAM::Effect e = Effect::Pass;
- rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
- if (s->iam_policy) {
- ARN obj_arn(s->object->get_obj());
- e = s->iam_policy->eval(s->env, *s->auth.identity,
- rgw::IAM::s3PutObject,
- obj_arn,
- princ_type);
- }
- if (e == Effect::Deny) {
- op_ret = -EACCES;
- return;
- }
-
- if (!s->session_policies.empty()) {
- auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env,
- rgw::IAM::s3PutObject,
- s->object->get_obj());
- if (session_policy_res == Effect::Deny) {
- op_ret = -EACCES;
- return;
- }
- if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
- //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
- (session_policy_res == Effect::Allow && e == Effect::Allow)) {
- op_ret = 0;
- return;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
- //Intersection of session policy and identity policy plus bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) {
- op_ret = 0;
- return;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
- if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) {
- op_ret = 0;
- return;
- }
- }
- op_ret = -EACCES;
- return;
- }
- if (identity_policy_res == Effect::Pass && e == Effect::Pass && !verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
- op_ret = -EACCES;
- return;
- }
- } else if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
- op_ret = -EACCES;
- return;
- }
-
// make reservation for notification if needed
std::unique_ptr<rgw::sal::Notification> res
= driver->get_notification(s->object.get(), s->src_object.get(), s, rgw::notify::ObjectCreatedPost, y);
@@ -4692,7 +4632,7 @@ void RGWPostObj::execute(optional_yield y)
std::unique_ptr<rgw::sal::Writer> processor;
processor = driver->get_atomic_writer(this, s->yield, obj.get(),
- s->bucket_owner.id,
+ s->owner,
&s->dest_placement, 0, s->req_id);
op_ret = processor->prepare(s->yield);
if (op_ret < 0) {
@@ -5154,93 +5094,33 @@ int RGWDeleteObj::handle_slo_manifest(bufferlist& bl, optional_yield y)
return 0;
}
-int RGWDeleteObj::verify_permission(optional_yield y)
+int RGWDeleteObj::init_processing(optional_yield y)
{
- int op_ret = get_params(y);
- if (op_ret) {
- return op_ret;
+ int ret = get_params(y);
+ if (ret) {
+ return ret;
}
+ return RGWOp::init_processing(y);
+}
+int RGWDeleteObj::verify_permission(optional_yield y)
+{
auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
- if (has_s3_existing_tag || has_s3_resource_tag)
- rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
-
- if (s->iam_policy || ! s->iam_user_policies.empty() || ! s->session_policies.empty()) {
- if (s->bucket->get_info().obj_lock_enabled() && bypass_governance_mode) {
- auto r = eval_identity_or_session_policies(this, s->iam_user_policies, s->env,
- rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket->get_key(), s->object->get_name()));
- if (r == Effect::Deny) {
- bypass_perm = false;
- } else if (r == Effect::Pass && s->iam_policy) {
- ARN obj_arn(ARN(s->bucket->get_key(), s->object->get_name()));
- r = s->iam_policy->eval(s->env, *s->auth.identity, rgw::IAM::s3BypassGovernanceRetention, obj_arn);
- if (r == Effect::Deny) {
- bypass_perm = false;
- }
- } else if (r == Effect::Pass && !s->session_policies.empty()) {
- r = eval_identity_or_session_policies(this, s->session_policies, s->env,
- rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket->get_key(), s->object->get_name()));
- if (r == Effect::Deny) {
- bypass_perm = false;
- }
- }
- }
- auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env,
- s->object->get_instance().empty() ?
- rgw::IAM::s3DeleteObject :
- rgw::IAM::s3DeleteObjectVersion,
- ARN(s->bucket->get_key(), s->object->get_name()));
- if (identity_policy_res == Effect::Deny) {
- return -EACCES;
- }
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
- rgw::IAM::Effect r = Effect::Pass;
- rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
- ARN obj_arn(ARN(s->bucket->get_key(), s->object->get_name()));
- if (s->iam_policy) {
- r = s->iam_policy->eval(s->env, *s->auth.identity,
- s->object->get_instance().empty() ?
- rgw::IAM::s3DeleteObject :
- rgw::IAM::s3DeleteObjectVersion,
- obj_arn,
- princ_type);
- }
- if (r == Effect::Deny)
- return -EACCES;
+ const auto arn = ARN{s->object->get_obj()};
+ const auto action = s->object->get_instance().empty() ?
+ rgw::IAM::s3DeleteObject :
+ rgw::IAM::s3DeleteObjectVersion;
- if (!s->session_policies.empty()) {
- auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env,
- s->object->get_instance().empty() ?
- rgw::IAM::s3DeleteObject :
- rgw::IAM::s3DeleteObjectVersion,
- obj_arn);
- if (session_policy_res == Effect::Deny) {
- return -EACCES;
- }
- if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
- //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
- (session_policy_res == Effect::Allow && r == Effect::Allow)) {
- return 0;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
- //Intersection of session policy and identity policy plus bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || r == Effect::Allow) {
- return 0;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
- if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) {
- return 0;
- }
- }
- return -EACCES;
- }
- if (r == Effect::Allow || identity_policy_res == Effect::Allow)
- return 0;
+ if (!verify_bucket_permission(this, s, arn, action)) {
+ return -EACCES;
}
- if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
- return -EACCES;
+ if (s->bucket->get_info().obj_lock_enabled() && bypass_governance_mode) {
+ // require s3BypassGovernanceRetention for x-amz-bypass-governance-retention
+ bypass_perm = verify_bucket_permission(this, s, arn, rgw::IAM::s3BypassGovernanceRetention);
}
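
The gating above in miniature: the client's x-amz-bypass-governance-retention request only takes effect when the caller also holds s3BypassGovernanceRetention (verify_bypass_perm below is a stand-in for the real check):

#include <iostream>

bool verify_bypass_perm(bool policy_grants) { return policy_grants; }

int main() {
  bool object_lock_enabled = true;
  bool bypass_requested = true;              // header was sent
  bool bypass_perm = true;
  if (object_lock_enabled && bypass_requested) {
    bypass_perm = verify_bypass_perm(false); // policy denies: no bypass
  }
  std::cout << std::boolalpha << bypass_perm << "\n"; // false
}
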
if (s->bucket->get_info().mfa_enabled() &&
@@ -5343,7 +5223,8 @@ void RGWDeleteObj::execute(optional_yield y)
s->object->set_atomic();
bool ver_restored = false;
- op_ret = s->object->swift_versioning_restore(ver_restored, this, y);
+ op_ret = s->object->swift_versioning_restore(s->owner, s->user->get_id(),
+ ver_restored, this, y);
if (op_ret < 0) {
return;
}
@@ -5361,7 +5242,7 @@ void RGWDeleteObj::execute(optional_yield y)
std::unique_ptr<rgw::sal::Object::DeleteOp> del_op = s->object->get_delete_op();
del_op->params.obj_owner = s->owner;
- del_op->params.bucket_owner = s->bucket_owner;
+ del_op->params.bucket_owner = s->bucket_owner.id;
del_op->params.versioning_status = s->bucket->get_info().versioning_status();
del_op->params.unmod_since = unmod_since;
del_op->params.high_precision_time = s->system_request;
@@ -5514,76 +5395,31 @@ int RGWCopyObj::verify_permission(optional_yield y)
}
}
- /* admin request overrides permission checks */
- if (!s->auth.identity->is_admin_of(src_acl.get_owner().id)) {
- if (src_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
- auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, src_policy, s->iam_user_policies, s->session_policies);
- if (has_s3_existing_tag || has_s3_resource_tag)
- rgw_iam_add_objtags(this, s, s->src_object.get(), has_s3_existing_tag, has_s3_resource_tag);
-
- ARN obj_arn(s->src_object->get_obj());
- auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env,
- s->src_object->get_instance().empty() ?
- rgw::IAM::s3GetObject :
- rgw::IAM::s3GetObjectVersion,
- obj_arn);
- if (identity_policy_res == Effect::Deny) {
- return -EACCES;
- }
- auto e = Effect::Pass;
- rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
- if (src_policy) {
- e = src_policy->eval(s->env, *s->auth.identity,
- s->src_object->get_instance().empty() ?
- rgw::IAM::s3GetObject :
- rgw::IAM::s3GetObjectVersion,
- obj_arn,
- princ_type);
- }
- if (e == Effect::Deny) {
- return -EACCES;
- }
- if (!s->session_policies.empty()) {
- auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env,
- s->src_object->get_instance().empty() ?
- rgw::IAM::s3GetObject :
- rgw::IAM::s3GetObjectVersion,
- obj_arn);
- if (session_policy_res == Effect::Deny) {
- return -EACCES;
- }
- if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
- //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
- if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) &&
- (session_policy_res != Effect::Allow || e != Effect::Allow)) {
- return -EACCES;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
- //Intersection of session policy and identity policy plus bucket policy
- if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) && e != Effect::Allow) {
- return -EACCES;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
- if (session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) {
- return -EACCES;
- }
- }
- }
- if (identity_policy_res == Effect::Pass && e == Effect::Pass &&
- !src_acl.verify_permission(this, *s->auth.identity, s->perm_mask,
- RGW_PERM_READ)) {
- return -EACCES;
- }
- //remove src object tags as it may interfere with policy evaluation of destination obj
- if (has_s3_existing_tag || has_s3_resource_tag)
- rgw_iam_remove_objtags(this, s, s->src_object.get(), has_s3_existing_tag, has_s3_resource_tag);
-
- } else if (!src_acl.verify_permission(this, *s->auth.identity,
- s->perm_mask,
- RGW_PERM_READ)) {
- return -EACCES;
- }
+ RGWAccessControlPolicy src_bucket_acl;
+ op_ret = rgw_op_get_bucket_policy_from_attr(this, s->cct, driver,
+ src_bucket->get_owner(),
+ src_bucket->get_attrs(),
+ src_bucket_acl, y);
+ if (op_ret < 0) {
+ return op_ret;
+ }
+
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, src_policy, s->iam_identity_policies, s->session_policies);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, s->src_object.get(), has_s3_existing_tag, has_s3_resource_tag);
+
+ const auto action = s->src_object->get_instance().empty() ?
+ rgw::IAM::s3GetObject :
+ rgw::IAM::s3GetObjectVersion;
+
+ if (!verify_bucket_permission(this, s, ARN(s->src_object->get_obj()),
+ s->user_acl, src_bucket_acl,
+ src_policy, s->iam_identity_policies,
+ s->session_policies, action)) {
+ return -EACCES;
}
+
+ rgw_iam_remove_objtags(this, s, s->src_object.get(), has_s3_existing_tag, has_s3_resource_tag);
}
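
The add/evaluate/remove bracketing above keeps the source object's tags from leaking into the destination-side check that follows. The same pattern in a toy environment map (the condition key is illustrative of the s3:ExistingObjectTag/<key> family, and the helpers here are analogues, not the rgw functions):

#include <iostream>
#include <map>
#include <string>

bool eval(const std::map<std::string, std::string>& env) {
  return env.count("s3:ExistingObjectTag/stage") != 0;
}

int main() {
  std::map<std::string, std::string> env;
  env["s3:ExistingObjectTag/stage"] = "prod"; // rgw_iam_add_objtags analogue
  bool src_read_ok = eval(env);               // evaluate the source-side action
  env.erase("s3:ExistingObjectTag/stage");    // rgw_iam_remove_objtags analogue
  std::cout << std::boolalpha << src_read_ok << "\n";
}
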
RGWAccessControlPolicy dest_bucket_policy;
@@ -5597,74 +5433,21 @@ int RGWCopyObj::verify_permission(optional_yield y)
if (op_ret < 0) {
return op_ret;
}
- auto dest_iam_policy = get_iam_policy_from_attr(s->cct, s->bucket->get_attrs(), s->bucket->get_tenant());
- /* admin request overrides permission checks */
- if (! s->auth.identity->is_admin_of(dest_policy.get_owner().id)){
- if (dest_iam_policy != boost::none || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
- //Add destination bucket tags for authorization
- auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, dest_iam_policy, s->iam_user_policies, s->session_policies);
- if (has_s3_resource_tag)
- rgw_iam_add_buckettags(this, s, s->bucket.get());
-
- rgw_add_to_iam_environment(s->env, "s3:x-amz-copy-source", copy_source);
- if (md_directive)
- rgw_add_to_iam_environment(s->env, "s3:x-amz-metadata-directive",
- *md_directive);
-
- ARN obj_arn(s->object->get_obj());
- auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies,
- s->env,
- rgw::IAM::s3PutObject,
- obj_arn);
- if (identity_policy_res == Effect::Deny) {
- return -EACCES;
- }
- auto e = Effect::Pass;
- rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
- if (dest_iam_policy) {
- e = dest_iam_policy->eval(s->env, *s->auth.identity,
- rgw::IAM::s3PutObject,
- obj_arn,
- princ_type);
- }
- if (e == Effect::Deny) {
- return -EACCES;
- }
- if (!s->session_policies.empty()) {
- auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env,
- rgw::IAM::s3PutObject, obj_arn);
- if (session_policy_res == Effect::Deny) {
- return false;
- }
- if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
- //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
- if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) &&
- (session_policy_res != Effect::Allow || e == Effect::Allow)) {
- return -EACCES;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
- //Intersection of session policy and identity policy plus bucket policy
- if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) && e != Effect::Allow) {
- return -EACCES;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
- if (session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) {
- return -EACCES;
- }
- }
- }
- if (identity_policy_res == Effect::Pass && e == Effect::Pass &&
- ! dest_bucket_policy.verify_permission(this,
- *s->auth.identity,
- s->perm_mask,
- RGW_PERM_WRITE)){
- return -EACCES;
- }
- } else if (! dest_bucket_policy.verify_permission(this, *s->auth.identity, s->perm_mask,
- RGW_PERM_WRITE)) {
- return -EACCES;
- }
+ auto dest_iam_policy = get_iam_policy_from_attr(s->cct, s->bucket->get_attrs());
+
+ //Add destination bucket tags for authorization
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, dest_iam_policy, s->iam_identity_policies, s->session_policies);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s, s->bucket.get());
+
+ rgw_add_to_iam_environment(s->env, "s3:x-amz-copy-source", copy_source);
+ if (md_directive)
+ rgw_add_to_iam_environment(s->env, "s3:x-amz-metadata-directive",
+ *md_directive);
+ if (!verify_bucket_permission(this, s, ARN(s->object->get_obj()),
+ rgw::IAM::s3PutObject)) {
+ return -EACCES;
}
op_ret = init_dest_policy();
@@ -5819,12 +5602,14 @@ void RGWCopyObj::execute(optional_yield y)
/* Handle object versioning of Swift API. In case of copying to remote this
* should fail gently (op_ret == 0) as the dst_obj will not exist here. */
- op_ret = s->object->swift_versioning_copy(this, s->yield);
+ op_ret = s->object->swift_versioning_copy(s->owner, s->user->get_id(),
+ this, s->yield);
if (op_ret < 0) {
return;
}
- op_ret = s->src_object->copy_object(s->user.get(),
+ op_ret = s->src_object->copy_object(s->owner,
+ s->user->get_id(),
&s->info,
source_zone,
s->object.get(),
@@ -6032,7 +5817,7 @@ void RGWPutACLs::execute(optional_yield y)
if (op_ret < 0)
return;
- if (!existing_owner.id.empty() &&
+ if (!existing_owner.empty() &&
existing_owner.id != new_policy.get_owner().id) {
s->err.message = "Cannot modify ACL Owner";
op_ret = -EPERM;
@@ -6060,7 +5845,7 @@ void RGWPutACLs::execute(optional_yield y)
// forward bucket acl requests to meta master zone
if ((rgw::sal::Object::empty(s->object.get()))) {
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -6186,7 +5971,7 @@ void RGWPutLC::execute(optional_yield y)
ldpp_dout(this, 15) << "New LifecycleConfiguration:" << ss.str() << dendl;
}
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -6202,7 +5987,7 @@ void RGWPutLC::execute(optional_yield y)
void RGWDeleteLC::execute(optional_yield y)
{
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
nullptr, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -6222,7 +6007,11 @@ int RGWGetCORS::verify_permission(optional_yield y)
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketCORS);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketCORS)) {
+ return -EACCES;
+ }
+
+ return 0;
}
void RGWGetCORS::execute(optional_yield y)
@@ -6244,7 +6033,11 @@ int RGWPutCORS::verify_permission(optional_yield y)
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketCORS)) {
+ return -EACCES;
+ }
+
+ return 0;
}
void RGWPutCORS::execute(optional_yield y)
@@ -6255,7 +6048,7 @@ void RGWPutCORS::execute(optional_yield y)
if (op_ret < 0)
return;
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -6276,12 +6069,16 @@ int RGWDeleteCORS::verify_permission(optional_yield y)
rgw_iam_add_buckettags(this, s);
// No separate delete permission
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketCORS);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketCORS)) {
+ return -EACCES;
+ }
+
+ return 0;
}
void RGWDeleteCORS::execute(optional_yield y)
{
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
nullptr, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -6370,7 +6167,11 @@ int RGWGetRequestPayment::verify_permission(optional_yield y)
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketRequestPayment);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketRequestPayment)) {
+ return -EACCES;
+ }
+
+ return 0;
}
void RGWGetRequestPayment::pre_exec()
@@ -6389,7 +6190,11 @@ int RGWSetRequestPayment::verify_permission(optional_yield y)
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketRequestPayment);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketRequestPayment)) {
+ return -EACCES;
+ }
+
+ return 0;
}
void RGWSetRequestPayment::pre_exec()
@@ -6404,7 +6209,7 @@ void RGWSetRequestPayment::execute(optional_yield y)
if (op_ret < 0)
return;
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&in_data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -6430,58 +6235,7 @@ int RGWInitMultipart::verify_permission(optional_yield y)
// add server-side encryption headers
rgw_iam_add_crypt_attrs(s->env, s->info.crypt_attribute_map);
- if (s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
- auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env,
- rgw::IAM::s3PutObject,
- s->object->get_obj());
- if (identity_policy_res == Effect::Deny) {
- return -EACCES;
- }
-
- rgw::IAM::Effect e = Effect::Pass;
- rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
- ARN obj_arn(s->object->get_obj());
- if (s->iam_policy) {
- e = s->iam_policy->eval(s->env, *s->auth.identity,
- rgw::IAM::s3PutObject,
- obj_arn,
- princ_type);
- }
- if (e == Effect::Deny) {
- return -EACCES;
- }
-
- if (!s->session_policies.empty()) {
- auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env,
- rgw::IAM::s3PutObject,
- s->object->get_obj());
- if (session_policy_res == Effect::Deny) {
- return -EACCES;
- }
- if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
- //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
- (session_policy_res == Effect::Allow && e == Effect::Allow)) {
- return 0;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
- //Intersection of session policy and identity policy plus bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) {
- return 0;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
- if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) {
- return 0;
- }
- }
- return -EACCES;
- }
- if (e == Effect::Allow || identity_policy_res == Effect::Allow) {
- return 0;
- }
- }
-
- if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutObject)) {
return -EACCES;
}
@@ -6551,58 +6305,8 @@ int RGWCompleteMultipart::verify_permission(optional_yield y)
// add server-side encryption headers
rgw_iam_add_crypt_attrs(s->env, s->info.crypt_attribute_map);
- if (s->iam_policy || ! s->iam_user_policies.empty() || ! s->session_policies.empty()) {
- auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env,
- rgw::IAM::s3PutObject,
- s->object->get_obj());
- if (identity_policy_res == Effect::Deny) {
- return -EACCES;
- }
-
- rgw::IAM::Effect e = Effect::Pass;
- rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
- rgw::ARN obj_arn(s->object->get_obj());
- if (s->iam_policy) {
- e = s->iam_policy->eval(s->env, *s->auth.identity,
- rgw::IAM::s3PutObject,
- obj_arn,
- princ_type);
- }
- if (e == Effect::Deny) {
- return -EACCES;
- }
-
- if (!s->session_policies.empty()) {
- auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env,
- rgw::IAM::s3PutObject,
- s->object->get_obj());
- if (session_policy_res == Effect::Deny) {
- return -EACCES;
- }
- if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
- //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
- (session_policy_res == Effect::Allow && e == Effect::Allow)) {
- return 0;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
- //Intersection of session policy and identity policy plus bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) {
- return 0;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
- if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) {
- return 0;
- }
- }
- return -EACCES;
- }
- if (e == Effect::Allow || identity_policy_res == Effect::Allow) {
- return 0;
- }
- }
-
- if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
+ if (!verify_bucket_permission(this, s, ARN(s->object->get_obj()),
+ rgw::IAM::s3PutObject)) {
return -EACCES;
}
@@ -6833,58 +6537,8 @@ int RGWAbortMultipart::verify_permission(optional_yield y)
if (has_s3_existing_tag || has_s3_resource_tag)
rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
- if (s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
- auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env,
- rgw::IAM::s3AbortMultipartUpload,
- s->object->get_obj());
- if (identity_policy_res == Effect::Deny) {
- return -EACCES;
- }
-
- rgw::IAM::Effect e = Effect::Pass;
- rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
- ARN obj_arn(s->object->get_obj());
- if (s->iam_policy) {
- e = s->iam_policy->eval(s->env, *s->auth.identity,
- rgw::IAM::s3AbortMultipartUpload,
- obj_arn, princ_type);
- }
-
- if (e == Effect::Deny) {
- return -EACCES;
- }
-
- if (!s->session_policies.empty()) {
- auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env,
- rgw::IAM::s3PutObject,
- s->object->get_obj());
- if (session_policy_res == Effect::Deny) {
- return -EACCES;
- }
- if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
- //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
- (session_policy_res == Effect::Allow && e == Effect::Allow)) {
- return 0;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
- //Intersection of session policy and identity policy plus bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) {
- return 0;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
- if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) {
- return 0;
- }
- }
- return -EACCES;
- }
- if (e == Effect::Allow || identity_policy_res == Effect::Allow) {
- return 0;
- }
- }
-
- if (!verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE)) {
+ if (!verify_bucket_permission(this, s, ARN(s->object->get_obj()),
+ rgw::IAM::s3AbortMultipartUpload)) {
return -EACCES;
}
@@ -7027,99 +6681,27 @@ void RGWGetHealthCheck::execute(optional_yield y)
}
}
-int RGWDeleteMultiObj::verify_permission(optional_yield y)
+int RGWDeleteMultiObj::init_processing(optional_yield y)
{
- int op_ret = get_params(y);
- if (op_ret) {
- return op_ret;
+ int ret = get_params(y);
+ if (ret) {
+ return ret;
}
- auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
- if (has_s3_existing_tag || has_s3_resource_tag)
- rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
-
- if (s->iam_policy || ! s->iam_user_policies.empty() || ! s->session_policies.empty()) {
- if (s->bucket->get_info().obj_lock_enabled() && bypass_governance_mode) {
- ARN bucket_arn(s->bucket->get_key());
- auto r = eval_identity_or_session_policies(this, s->iam_user_policies, s->env,
- rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket->get_key()));
- if (r == Effect::Deny) {
- bypass_perm = false;
- } else if (r == Effect::Pass && s->iam_policy) {
- r = s->iam_policy->eval(s->env, *s->auth.identity, rgw::IAM::s3BypassGovernanceRetention,
- bucket_arn);
- if (r == Effect::Deny) {
- bypass_perm = false;
- }
- } else if (r == Effect::Pass && !s->session_policies.empty()) {
- r = eval_identity_or_session_policies(this, s->session_policies, s->env,
- rgw::IAM::s3BypassGovernanceRetention, ARN(s->bucket->get_key()));
- if (r == Effect::Deny) {
- bypass_perm = false;
- }
- }
- }
-
- bool not_versioned = rgw::sal::Object::empty(s->object.get()) || s->object->get_instance().empty();
-
- auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env,
- not_versioned ?
- rgw::IAM::s3DeleteObject :
- rgw::IAM::s3DeleteObjectVersion,
- ARN(s->bucket->get_key()));
- if (identity_policy_res == Effect::Deny) {
- return -EACCES;
- }
+ return RGWOp::init_processing(y);
+}
- rgw::IAM::Effect r = Effect::Pass;
- rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
- rgw::ARN bucket_arn(s->bucket->get_key());
- if (s->iam_policy) {
- r = s->iam_policy->eval(s->env, *s->auth.identity,
- not_versioned ?
- rgw::IAM::s3DeleteObject :
- rgw::IAM::s3DeleteObjectVersion,
- bucket_arn,
- princ_type);
- }
- if (r == Effect::Deny)
- return -EACCES;
+int RGWDeleteMultiObj::verify_permission(optional_yield y)
+{
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s);
+ if (has_s3_existing_tag || has_s3_resource_tag)
+ rgw_iam_add_objtags(this, s, has_s3_existing_tag, has_s3_resource_tag);
- if (!s->session_policies.empty()) {
- auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env,
- not_versioned ?
- rgw::IAM::s3DeleteObject :
- rgw::IAM::s3DeleteObjectVersion,
- ARN(s->bucket->get_key()));
- if (session_policy_res == Effect::Deny) {
- return -EACCES;
- }
- if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
- //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
- (session_policy_res == Effect::Allow && r == Effect::Allow)) {
- return 0;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
- //Intersection of session policy and identity policy plus bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || r == Effect::Allow) {
- return 0;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
- if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) {
- return 0;
- }
- }
- return -EACCES;
- }
- if (r == Effect::Allow || identity_policy_res == Effect::Allow)
- return 0;
+ if (s->bucket->get_info().obj_lock_enabled() && bypass_governance_mode) {
+ // require s3BypassGovernanceRetention for x-amz-bypass-governance-retention
+ bypass_perm = verify_bucket_permission(this, s, rgw::IAM::s3BypassGovernanceRetention);
}
- acl_allowed = verify_bucket_permission_no_policy(this, s, RGW_PERM_WRITE);
- if (!acl_allowed)
- return -EACCES;
-
return 0;
}
@@ -7162,73 +6744,22 @@ void RGWDeleteMultiObj::wait_flush(optional_yield y,
void RGWDeleteMultiObj::handle_individual_object(const rgw_obj_key& o, optional_yield y,
boost::asio::deadline_timer *formatter_flush_cond)
{
- std::string version_id;
std::unique_ptr<rgw::sal::Object> obj = bucket->get_object(o);
- if (s->iam_policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
- auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env,
- o.instance.empty() ?
- rgw::IAM::s3DeleteObject :
- rgw::IAM::s3DeleteObjectVersion,
- ARN(obj->get_obj()));
- if (identity_policy_res == Effect::Deny) {
- send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
- return;
- }
-
- rgw::IAM::Effect e = Effect::Pass;
- rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
- if (s->iam_policy) {
- ARN obj_arn(obj->get_obj());
- e = s->iam_policy->eval(s->env,
- *s->auth.identity,
- o.instance.empty() ?
- rgw::IAM::s3DeleteObject :
- rgw::IAM::s3DeleteObjectVersion,
- obj_arn,
- princ_type);
- }
- if (e == Effect::Deny) {
- send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
- return;
- }
-
- if (!s->session_policies.empty()) {
- auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env,
- o.instance.empty() ?
- rgw::IAM::s3DeleteObject :
- rgw::IAM::s3DeleteObjectVersion,
- ARN(obj->get_obj()));
- if (session_policy_res == Effect::Deny) {
- send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
- return;
- }
- if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
- //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
- if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) &&
- (session_policy_res != Effect::Allow || e != Effect::Allow)) {
- send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
- return;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
- //Intersection of session policy and identity policy plus bucket policy
- if ((session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) && e != Effect::Allow) {
- send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
- return;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
- if (session_policy_res != Effect::Allow || identity_policy_res != Effect::Allow) {
- send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
- return;
- }
- }
- send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
- return;
- }
+ if (o.empty()) {
+ send_partial_response(o, false, "", -EINVAL, formatter_flush_cond);
+ return;
+ }
- if ((identity_policy_res == Effect::Pass && e == Effect::Pass && !acl_allowed)) {
- send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
- return;
- }
+ // verify object delete permission
+ const auto action = o.instance.empty() ?
+ rgw::IAM::s3DeleteObject :
+ rgw::IAM::s3DeleteObjectVersion;
+ if (!verify_bucket_permission(this, s, ARN(obj->get_obj()), s->user_acl,
+ s->bucket_acl, s->iam_policy,
+ s->iam_identity_policies,
+ s->session_policies, action)) {
+ send_partial_response(o, false, "", -EACCES, formatter_flush_cond);
+ return;
}
uint64_t obj_size = 0;
@@ -7278,10 +6809,11 @@ void RGWDeleteMultiObj::handle_individual_object(const rgw_obj_key& o, optional_
obj->set_atomic();
+ std::string version_id; // empty
std::unique_ptr<rgw::sal::Object::DeleteOp> del_op = obj->get_delete_op();
del_op->params.versioning_status = obj->get_bucket()->get_info().versioning_status();
del_op->params.obj_owner = s->owner;
- del_op->params.bucket_owner = s->bucket_owner;
+ del_op->params.bucket_owner = s->bucket_owner.id;
del_op->params.marker_version_id = version_id;
op_ret = del_op->delete_obj(this, y, rgw::sal::FLAG_LOG_OP);
@@ -7418,14 +6950,14 @@ bool RGWBulkDelete::Deleter::verify_permission(RGWBucketInfo& binfo,
return false;
}
- auto policy = get_iam_policy_from_attr(s->cct, battrs, binfo.bucket.tenant);
+ auto policy = get_iam_policy_from_attr(s->cct, battrs);
bucket_owner = bacl.get_owner();
/* We can use global user_acl because each BulkDelete request is allowed
* to work on entities from a single account only. */
return verify_bucket_permission(dpp, s, binfo.bucket, s->user_acl,
- bacl, policy, s->iam_user_policies, s->session_policies, rgw::IAM::s3DeleteBucket);
+ bacl, policy, s->iam_identity_policies, s->session_policies, rgw::IAM::s3DeleteBucket);
}
bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path, optional_yield y)
@@ -7434,7 +6966,7 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path, optional_yie
ACLOwner bowner;
RGWObjVersionTracker ot;
- int ret = driver->load_bucket(dpp, rgw_bucket(s->user->get_tenant(),
+ int ret = driver->load_bucket(dpp, rgw_bucket(s->auth.identity->get_tenant(),
path.bucket_name),
&bucket, y);
if (ret < 0) {
@@ -7456,7 +6988,7 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path, optional_yie
std::unique_ptr<rgw::sal::Object::DeleteOp> del_op = obj->get_delete_op();
del_op->params.versioning_status = obj->get_bucket()->get_info().versioning_status();
del_op->params.obj_owner = bowner;
- del_op->params.bucket_owner = bucket_owner;
+ del_op->params.bucket_owner = bucket_owner.id;
ret = del_op->delete_obj(dpp, y, rgw::sal::FLAG_LOG_OP);
if (ret < 0) {
@@ -7468,7 +7000,7 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path, optional_yie
req_info req = s->info;
forward_req_info(dpp, s->cct, req, path.bucket_name);
- ret = rgw_forward_request_to_master(dpp, *s->penv.site, s->user->get_id(),
+ ret = rgw_forward_request_to_master(dpp, *s->penv.site, s->owner.id,
nullptr, nullptr, req, y);
if (ret < 0) {
goto delop_fail;
@@ -7576,9 +7108,9 @@ int RGWBulkUploadOp::verify_permission(optional_yield y)
return -EACCES;
}
- if (s->user->get_tenant() != s->bucket_tenant) {
+ if (s->auth.identity->get_tenant() != s->bucket_tenant) {
ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant"
- << " (user_id.tenant=" << s->user->get_tenant()
+ << " (authorized user tenant=" << s->auth.identity->get_tenant()
<< " requested=" << s->bucket_tenant << ")" << dendl;
return -EACCES;
}
@@ -7646,7 +7178,7 @@ RGWBulkUploadOp::handle_upload_path(req_state *s)
int RGWBulkUploadOp::handle_dir_verify_permission(optional_yield y)
{
- return check_user_max_buckets(this, *s->user, y);
+ return check_owner_max_buckets(this, driver, s, y);
}
static void forward_req_info(const DoutPrefixProvider *dpp, CephContext *cct, req_info& info, const std::string& bucket_name)
@@ -7711,7 +7243,7 @@ int RGWBulkUploadOp::handle_dir(const std::string_view path, optional_yield y)
{
// create a default acl
RGWAccessControlPolicy policy;
- policy.create_default(s->user->get_id(), s->user->get_display_name());
+ policy.create_default(s->owner.id, s->owner.display_name);
ceph::bufferlist aclbl;
policy.encode(aclbl);
createparams.attrs[RGW_ATTR_ACL] = std::move(aclbl);
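
For context, create_default(owner, display_name) produces an ACL owned by the requester with a single FULL_CONTROL grant back to that owner; switching the arguments to s->owner makes the default account-aware. A sketch under that reading (types are stand-ins for the rgw ACL classes):

#include <iostream>
#include <string>
#include <vector>

struct Grant { std::string grantee; std::string perm; };
struct Acl {
  std::string owner_id, owner_name;
  std::vector<Grant> grants;
};

Acl create_default(const std::string& id, const std::string& name) {
  return {id, name, {{id, "FULL_CONTROL"}}};
}

int main() {
  Acl acl = create_default("acct123$user", "Uploader");
  std::cout << acl.owner_id << " -> " << acl.grants[0].perm << "\n";
}
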
@@ -7724,7 +7256,7 @@ int RGWBulkUploadOp::handle_dir(const std::string_view path, optional_yield y)
req_info req = s->info;
forward_req_info(this, s->cct, req, bucket_name);
- ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&in_data, &jp, req, y);
if (ret < 0) {
return ret;
@@ -7758,55 +7290,11 @@ bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo,
return false;
}
- auto policy = get_iam_policy_from_attr(s->cct, battrs, binfo.bucket.tenant);
+ auto policy = get_iam_policy_from_attr(s->cct, battrs);
- bucket_owner = bacl.get_owner();
- if (policy || ! s->iam_user_policies.empty() || !s->session_policies.empty()) {
- auto identity_policy_res = eval_identity_or_session_policies(this, s->iam_user_policies, s->env,
- rgw::IAM::s3PutObject, obj);
- if (identity_policy_res == Effect::Deny) {
- return false;
- }
-
- rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
- ARN obj_arn(obj);
- auto e = policy->eval(s->env, *s->auth.identity,
- rgw::IAM::s3PutObject, obj_arn, princ_type);
- if (e == Effect::Deny) {
- return false;
- }
-
- if (!s->session_policies.empty()) {
- auto session_policy_res = eval_identity_or_session_policies(this, s->session_policies, s->env,
- rgw::IAM::s3PutObject, obj);
- if (session_policy_res == Effect::Deny) {
- return false;
- }
- if (princ_type == rgw::IAM::PolicyPrincipal::Role) {
- //Intersection of session policy and identity policy plus intersection of session policy and bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) ||
- (session_policy_res == Effect::Allow && e == Effect::Allow)) {
- return true;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Session) {
- //Intersection of session policy and identity policy plus bucket policy
- if ((session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) || e == Effect::Allow) {
- return true;
- }
- } else if (princ_type == rgw::IAM::PolicyPrincipal::Other) {// there was no match in the bucket policy
- if (session_policy_res == Effect::Allow && identity_policy_res == Effect::Allow) {
- return true;
- }
- }
- return false;
- }
- if (e == Effect::Allow || identity_policy_res == Effect::Allow) {
- return true;
- }
- }
-
- return verify_bucket_permission_no_policy(this, s, s->user_acl,
- bacl, RGW_PERM_WRITE);
+ return verify_bucket_permission(this, s, ARN(obj), s->user_acl, bacl, policy,
+ s->iam_identity_policies, s->session_policies,
+ rgw::IAM::s3PutObject);
}
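
For context, the block removed above hand-rolled the identity/session/bucket policy evaluation that the single verify_bucket_permission() call now centralizes. A stand-alone toy model of the basic precedence (not Ceph code, and a deliberate simplification of RGW's real session-policy intersection rules):

#include <iostream>
#include <optional>

enum class Effect { Deny, Allow, Pass };

// toy model: Deny anywhere wins; otherwise any Allow grants access.
// RGW's real evaluation also intersects session and role policies.
bool evaluate(Effect identity, std::optional<Effect> bucket) {
  if (identity == Effect::Deny) return false;
  if (bucket && *bucket == Effect::Deny) return false;
  return identity == Effect::Allow || (bucket && *bucket == Effect::Allow);
}

int main() {
  std::cout << std::boolalpha
            << evaluate(Effect::Allow, std::nullopt) << '\n'   // true
            << evaluate(Effect::Pass, Effect::Allow) << '\n'   // true
            << evaluate(Effect::Pass, Effect::Deny) << '\n'    // false
            << evaluate(Effect::Deny, Effect::Allow) << '\n';  // false
}
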
int RGWBulkUploadOp::handle_file(const std::string_view path,
@@ -7828,7 +7316,7 @@ int RGWBulkUploadOp::handle_file(const std::string_view path,
std::unique_ptr<rgw::sal::Bucket> bucket;
ACLOwner bowner;
- op_ret = driver->load_bucket(this, rgw_bucket(s->user->get_tenant(),
+ op_ret = driver->load_bucket(this, rgw_bucket(s->auth.identity->get_tenant(),
bucket_name),
&bucket, y);
if (op_ret < 0) {
@@ -7861,8 +7349,7 @@ int RGWBulkUploadOp::handle_file(const std::string_view path,
dest_placement.inherit_from(bucket->get_placement_rule());
std::unique_ptr<rgw::sal::Writer> processor;
- processor = driver->get_atomic_writer(this, s->yield, obj.get(),
- bowner.id,
+ processor = driver->get_atomic_writer(this, s->yield, obj.get(), bowner,
&s->dest_placement, 0, s->req_id);
op_ret = processor->prepare(s->yield);
if (op_ret < 0) {
@@ -7946,7 +7433,7 @@ int RGWBulkUploadOp::handle_file(const std::string_view path,
/* Create metadata: ACLs. */
RGWAccessControlPolicy policy;
- policy.create_default(s->user->get_id(), s->user->get_display_name());
+ policy.create_default(s->owner.id, s->owner.display_name);
ceph::bufferlist aclbl;
policy.encode(aclbl);
attrs.emplace(RGW_ATTR_ACL, std::move(aclbl));
@@ -8435,7 +7922,7 @@ void RGWPutBucketPolicy::execute(optional_yield y)
return;
}
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -8444,7 +7931,7 @@ void RGWPutBucketPolicy::execute(optional_yield y)
try {
const Policy p(
- s->cct, s->bucket_tenant, data,
+ s->cct, nullptr, data.to_str(),
s->cct->_conf.get_val<bool>("rgw_policy_reject_invalid_principals"));
rgw::sal::Attrs attrs(s->bucket_attrs);
if (s->bucket_access_conf &&
@@ -8537,7 +8024,7 @@ int RGWDeleteBucketPolicy::verify_permission(optional_yield y)
void RGWDeleteBucketPolicy::execute(optional_yield y)
{
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
nullptr, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -8563,7 +8050,11 @@ int RGWPutBucketObjectLock::verify_permission(optional_yield y)
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3PutBucketObjectLockConfiguration);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketObjectLockConfiguration)) {
+ return -EACCES;
+ }
+
+ return 0;
}
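
This hunk (and the matching RGWGetBucketObjectLock hunk below) adapts verify_bucket_permission()'s boolean result to the errno convention that verify_permission() callers expect. A minimal illustrative sketch with stand-in functions:

#include <cerrno>
#include <iostream>

// stand-in for the real permission check
bool verify_bucket_permission(bool allowed_by_policy) {
  return allowed_by_policy;
}

int verify_permission(bool allowed) {
  if (!verify_bucket_permission(allowed)) {
    return -EACCES;   // caller aborts the request
  }
  return 0;           // proceed to execute()
}

int main() {
  std::cout << verify_permission(true) << ' '
            << verify_permission(false) << '\n';   // 0 -13 (on Linux)
}
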
void RGWPutBucketObjectLock::execute(optional_yield y)
@@ -8604,7 +8095,7 @@ void RGWPutBucketObjectLock::execute(optional_yield y)
return;
}
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << __func__ << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -8630,7 +8121,11 @@ int RGWGetBucketObjectLock::verify_permission(optional_yield y)
if (has_s3_resource_tag)
rgw_iam_add_buckettags(this, s);
- return verify_bucket_owner_or_policy(s, rgw::IAM::s3GetBucketObjectLockConfiguration);
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3GetBucketObjectLockConfiguration)) {
+ return -EACCES;
+ }
+
+ return 0;
}
void RGWGetBucketObjectLock::execute(optional_yield y)
@@ -8967,7 +8462,7 @@ void RGWPutBucketPublicAccessBlock::execute(optional_yield y)
return;
}
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -9049,7 +8544,7 @@ int RGWDeleteBucketPublicAccessBlock::verify_permission(optional_yield y)
void RGWDeleteBucketPublicAccessBlock::execute(optional_yield y)
{
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
nullptr, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -9105,7 +8600,7 @@ void RGWPutBucketEncryption::execute(optional_yield y)
return;
}
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
&data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
@@ -9160,7 +8655,7 @@ int RGWDeleteBucketEncryption::verify_permission(optional_yield y)
void RGWDeleteBucketEncryption::execute(optional_yield y)
{
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->owner.id,
nullptr, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
diff --git a/src/rgw/rgw_op.h b/src/rgw/rgw_op.h
index fa61aefc563..5f947fd53c5 100644
--- a/src/rgw/rgw_op.h
+++ b/src/rgw/rgw_op.h
@@ -70,7 +70,7 @@ namespace rgw::auth::registry { class StrategyRegistry; }
int rgw_forward_request_to_master(const DoutPrefixProvider* dpp,
const rgw::SiteConfig& site,
- const rgw_user& uid,
+ const rgw_owner& effective_owner,
bufferlist* indata, JSONParser* jp,
req_info& req, optional_yield y);
@@ -1322,6 +1322,7 @@ public:
attrs.emplace(std::move(key), std::move(bl)); /* key and bl are r-value refs */
}
+ int init_processing(optional_yield y) override;
int verify_permission(optional_yield y) override;
void pre_exec() override;
void execute(optional_yield y) override;
@@ -1446,6 +1447,7 @@ public:
bypass_governance_mode(false) {
}
+ int init_processing(optional_yield y) override;
int verify_permission(optional_yield y) override;
void pre_exec() override;
void execute(optional_yield y) override;
@@ -2023,7 +2025,6 @@ protected:
rgw::sal::Bucket* bucket;
bool quiet;
bool status_dumped;
- bool acl_allowed = false;
bool bypass_perm;
bool bypass_governance_mode;
@@ -2035,6 +2036,7 @@ public:
bypass_governance_mode = false;
}
+ int init_processing(optional_yield y) override;
int verify_permission(optional_yield y) override;
void pre_exec() override;
void execute(optional_yield y) override;
@@ -2070,9 +2072,6 @@ extern int rgw_build_object_policies(const DoutPrefixProvider *dpp, rgw::sal::Dr
req_state *s, bool prefetch_data, optional_yield y);
extern void rgw_build_iam_environment(rgw::sal::Driver* driver,
req_state* s);
-extern std::vector<rgw::IAM::Policy> get_iam_user_policy_from_attr(CephContext* cct,
- std::map<std::string, bufferlist>& attrs,
- const std::string& tenant);
inline int get_system_versioning_params(req_state *s,
uint64_t *olh_epoch,
diff --git a/src/rgw/rgw_op_type.h b/src/rgw/rgw_op_type.h
index a7a68d4ade5..fc661b51dbe 100644
--- a/src/rgw/rgw_op_type.h
+++ b/src/rgw/rgw_op_type.h
@@ -52,19 +52,6 @@ enum RGWOpType {
RGW_OP_GET_CROSS_DOMAIN_POLICY,
RGW_OP_GET_HEALTH_CHECK,
RGW_OP_GET_INFO,
- RGW_OP_CREATE_ROLE,
- RGW_OP_DELETE_ROLE,
- RGW_OP_GET_ROLE,
- RGW_OP_MODIFY_ROLE_TRUST_POLICY,
- RGW_OP_LIST_ROLES,
- RGW_OP_PUT_ROLE_POLICY,
- RGW_OP_GET_ROLE_POLICY,
- RGW_OP_LIST_ROLE_POLICIES,
- RGW_OP_DELETE_ROLE_POLICY,
- RGW_OP_TAG_ROLE,
- RGW_OP_LIST_ROLE_TAGS,
- RGW_OP_UNTAG_ROLE,
- RGW_OP_UPDATE_ROLE,
RGW_OP_PUT_BUCKET_POLICY,
RGW_OP_GET_BUCKET_POLICY,
RGW_OP_DELETE_BUCKET_POLICY,
@@ -74,16 +61,60 @@ enum RGWOpType {
RGW_OP_PUT_LC,
RGW_OP_GET_LC,
RGW_OP_DELETE_LC,
- RGW_OP_PUT_USER_POLICY,
- RGW_OP_GET_USER_POLICY,
- RGW_OP_LIST_USER_POLICIES,
- RGW_OP_DELETE_USER_POLICY,
RGW_OP_PUT_BUCKET_OBJ_LOCK,
RGW_OP_GET_BUCKET_OBJ_LOCK,
RGW_OP_PUT_OBJ_RETENTION,
RGW_OP_GET_OBJ_RETENTION,
RGW_OP_PUT_OBJ_LEGAL_HOLD,
RGW_OP_GET_OBJ_LEGAL_HOLD,
+ // IAM
+ RGW_OP_PUT_USER_POLICY,
+ RGW_OP_GET_USER_POLICY,
+ RGW_OP_LIST_USER_POLICIES,
+ RGW_OP_DELETE_USER_POLICY,
+ RGW_OP_ATTACH_USER_POLICY,
+ RGW_OP_DETACH_USER_POLICY,
+ RGW_OP_LIST_ATTACHED_USER_POLICIES,
+ RGW_OP_CREATE_ROLE,
+ RGW_OP_DELETE_ROLE,
+ RGW_OP_GET_ROLE,
+ RGW_OP_MODIFY_ROLE_TRUST_POLICY,
+ RGW_OP_LIST_ROLES,
+ RGW_OP_PUT_ROLE_POLICY,
+ RGW_OP_GET_ROLE_POLICY,
+ RGW_OP_LIST_ROLE_POLICIES,
+ RGW_OP_DELETE_ROLE_POLICY,
+ RGW_OP_ATTACH_ROLE_POLICY,
+ RGW_OP_DETACH_ROLE_POLICY,
+ RGW_OP_LIST_ATTACHED_ROLE_POLICIES,
+ RGW_OP_TAG_ROLE,
+ RGW_OP_LIST_ROLE_TAGS,
+ RGW_OP_UNTAG_ROLE,
+ RGW_OP_UPDATE_ROLE,
+ RGW_OP_CREATE_USER,
+ RGW_OP_GET_USER,
+ RGW_OP_UPDATE_USER,
+ RGW_OP_DELETE_USER,
+ RGW_OP_LIST_USERS,
+ RGW_OP_CREATE_ACCESS_KEY,
+ RGW_OP_UPDATE_ACCESS_KEY,
+ RGW_OP_DELETE_ACCESS_KEY,
+ RGW_OP_LIST_ACCESS_KEYS,
+ RGW_OP_CREATE_GROUP,
+ RGW_OP_GET_GROUP,
+ RGW_OP_UPDATE_GROUP,
+ RGW_OP_DELETE_GROUP,
+ RGW_OP_LIST_GROUPS,
+ RGW_OP_ADD_USER_TO_GROUP,
+ RGW_OP_REMOVE_USER_FROM_GROUP,
+ RGW_OP_LIST_GROUPS_FOR_USER,
+ RGW_OP_PUT_GROUP_POLICY,
+ RGW_OP_GET_GROUP_POLICY,
+ RGW_OP_LIST_GROUP_POLICIES,
+ RGW_OP_DELETE_GROUP_POLICY,
+ RGW_OP_ATTACH_GROUP_POLICY,
+ RGW_OP_DETACH_GROUP_POLICY,
+ RGW_OP_LIST_ATTACHED_GROUP_POLICIES,
/* rgw specific */
RGW_OP_ADMIN_SET_METADATA,
RGW_OP_GET_OBJ_LAYOUT,
diff --git a/src/rgw/rgw_polparser.cc b/src/rgw/rgw_polparser.cc
index f81eda7fe97..217972f27c9 100644
--- a/src/rgw/rgw_polparser.cc
+++ b/src/rgw/rgw_polparser.cc
@@ -6,6 +6,7 @@
#include <exception>
#include <fstream>
#include <iostream>
+#include <optional>
#include <string>
#include <string_view>
@@ -19,14 +20,14 @@
#include "rgw/rgw_iam_policy.h"
// Returns true on success
-bool parse(CephContext* cct, const std::string& tenant,
+bool parse(CephContext* cct, const std::string* tenant,
const std::string& fname, std::istream& in) noexcept
{
bufferlist bl;
bl.append(in);
try {
auto p = rgw::IAM::Policy(
- cct, tenant, bl,
+ cct, tenant, bl.to_str(),
cct->_conf.get_val<bool>("rgw_policy_reject_invalid_principals"));
} catch (const rgw::IAM::PolicyParseException& e) {
std::cerr << fname << ": " << e.what() << std::endl;
@@ -56,7 +57,7 @@ void usage(std::string_view cmdname)
int main(int argc, const char** argv)
{
std::string_view cmdname = argv[0];
- std::string tenant;
+ std::optional<std::string> tenant;
auto args = argv_to_vec(argc, argv);
if (ceph_argparse_need_usage(args)) {
@@ -82,9 +83,10 @@ int main(int argc, const char** argv)
}
bool success = true;
+ const std::string* t = tenant ? &*tenant : nullptr;
if (args.empty()) {
- success = parse(cct.get(), tenant, "(stdin)", std::cin);
+ success = parse(cct.get(), t, "(stdin)", std::cin);
} else {
for (const auto& file : args) {
std::ifstream in;
@@ -93,7 +95,7 @@ int main(int argc, const char** argv)
std::cerr << "Can't read " << file << std::endl;
success = false;
}
- if (!parse(cct.get(), tenant, file, in)) {
+ if (!parse(cct.get(), t, file, in)) {
success = false;
}
}
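
The polparser change above turns the tenant into std::optional<std::string> and hands callees a nullable pointer, so "no tenant" and "empty tenant" stay distinguishable. A minimal stand-alone sketch of that bridging idiom (identifiers are illustrative, not from the Ceph tree):

#include <iostream>
#include <optional>
#include <string>

// callee distinguishes "no tenant" (nullptr) from "empty tenant" ("")
void parse_policy(const std::string* tenant) {
  if (tenant) {
    std::cout << "tenant-scoped parse: " << *tenant << '\n';
  } else {
    std::cout << "tenant-agnostic parse\n";
  }
}

int main() {
  std::optional<std::string> tenant;            // unset by default
  // tenant = "acme";                           // e.g. set via a --tenant flag
  const std::string* t = tenant ? &*tenant : nullptr;
  parse_policy(t);
}
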
diff --git a/src/rgw/rgw_process.cc b/src/rgw/rgw_process.cc
index 1765e83d622..10e544b577d 100644
--- a/src/rgw/rgw_process.cc
+++ b/src/rgw/rgw_process.cc
@@ -366,7 +366,13 @@ int process_request(const RGWProcessEnv& penv,
/* FIXME: remove this after switching all handlers to the new authentication
* infrastructure. */
if (nullptr == s->auth.identity) {
- s->auth.identity = rgw::auth::transform_old_authinfo(s);
+ auto result = rgw::auth::transform_old_authinfo(
+ op, yield, driver, s->user.get());
+ if (!result) {
+ abort_early(s, op, result.error(), handler, yield);
+ goto done;
+ }
+ s->auth.identity = std::move(result).value();
}
ldpp_dout(op, 2) << "normalizing buckets and tenants" << dendl;
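
The new transform_old_authinfo call returns an error-or-value result that is tested with operator bool and unwrapped with std::move(result).value(). A stand-alone sketch of the same flow, using C++23 std::expected purely as a stand-in for whatever result type the function actually returns (Identity and make_identity are illustrative):

#include <cerrno>
#include <expected>
#include <iostream>
#include <memory>
#include <string>

struct Identity { std::string name; };   // toy identity type

std::expected<std::unique_ptr<Identity>, int> make_identity(bool ok) {
  if (!ok)
    return std::unexpected(-EACCES);
  return std::make_unique<Identity>(Identity{"legacy-user"});
}

int main() {
  auto result = make_identity(true);
  if (!result) {
    std::cerr << "abort early, err=" << result.error() << '\n';
    return 1;
  }
  std::unique_ptr<Identity> id = std::move(result).value();
  std::cout << "identity: " << id->name << '\n';
}
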
diff --git a/src/rgw/rgw_pubsub.cc b/src/rgw/rgw_pubsub.cc
index 4a420f004c5..160ecee1768 100644
--- a/src/rgw/rgw_pubsub.cc
+++ b/src/rgw/rgw_pubsub.cc
@@ -2,6 +2,7 @@
// vim: ts=8 sw=2 smarttab ft=cpp
#include "services/svc_zone.h"
+#include "rgw_account.h"
#include "rgw_b64.h"
#include "rgw_sal.h"
#include "rgw_pubsub.h"
@@ -11,7 +12,9 @@
#include "rgw_arn.h"
#include "rgw_pubsub_push.h"
#include "rgw_bucket.h"
+#include "driver/rados/rgw_notify.h"
#include "common/errno.h"
+#include "include/function2.hpp"
#include <regex>
#include <algorithm>
@@ -26,6 +29,16 @@ std::string get_topic_metadata_key(std::string_view tenant,
return string_cat_reserve(tenant, topic_tenant_delim, topic_name);
}
+std::string get_topic_metadata_key(const rgw_pubsub_topic& topic)
+{
+ // use account id or tenant name
+ std::string_view tenant = std::visit(fu2::overload(
+ [] (const rgw_user& u) -> std::string_view { return u.tenant; },
+ [] (const rgw_account_id& a) -> std::string_view { return a; }
+ ), topic.owner);
+ return get_topic_metadata_key(tenant, topic.name);
+}
+
void parse_topic_metadata_key(const std::string& key,
std::string& tenant,
std::string& name)
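
The get_topic_metadata_key() overload above dispatches on the owner variant with std::visit plus fu2::overload. A self-contained sketch of the same pattern, with a local overload helper in place of function2 and toy types (':' as the tenant delimiter, matching the "tenant:name" key format noted in the header):

#include <iostream>
#include <string>
#include <string_view>
#include <variant>

struct rgw_user_t { std::string tenant, id; };   // toy stand-ins,
using account_id_t = std::string;                // not Ceph types
using owner_t = std::variant<rgw_user_t, account_id_t>;

template <class... Fs> struct overload : Fs... { using Fs::operator()...; };
template <class... Fs> overload(Fs...) -> overload<Fs...>;

std::string metadata_key(const owner_t& owner, std::string_view topic) {
  // use the account id itself, or the user's tenant name
  std::string_view tenant = std::visit(overload{
      [](const rgw_user_t& u) -> std::string_view { return u.tenant; },
      [](const account_id_t& a) -> std::string_view { return a; }
  }, owner);
  return std::string(tenant) + ':' + std::string(topic);
}

int main() {
  std::cout << metadata_key(rgw_user_t{"acme", "alice"}, "events") << '\n';
  std::cout << metadata_key(account_id_t{"RGW00000000000000001"}, "events") << '\n';
}
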
@@ -367,7 +380,7 @@ void rgw_pubsub_s3_event::dump(Formatter *f) const {
void rgw_pubsub_topic::dump(Formatter *f) const
{
- encode_json("user", user, f);
+ encode_json("owner", owner, f);
encode_json("name", name, f);
encode_json("dest", dest, f);
encode_json("arn", arn, f);
@@ -377,7 +390,7 @@ void rgw_pubsub_topic::dump(Formatter *f) const
void rgw_pubsub_topic::dump_xml(Formatter *f) const
{
- encode_xml("User", user, f);
+ encode_xml("User", to_string(owner), f);
encode_xml("Name", name, f);
encode_xml("EndPoint", dest, f);
encode_xml("TopicArn", arn, f);
@@ -395,9 +408,7 @@ void encode_xml_key_value_entry(const std::string& key, const std::string& value
void rgw_pubsub_topic::dump_xml_as_attributes(Formatter *f) const
{
f->open_array_section("Attributes");
- std::string str_user;
- user.to_str(str_user);
- encode_xml_key_value_entry("User", str_user, f);
+ encode_xml_key_value_entry("User", to_string(owner), f);
encode_xml_key_value_entry("Name", name, f);
encode_xml_key_value_entry("EndPoint", dest.to_json_str(), f);
encode_xml_key_value_entry("TopicArn", arn, f);
@@ -408,7 +419,7 @@ void rgw_pubsub_topic::dump_xml_as_attributes(Formatter *f) const
}
void rgw_pubsub_topic::decode_json(JSONObj* f) {
- JSONDecoder::decode_json("user", user, f);
+ JSONDecoder::decode_json("owner", owner, f);
JSONDecoder::decode_json("name", name, f);
JSONDecoder::decode_json("dest", dest, f);
JSONDecoder::decode_json("arn", arn, f);
@@ -468,6 +479,7 @@ void rgw_pubsub_dest::dump(Formatter *f) const
encode_json("push_endpoint_topic", arn_topic, f);
encode_json("stored_secret", stored_secret, f);
encode_json("persistent", persistent, f);
+ encode_json("persistent_queue", persistent_queue, f);
encode_json("time_to_live", time_to_live!=DEFAULT_GLOBAL_VALUE? std::to_string(time_to_live): DEFAULT_CONFIG, f);
encode_json("max_retries", max_retries!=DEFAULT_GLOBAL_VALUE? std::to_string(max_retries): DEFAULT_CONFIG, f);
encode_json("retry_sleep_duration", retry_sleep_duration!=DEFAULT_GLOBAL_VALUE? std::to_string(retry_sleep_duration): DEFAULT_CONFIG, f);
@@ -515,6 +527,7 @@ void rgw_pubsub_dest::decode_json(JSONObj* f) {
JSONDecoder::decode_json("push_endpoint_topic", arn_topic, f);
JSONDecoder::decode_json("stored_secret", stored_secret, f);
JSONDecoder::decode_json("persistent", persistent, f);
+ JSONDecoder::decode_json("persistent_queue", persistent_queue, f);
std::string ttl;
JSONDecoder::decode_json("time_to_live", ttl, f);
time_to_live = ttl == DEFAULT_CONFIG ? DEFAULT_GLOBAL_VALUE : std::stoul(ttl);
@@ -530,10 +543,6 @@ void rgw_pubsub_dest::decode_json(JSONObj* f) {
: std::stoul(sleep_dur);
}
-RGWPubSub::RGWPubSub(rgw::sal::Driver* _driver, const std::string& _tenant)
- : driver(_driver), tenant(_tenant)
-{}
-
RGWPubSub::RGWPubSub(rgw::sal::Driver* _driver,
const std::string& _tenant,
const rgw::SiteConfig& site)
@@ -547,6 +556,12 @@ int RGWPubSub::get_topics(const DoutPrefixProvider* dpp,
rgw_pubsub_topics& result, std::string& next_marker,
optional_yield y) const
{
+ if (rgw::account::validate_id(tenant)) {
+ // if our tenant is an account, return the account listing
+ return list_account_topics(dpp, start_marker, max_items,
+ result, next_marker, y);
+ }
+
if (!use_notification_v2 || driver->stat_topics_v1(tenant, y, dpp) != -ENOENT) {
// in case of v1 or during migration we use v1 topics
// v1 returns all topics, ignoring marker/max_items
@@ -595,6 +610,36 @@ int RGWPubSub::get_topics(const DoutPrefixProvider* dpp,
return ret;
}
+int RGWPubSub::list_account_topics(const DoutPrefixProvider* dpp,
+ const std::string& start_marker,
+ int max_items, rgw_pubsub_topics& result,
+ std::string& next_marker,
+ optional_yield y) const
+{
+ if (max_items > 1000) {
+ max_items = 1000;
+ }
+
+ rgw::sal::TopicList listing;
+ int ret = driver->list_account_topics(dpp, y, tenant, start_marker,
+ max_items, listing);
+ if (ret < 0) {
+ return ret;
+ }
+
+ for (const auto& topic_name : listing.topics) {
+ rgw_pubsub_topic topic;
+ int r = get_topic(dpp, topic_name, topic, y, nullptr);
+ if (r < 0) {
+ continue;
+ }
+ result.topics[topic_name] = std::move(topic);
+ }
+
+ next_marker = std::move(listing.next_marker);
+ return 0;
+}
+
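
list_account_topics() above clamps max_items to 1000 and reports a next_marker for resumption. A toy caller loop over such a marker-based listing (ListPage and list_page are illustrative stand-ins, not Ceph symbols):

#include <iostream>
#include <string>
#include <vector>

struct ListPage {
  std::vector<std::string> topics;
  std::string next_marker;   // empty once the listing is complete
};

// toy data source standing in for driver->list_account_topics()
ListPage list_page(const std::string& marker, int max_items) {
  static const std::vector<std::string> all = {"t1", "t2", "t3"};
  ListPage page;
  size_t i = marker.empty() ? 0 : std::stoul(marker);
  for (int n = 0; i < all.size() && n < max_items; ++i, ++n)
    page.topics.push_back(all[i]);
  if (i < all.size())
    page.next_marker = std::to_string(i);
  return page;
}

int main() {
  std::string marker;   // start from the beginning
  do {
    ListPage page = list_page(marker, 2);   // the real API caps pages at 1000
    for (const auto& name : page.topics)
      std::cout << name << '\n';
    marker = page.next_marker;
  } while (!marker.empty());
}
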
int RGWPubSub::read_topics_v1(const DoutPrefixProvider *dpp, rgw_pubsub_topics& result,
RGWObjVersionTracker *objv_tracker, optional_yield y) const
{
@@ -957,9 +1002,9 @@ int RGWPubSub::Bucket::remove_notifications(const DoutPrefixProvider *dpp, optio
return 0;
}
-int RGWPubSub::create_topic(const DoutPrefixProvider* dpp,
- const rgw_pubsub_topic& topic,
- optional_yield y) const {
+int RGWPubSub::create_topic_v2(const DoutPrefixProvider* dpp,
+ const rgw_pubsub_topic& topic,
+ optional_yield y) const {
RGWObjVersionTracker objv_tracker;
objv_tracker.generate_new_write_ver(dpp->get_cct());
constexpr bool exclusive = false;
@@ -975,7 +1020,7 @@ int RGWPubSub::create_topic(const DoutPrefixProvider* dpp,
const std::string& name,
const rgw_pubsub_dest& dest, const std::string& arn,
const std::string& opaque_data,
- const rgw_user& user,
+ const rgw_owner& owner,
const std::string& policy_text,
optional_yield y) const {
if (use_notification_v2) {
@@ -985,13 +1030,13 @@ int RGWPubSub::create_topic(const DoutPrefixProvider* dpp,
return -ERR_SERVICE_UNAVAILABLE;
}
rgw_pubsub_topic new_topic;
- new_topic.user = user;
+ new_topic.owner = owner;
new_topic.name = name;
new_topic.dest = dest;
new_topic.arn = arn;
new_topic.opaque_data = opaque_data;
new_topic.policy_text = policy_text;
- return create_topic(dpp, new_topic, y);
+ return create_topic_v2(dpp, new_topic, y);
}
RGWObjVersionTracker objv_tracker;
rgw_pubsub_topics topics;
@@ -1004,7 +1049,7 @@ int RGWPubSub::create_topic(const DoutPrefixProvider* dpp,
}
rgw_pubsub_topic& new_topic = topics.topics[name];
- new_topic.user = user;
+ new_topic.owner = owner;
new_topic.name = name;
new_topic.dest = dest;
new_topic.arn = arn;
@@ -1042,7 +1087,17 @@ int RGWPubSub::remove_topic_v2(const DoutPrefixProvider* dpp,
<< dendl;
return ret;
}
- return ret;
+
+ const rgw_pubsub_dest& dest = topic.dest;
+ if (!dest.push_endpoint.empty() && dest.persistent &&
+ !dest.persistent_queue.empty()) {
+ ret = rgw::notify::remove_persistent_topic(dest.persistent_queue, y);
+ if (ret < 0 && ret != -ENOENT) {
+ ldpp_dout(dpp, 1) << "WARNING: failed to remove queue for "
+ "persistent topic: " << cpp_strerror(ret) << dendl;
+ } // not fatal
+ }
+ return 0;
}
int RGWPubSub::remove_topic(const DoutPrefixProvider *dpp, const std::string& name, optional_yield y) const
@@ -1068,7 +1123,12 @@ int RGWPubSub::remove_topic(const DoutPrefixProvider *dpp, const std::string& na
return 0;
}
- topics.topics.erase(name);
+ auto t = topics.topics.find(name);
+ if (t == topics.topics.end()) {
+ return -ENOENT;
+ }
+ const rgw_pubsub_dest dest = std::move(t->second.dest);
+ topics.topics.erase(t);
ret = write_topics_v1(dpp, topics, &objv_tracker, y);
if (ret < 0) {
@@ -1076,5 +1136,13 @@ int RGWPubSub::remove_topic(const DoutPrefixProvider *dpp, const std::string& na
return ret;
}
+ if (!dest.push_endpoint.empty() && dest.persistent &&
+ !dest.persistent_queue.empty()) {
+ ret = rgw::notify::remove_persistent_topic(dest.persistent_queue, y);
+ if (ret < 0 && ret != -ENOENT) {
+ ldpp_dout(dpp, 1) << "WARNING: failed to remove queue for "
+ "persistent topic: " << cpp_strerror(ret) << dendl;
+ } // not fatal
+ }
return 0;
}
diff --git a/src/rgw/rgw_pubsub.h b/src/rgw/rgw_pubsub.h
index f03d7542b73..3835407eb45 100644
--- a/src/rgw/rgw_pubsub.h
+++ b/src/rgw/rgw_pubsub.h
@@ -3,7 +3,8 @@
#pragma once
-#include "rgw_sal.h"
+#include "common/versioned_variant.h"
+#include "rgw_sal_fwd.h"
#include "rgw_tools.h"
#include "rgw_zone.h"
#include "rgw_notify_event_type.h"
@@ -341,12 +342,14 @@ struct rgw_pubsub_dest {
std::string arn_topic;
bool stored_secret = false;
bool persistent = false;
+ // rados object name of the persistent queue in the 'notif' pool
+ std::string persistent_queue;
uint32_t time_to_live;
uint32_t max_retries;
uint32_t retry_sleep_duration;
void encode(bufferlist& bl) const {
- ENCODE_START(6, 1, bl);
+ ENCODE_START(7, 1, bl);
encode("", bl);
encode("", bl);
encode(push_endpoint, bl);
@@ -357,6 +360,7 @@ struct rgw_pubsub_dest {
encode(time_to_live, bl);
encode(max_retries, bl);
encode(retry_sleep_duration, bl);
+ encode(persistent_queue, bl);
ENCODE_FINISH(bl);
}
@@ -383,6 +387,13 @@ struct rgw_pubsub_dest {
decode(max_retries, bl);
decode(retry_sleep_duration, bl);
}
+ if (struct_v >= 7) {
+ decode(persistent_queue, bl);
+ } else if (persistent) {
+ // persistent topics created before v7 did not support tenant namespacing.
+ // continue to use 'arn_topic' alone as the queue's rados object name
+ persistent_queue = arn_topic;
+ }
DECODE_FINISH(bl);
}
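
The decode above is version-gated: a v7 reader accepts pre-v7 payloads by falling back to arn_topic as the queue name for persistent topics. A simplified stand-alone model of that compatibility rule (toy representation, not Ceph's real ENCODE/DECODE macros):

#include <cassert>
#include <string>

struct Dest {
  std::string arn_topic;
  bool persistent = false;
  std::string persistent_queue;   // field added in struct_v 7

  void decode(int struct_v, const std::string& arn, bool pers,
              const std::string* queue /* present only when v >= 7 */) {
    arn_topic = arn;
    persistent = pers;
    if (struct_v >= 7) {
      persistent_queue = *queue;
    } else if (persistent) {
      // pre-v7 persistent topics used arn_topic as the queue's object name
      persistent_queue = arn_topic;
    }
  }
};

int main() {
  Dest d6, d7;
  d6.decode(6, "mytopic", true, nullptr);
  assert(d6.persistent_queue == "mytopic");
  const std::string q = "tenant:mytopic";
  d7.decode(7, "mytopic", true, &q);
  assert(d7.persistent_queue == "tenant:mytopic");
}
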
@@ -394,7 +405,7 @@ struct rgw_pubsub_dest {
WRITE_CLASS_ENCODER(rgw_pubsub_dest)
struct rgw_pubsub_topic {
- rgw_user user;
+ rgw_owner owner;
std::string name;
rgw_pubsub_dest dest;
std::string arn;
@@ -403,7 +414,8 @@ struct rgw_pubsub_topic {
void encode(bufferlist& bl) const {
ENCODE_START(4, 1, bl);
- encode(user, bl);
+ // converted from rgw_user to rgw_owner
+ ceph::converted_variant::encode(owner, bl);
encode(name, bl);
encode(dest, bl);
encode(arn, bl);
@@ -414,7 +426,8 @@ struct rgw_pubsub_topic {
void decode(bufferlist::const_iterator& bl) {
DECODE_START(4, bl);
- decode(user, bl);
+ // converted from rgw_user to rgw_owner
+ ceph::converted_variant::decode(owner, bl);
decode(name, bl);
if (struct_v >= 2) {
decode(dest, bl);
@@ -429,18 +442,10 @@ struct rgw_pubsub_topic {
DECODE_FINISH(bl);
}
- std::string to_str() const {
- return user.tenant + "/" + name;
- }
-
void dump(Formatter *f) const;
void dump_xml(Formatter *f) const;
void dump_xml_as_attributes(Formatter *f) const;
void decode_json(JSONObj* obj);
-
- bool operator<(const rgw_pubsub_topic& t) const {
- return to_str().compare(t.to_str());
- }
};
WRITE_CLASS_ENCODER(rgw_pubsub_topic)
@@ -567,12 +572,28 @@ class RGWPubSub
int write_topics_v1(const DoutPrefixProvider *dpp, const rgw_pubsub_topics& topics,
RGWObjVersionTracker* objv_tracker, optional_yield y) const;
-public:
- RGWPubSub(rgw::sal::Driver* _driver, const std::string& tenant);
+ // remove a topic according to its name
+ // if the topic does not exist it is a no-op (considered success)
+ // return 0 on success, error code otherwise
+ int remove_topic_v2(const DoutPrefixProvider* dpp,
+ const std::string& name,
+ optional_yield y) const;
+ // create a topic with a name only
+ // if the topic already exists it is a no-op (considered success)
+ // return 0 on success, error code otherwise
+ int create_topic_v2(const DoutPrefixProvider* dpp,
+ const rgw_pubsub_topic& topic,
+ optional_yield y) const;
- RGWPubSub(rgw::sal::Driver* _driver,
- const std::string& _tenant,
- const rgw::SiteConfig& site);
+ int list_account_topics(const DoutPrefixProvider* dpp,
+ const std::string& start_marker, int max_items,
+ rgw_pubsub_topics& result, std::string& next_marker,
+ optional_yield y) const;
+
+public:
+ RGWPubSub(rgw::sal::Driver* _driver,
+ const std::string& _tenant,
+ const rgw::SiteConfig& site);
class Bucket {
friend class RGWPubSub;
@@ -642,24 +663,12 @@ public:
// return 0 on success, error code otherwise
int create_topic(const DoutPrefixProvider* dpp, const std::string& name,
const rgw_pubsub_dest& dest, const std::string& arn,
- const std::string& opaque_data, const rgw_user& user,
+ const std::string& opaque_data, const rgw_owner& owner,
const std::string& policy_text, optional_yield y) const;
// remove a topic according to its name
// if the topic does not exist it is a no-op (considered success)
// return 0 on success, error code otherwise
int remove_topic(const DoutPrefixProvider *dpp, const std::string& name, optional_yield y) const;
- // remove a topic according to its name
- // if the topic does not exists it is a no-op (considered success)
- // return 0 on success, error code otherwise
- int remove_topic_v2(const DoutPrefixProvider* dpp,
- const std::string& name,
- optional_yield y) const;
- // create a topic with a name only
- // if the topic already exists it is a no-op (considered success)
- // return 0 on success, error code otherwise
- int create_topic(const DoutPrefixProvider* dpp,
- const rgw_pubsub_topic& topic,
- optional_yield y) const;
};
namespace rgw::notify {
@@ -692,8 +701,9 @@ int get_bucket_notifications(const DoutPrefixProvider* dpp,
rgw_pubsub_bucket_topics& bucket_topics);
// format and parse topic metadata keys as tenant:name
-std::string get_topic_metadata_key(std::string_view topic_name,
- std::string_view tenant);
+std::string get_topic_metadata_key(std::string_view tenant,
+ std::string_view topic_name);
+std::string get_topic_metadata_key(const rgw_pubsub_topic& topic);
void parse_topic_metadata_key(const std::string& key,
std::string& tenant_name,
std::string& topic_name);
diff --git a/src/rgw/rgw_quota.cc b/src/rgw/rgw_quota.cc
index 836086b5be2..eadd712d664 100644
--- a/src/rgw/rgw_quota.cc
+++ b/src/rgw/rgw_quota.cc
@@ -14,6 +14,7 @@
*/
+#include "include/function2.hpp"
#include "include/utime.h"
#include "common/lru_map.h"
#include "common/RefCountedObj.h"
@@ -66,14 +67,14 @@ protected:
}
};
- virtual int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp) = 0;
+ virtual int fetch_stats_from_storage(const rgw_owner& owner, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp) = 0;
- virtual bool map_find(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) = 0;
+ virtual bool map_find(const rgw_owner& owner, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) = 0;
- virtual bool map_find_and_update(const rgw_user& user, const rgw_bucket& bucket, typename lru_map<T, RGWQuotaCacheStats>::UpdateContext *ctx) = 0;
- virtual void map_add(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) = 0;
+ virtual bool map_find_and_update(const rgw_owner& owner, const rgw_bucket& bucket, typename lru_map<T, RGWQuotaCacheStats>::UpdateContext *ctx) = 0;
+ virtual void map_add(const rgw_owner& owner, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) = 0;
- virtual void data_modified(const rgw_user& user, rgw_bucket& bucket) {}
+ virtual void data_modified(const rgw_owner& owner, const rgw_bucket& bucket) {}
public:
RGWQuotaCache(rgw::sal::Driver* _driver, int size) : driver(_driver), stats_map(size) {
async_refcount = new RefCountedWaitObject;
@@ -82,54 +83,54 @@ public:
async_refcount->put_wait(); /* wait for all pending async requests to complete */
}
- int get_stats(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y,
+ int get_stats(const rgw_owner& owner, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y,
const DoutPrefixProvider* dpp);
- void adjust_stats(const rgw_user& user, rgw_bucket& bucket, int objs_delta, uint64_t added_bytes, uint64_t removed_bytes);
+ void adjust_stats(const rgw_owner& owner, rgw_bucket& bucket, int objs_delta, uint64_t added_bytes, uint64_t removed_bytes);
- void set_stats(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs, const RGWStorageStats& stats);
- int async_refresh(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs);
- void async_refresh_response(const rgw_user& user, rgw_bucket& bucket, const RGWStorageStats& stats);
- void async_refresh_fail(const rgw_user& user, rgw_bucket& bucket);
+ void set_stats(const rgw_owner& owner, const rgw_bucket& bucket, RGWQuotaCacheStats& qs, const RGWStorageStats& stats);
+ int async_refresh(const rgw_owner& owner, const rgw_bucket& bucket, RGWQuotaCacheStats& qs);
+ void async_refresh_response(const rgw_owner& owner, rgw_bucket& bucket, const RGWStorageStats& stats);
+ void async_refresh_fail(const rgw_owner& owner, rgw_bucket& bucket);
/// start an async refresh that will eventually call async_refresh_response or
/// async_refresh_fail. hold a reference to the waiter until completion
- virtual int init_refresh(const rgw_user& user, const rgw_bucket& bucket,
+ virtual int init_refresh(const rgw_owner& owner, const rgw_bucket& bucket,
boost::intrusive_ptr<RefCountedWaitObject> waiter) = 0;
};
template<class T>
-int RGWQuotaCache<T>::async_refresh(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs)
+int RGWQuotaCache<T>::async_refresh(const rgw_owner& owner, const rgw_bucket& bucket, RGWQuotaCacheStats& qs)
{
/* protect against multiple updates */
StatsAsyncTestSet test_update;
- if (!map_find_and_update(user, bucket, &test_update)) {
+ if (!map_find_and_update(owner, bucket, &test_update)) {
/* most likely we just raced with another update */
return 0;
}
- return init_refresh(user, bucket, async_refcount);
+ return init_refresh(owner, bucket, async_refcount);
}
template<class T>
-void RGWQuotaCache<T>::async_refresh_fail(const rgw_user& user, rgw_bucket& bucket)
+void RGWQuotaCache<T>::async_refresh_fail(const rgw_owner& owner, rgw_bucket& bucket)
{
ldout(driver->ctx(), 20) << "async stats refresh failed for bucket=" << bucket << dendl;
}
template<class T>
-void RGWQuotaCache<T>::async_refresh_response(const rgw_user& user, rgw_bucket& bucket, const RGWStorageStats& stats)
+void RGWQuotaCache<T>::async_refresh_response(const rgw_owner& owner, rgw_bucket& bucket, const RGWStorageStats& stats)
{
ldout(driver->ctx(), 20) << "async stats refresh response for bucket=" << bucket << dendl;
RGWQuotaCacheStats qs;
- map_find(user, bucket, qs);
+ map_find(owner, bucket, qs);
- set_stats(user, bucket, qs, stats);
+ set_stats(owner, bucket, qs, stats);
}
template<class T>
-void RGWQuotaCache<T>::set_stats(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs, const RGWStorageStats& stats)
+void RGWQuotaCache<T>::set_stats(const rgw_owner& owner, const rgw_bucket& bucket, RGWQuotaCacheStats& qs, const RGWStorageStats& stats)
{
qs.stats = stats;
qs.expiration = ceph_clock_now();
@@ -137,16 +138,16 @@ void RGWQuotaCache<T>::set_stats(const rgw_user& user, const rgw_bucket& bucket,
qs.expiration += driver->ctx()->_conf->rgw_bucket_quota_ttl;
qs.async_refresh_time += driver->ctx()->_conf->rgw_bucket_quota_ttl / 2;
- map_add(user, bucket, qs);
+ map_add(owner, bucket, qs);
}
template<class T>
-int RGWQuotaCache<T>::get_stats(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider* dpp) {
+int RGWQuotaCache<T>::get_stats(const rgw_owner& owner, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider* dpp) {
RGWQuotaCacheStats qs;
utime_t now = ceph_clock_now();
- if (map_find(user, bucket, qs)) {
+ if (map_find(owner, bucket, qs)) {
if (qs.async_refresh_time.sec() > 0 && now >= qs.async_refresh_time) {
- int r = async_refresh(user, bucket, qs);
+ int r = async_refresh(owner, bucket, qs);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: quota async refresh returned ret=" << r << dendl;
@@ -160,11 +161,11 @@ int RGWQuotaCache<T>::get_stats(const rgw_user& user, const rgw_bucket& bucket,
}
}
- int ret = fetch_stats_from_storage(user, bucket, stats, y, dpp);
+ int ret = fetch_stats_from_storage(owner, bucket, stats, y, dpp);
if (ret < 0 && ret != -ENOENT)
return ret;
- set_stats(user, bucket, qs, stats);
+ set_stats(owner, bucket, qs, stats);
return 0;
}
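
get_stats() above serves cached stats until they expire, but schedules an async refresh once half the TTL has elapsed, so hot entries are usually refreshed before they ever expire. A toy model of that policy (illustrative values, not the real cache):

#include <chrono>
#include <iostream>

using Clock = std::chrono::steady_clock;

struct Entry {
  Clock::time_point expiration;           // hard TTL
  Clock::time_point async_refresh_time;   // TTL/2: refresh early, keep serving
};

bool needs_async_refresh(const Entry& e, Clock::time_point now) {
  return now >= e.async_refresh_time && now < e.expiration;
}

int main() {
  const auto ttl = std::chrono::seconds(60);
  Entry e;
  const auto now = Clock::now();
  e.expiration = now + ttl;
  e.async_refresh_time = now + ttl / 2;

  std::cout << std::boolalpha
            << needs_async_refresh(e, now) << '\n'            // false: still fresh
            << needs_async_refresh(e, now + ttl / 2) << '\n'  // true: refresh in background
            << needs_async_refresh(e, now + ttl) << '\n';     // false: expired, fetch inline
}
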
@@ -212,42 +213,41 @@ public:
template<class T>
-void RGWQuotaCache<T>::adjust_stats(const rgw_user& user, rgw_bucket& bucket, int objs_delta,
+void RGWQuotaCache<T>::adjust_stats(const rgw_owner& owner, rgw_bucket& bucket, int objs_delta,
uint64_t added_bytes, uint64_t removed_bytes)
{
RGWQuotaStatsUpdate<T> update(objs_delta, added_bytes, removed_bytes);
- map_find_and_update(user, bucket, &update);
+ map_find_and_update(owner, bucket, &update);
- data_modified(user, bucket);
+ data_modified(owner, bucket);
}
class RGWBucketStatsCache : public RGWQuotaCache<rgw_bucket> {
protected:
- bool map_find(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) override {
+ bool map_find(const rgw_owner& owner, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) override {
return stats_map.find(bucket, qs);
}
- bool map_find_and_update(const rgw_user& user, const rgw_bucket& bucket, lru_map<rgw_bucket, RGWQuotaCacheStats>::UpdateContext *ctx) override {
+ bool map_find_and_update(const rgw_owner& owner, const rgw_bucket& bucket, lru_map<rgw_bucket, RGWQuotaCacheStats>::UpdateContext *ctx) override {
return stats_map.find_and_update(bucket, NULL, ctx);
}
- void map_add(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) override {
+ void map_add(const rgw_owner& owner, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) override {
stats_map.add(bucket, qs);
}
- int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp) override;
+ int fetch_stats_from_storage(const rgw_owner& owner, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp) override;
public:
explicit RGWBucketStatsCache(rgw::sal::Driver* _driver) : RGWQuotaCache<rgw_bucket>(_driver, _driver->ctx()->_conf->rgw_bucket_quota_cache_size) {
}
- int init_refresh(const rgw_user& user, const rgw_bucket& bucket,
+ int init_refresh(const rgw_owner& owner, const rgw_bucket& bucket,
boost::intrusive_ptr<RefCountedWaitObject> waiter) override;
};
-int RGWBucketStatsCache::fetch_stats_from_storage(const rgw_user& _u, const rgw_bucket& _b, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp)
+int RGWBucketStatsCache::fetch_stats_from_storage(const rgw_owner& owner, const rgw_bucket& _b, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp)
{
- std::unique_ptr<rgw::sal::User> user = driver->get_user(_u);
std::unique_ptr<rgw::sal::Bucket> bucket;
int r = driver->load_bucket(dpp, _b, &bucket, y);
@@ -289,26 +289,26 @@ int RGWBucketStatsCache::fetch_stats_from_storage(const rgw_user& _u, const rgw_
class BucketAsyncRefreshHandler : public rgw::sal::ReadStatsCB {
RGWBucketStatsCache* cache;
boost::intrusive_ptr<RefCountedWaitObject> waiter;
- rgw_user user;
+ rgw_owner owner;
rgw_bucket bucket;
public:
BucketAsyncRefreshHandler(RGWBucketStatsCache* cache,
boost::intrusive_ptr<RefCountedWaitObject> waiter,
- const rgw_user& user, const rgw_bucket& bucket)
- : cache(cache), waiter(std::move(waiter)), user(user), bucket(bucket) {}
+ const rgw_owner& owner, const rgw_bucket& bucket)
+ : cache(cache), waiter(std::move(waiter)), owner(owner), bucket(bucket) {}
void handle_response(int r, const RGWStorageStats& stats) override {
if (r < 0) {
- cache->async_refresh_fail(user, bucket);
+ cache->async_refresh_fail(owner, bucket);
return;
}
- cache->async_refresh_response(user, bucket, stats);
+ cache->async_refresh_response(owner, bucket, stats);
}
};
-int RGWBucketStatsCache::init_refresh(const rgw_user& user, const rgw_bucket& bucket,
+int RGWBucketStatsCache::init_refresh(const rgw_owner& owner, const rgw_bucket& bucket,
boost::intrusive_ptr<RefCountedWaitObject> waiter)
{
std::unique_ptr<rgw::sal::Bucket> rbucket;
@@ -328,7 +328,7 @@ int RGWBucketStatsCache::init_refresh(const rgw_user& user, const rgw_bucket& bu
}
boost::intrusive_ptr handler = new BucketAsyncRefreshHandler(
- this, std::move(waiter), user, bucket);
+ this, std::move(waiter), owner, bucket);
r = rbucket->read_stats_async(&dp, index, RGW_NO_SHARD, std::move(handler));
if (r < 0) {
@@ -339,36 +339,34 @@ int RGWBucketStatsCache::init_refresh(const rgw_user& user, const rgw_bucket& bu
return 0;
}
-class RGWUserStatsCache : public RGWQuotaCache<rgw_user> {
+class RGWOwnerStatsCache : public RGWQuotaCache<rgw_owner> {
const DoutPrefixProvider *dpp;
std::atomic<bool> down_flag = { false };
- ceph::shared_mutex mutex = ceph::make_shared_mutex("RGWUserStatsCache");
- map<rgw_bucket, rgw_user> modified_buckets;
+ ceph::shared_mutex mutex = ceph::make_shared_mutex("RGWOwnerStatsCache");
+ map<rgw_bucket, rgw_owner> modified_buckets;
/* thread, sync recent modified buckets info */
class BucketsSyncThread : public Thread {
CephContext *cct;
- RGWUserStatsCache *stats;
+ RGWOwnerStatsCache *stats;
- ceph::mutex lock = ceph::make_mutex("RGWUserStatsCache::BucketsSyncThread");
+ ceph::mutex lock = ceph::make_mutex("RGWOwnerStatsCache::BucketsSyncThread");
ceph::condition_variable cond;
public:
- BucketsSyncThread(CephContext *_cct, RGWUserStatsCache *_s) : cct(_cct), stats(_s) {}
+ BucketsSyncThread(CephContext *_cct, RGWOwnerStatsCache *_s) : cct(_cct), stats(_s) {}
void *entry() override {
ldout(cct, 20) << "BucketsSyncThread: start" << dendl;
do {
- map<rgw_bucket, rgw_user> buckets;
+ map<rgw_bucket, rgw_owner> buckets;
stats->swap_modified_buckets(buckets);
- for (map<rgw_bucket, rgw_user>::iterator iter = buckets.begin(); iter != buckets.end(); ++iter) {
- rgw_bucket bucket = iter->first;
- rgw_user& user = iter->second;
- ldout(cct, 20) << "BucketsSyncThread: sync user=" << user << " bucket=" << bucket << dendl;
+ for (const auto& [bucket, owner] : buckets) {
+ ldout(cct, 20) << "BucketsSyncThread: sync owner=" << owner << " bucket=" << bucket << dendl;
const DoutPrefix dp(cct, dout_subsys, "rgw bucket sync thread: ");
- int r = stats->sync_bucket(user, bucket, null_yield, &dp);
+ int r = stats->sync_bucket(owner, bucket, null_yield, &dp);
if (r < 0) {
ldout(cct, 0) << "WARNING: sync_bucket() returned r=" << r << dendl;
}
@@ -400,23 +398,27 @@ class RGWUserStatsCache : public RGWQuotaCache<rgw_user> {
* users that didn't have quota turned on before (or existed before the user objclass
* tracked stats) need to get their backend stats up to date.
*/
- class UserSyncThread : public Thread {
+ class OwnerSyncThread : public Thread {
CephContext *cct;
- RGWUserStatsCache *stats;
+ RGWOwnerStatsCache *stats;
+ const std::string metadata_section;
- ceph::mutex lock = ceph::make_mutex("RGWUserStatsCache::UserSyncThread");
+ ceph::mutex lock = ceph::make_mutex("RGWOwnerStatsCache::OwnerSyncThread");
ceph::condition_variable cond;
public:
- UserSyncThread(CephContext *_cct, RGWUserStatsCache *_s) : cct(_cct), stats(_s) {}
+ OwnerSyncThread(CephContext *_cct, RGWOwnerStatsCache *_s,
+ const std::string& metadata_section)
+ : cct(_cct), stats(_s), metadata_section(metadata_section)
+ {}
void *entry() override {
- ldout(cct, 20) << "UserSyncThread: start" << dendl;
+ ldout(cct, 20) << "OwnerSyncThread: start" << dendl;
do {
const DoutPrefix dp(cct, dout_subsys, "rgw user sync thread: ");
- int ret = stats->sync_all_users(&dp, null_yield);
+ int ret = stats->sync_all_owners(&dp, metadata_section);
if (ret < 0) {
- ldout(cct, 5) << "ERROR: sync_all_users() returned ret=" << ret << dendl;
+ ldout(cct, 5) << "ERROR: sync_all_owners() returned ret=" << ret << dendl;
}
if (stats->going_down())
@@ -425,7 +427,7 @@ class RGWUserStatsCache : public RGWQuotaCache<rgw_user> {
std::unique_lock l{lock};
cond.wait_for(l, std::chrono::seconds(cct->_conf->rgw_user_quota_sync_interval));
} while (!stats->going_down());
- ldout(cct, 20) << "UserSyncThread: done" << dendl;
+ ldout(cct, 20) << "OwnerSyncThread: done" << dendl;
return NULL;
}
@@ -436,29 +438,33 @@ class RGWUserStatsCache : public RGWQuotaCache<rgw_user> {
}
};
- BucketsSyncThread *buckets_sync_thread;
- UserSyncThread *user_sync_thread;
+ // TODO: AccountSyncThread and sync_all_accounts()
+
+ BucketsSyncThread* buckets_sync_thread = nullptr;
+ OwnerSyncThread* user_sync_thread = nullptr;
+ OwnerSyncThread* account_sync_thread = nullptr;
protected:
- bool map_find(const rgw_user& user,const rgw_bucket& bucket, RGWQuotaCacheStats& qs) override {
- return stats_map.find(user, qs);
+ bool map_find(const rgw_owner& owner,const rgw_bucket& bucket, RGWQuotaCacheStats& qs) override {
+ return stats_map.find(owner, qs);
}
- bool map_find_and_update(const rgw_user& user, const rgw_bucket& bucket, lru_map<rgw_user, RGWQuotaCacheStats>::UpdateContext *ctx) override {
- return stats_map.find_and_update(user, NULL, ctx);
+ bool map_find_and_update(const rgw_owner& owner, const rgw_bucket& bucket, lru_map<rgw_owner, RGWQuotaCacheStats>::UpdateContext *ctx) override {
+ return stats_map.find_and_update(owner, NULL, ctx);
}
- void map_add(const rgw_user& user, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) override {
- stats_map.add(user, qs);
+ void map_add(const rgw_owner& owner, const rgw_bucket& bucket, RGWQuotaCacheStats& qs) override {
+ stats_map.add(owner, qs);
}
- int fetch_stats_from_storage(const rgw_user& user, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp) override;
- int sync_bucket(const rgw_user& rgw_user, rgw_bucket& bucket, optional_yield y, const DoutPrefixProvider *dpp);
- int sync_user(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y);
- int sync_all_users(const DoutPrefixProvider *dpp, optional_yield y);
+ int fetch_stats_from_storage(const rgw_owner& owner, const rgw_bucket& bucket, RGWStorageStats& stats, optional_yield y, const DoutPrefixProvider *dpp) override;
+ int sync_bucket(const rgw_owner& owner, const rgw_bucket& bucket, optional_yield y, const DoutPrefixProvider *dpp);
+ int sync_owner(const DoutPrefixProvider *dpp, const rgw_owner& owner, optional_yield y);
+ int sync_all_owners(const DoutPrefixProvider *dpp,
+ const std::string& metadata_section);
- void data_modified(const rgw_user& user, rgw_bucket& bucket) override;
+ void data_modified(const rgw_owner& owner, const rgw_bucket& bucket) override;
- void swap_modified_buckets(map<rgw_bucket, rgw_user>& out) {
+ void swap_modified_buckets(map<rgw_bucket, rgw_owner>& out) {
std::unique_lock lock{mutex};
modified_buckets.swap(out);
}
@@ -476,24 +482,23 @@ protected:
}
public:
- RGWUserStatsCache(const DoutPrefixProvider *dpp, rgw::sal::Driver* _driver, bool quota_threads)
- : RGWQuotaCache<rgw_user>(_driver, _driver->ctx()->_conf->rgw_bucket_quota_cache_size), dpp(dpp)
+ RGWOwnerStatsCache(const DoutPrefixProvider *dpp, rgw::sal::Driver* _driver, bool quota_threads)
+ : RGWQuotaCache<rgw_owner>(_driver, _driver->ctx()->_conf->rgw_bucket_quota_cache_size), dpp(dpp)
{
if (quota_threads) {
buckets_sync_thread = new BucketsSyncThread(driver->ctx(), this);
buckets_sync_thread->create("rgw_buck_st_syn");
- user_sync_thread = new UserSyncThread(driver->ctx(), this);
+ user_sync_thread = new OwnerSyncThread(driver->ctx(), this, "user");
user_sync_thread->create("rgw_user_st_syn");
- } else {
- buckets_sync_thread = NULL;
- user_sync_thread = NULL;
+ account_sync_thread = new OwnerSyncThread(driver->ctx(), this, "account");
+ account_sync_thread->create("rgw_acct_st_syn");
}
}
- ~RGWUserStatsCache() override {
+ ~RGWOwnerStatsCache() override {
stop();
}
- int init_refresh(const rgw_user& user, const rgw_bucket& bucket,
+ int init_refresh(const rgw_owner& owner, const rgw_bucket& bucket,
boost::intrusive_ptr<RefCountedWaitObject> waiter) override;
bool going_down() {
@@ -507,105 +512,130 @@ public:
stop_thread(&buckets_sync_thread);
}
stop_thread(&user_sync_thread);
+ stop_thread(&account_sync_thread);
}
};
-class UserAsyncRefreshHandler : public rgw::sal::ReadStatsCB {
- RGWUserStatsCache* cache;
+class OwnerAsyncRefreshHandler : public rgw::sal::ReadStatsCB {
+ RGWOwnerStatsCache* cache;
boost::intrusive_ptr<RefCountedWaitObject> waiter;
rgw_bucket bucket;
- rgw_user user;
+ rgw_owner owner;
public:
- UserAsyncRefreshHandler(RGWUserStatsCache* cache,
- boost::intrusive_ptr<RefCountedWaitObject> waiter,
- const rgw_user& user, const rgw_bucket& bucket)
- : cache(cache), waiter(std::move(waiter)), bucket(bucket), user(user)
+ OwnerAsyncRefreshHandler(RGWOwnerStatsCache* cache,
+ boost::intrusive_ptr<RefCountedWaitObject> waiter,
+ const rgw_owner& owner, const rgw_bucket& bucket)
+ : cache(cache), waiter(std::move(waiter)), bucket(bucket), owner(owner)
{}
void handle_response(int r, const RGWStorageStats& stats) override;
};
-int RGWUserStatsCache::init_refresh(const rgw_user& user, const rgw_bucket& bucket,
- boost::intrusive_ptr<RefCountedWaitObject> waiter)
+int RGWOwnerStatsCache::init_refresh(const rgw_owner& owner, const rgw_bucket& bucket,
+ boost::intrusive_ptr<RefCountedWaitObject> waiter)
{
- boost::intrusive_ptr handler = new UserAsyncRefreshHandler(
- this, std::move(waiter), user, bucket);
+ boost::intrusive_ptr cb = new OwnerAsyncRefreshHandler(
+ this, std::move(waiter), owner, bucket);
- std::unique_ptr<rgw::sal::User> ruser = driver->get_user(user);
+ ldpp_dout(dpp, 20) << "initiating async quota refresh for owner=" << owner << dendl;
- ldpp_dout(dpp, 20) << "initiating async quota refresh for user=" << user << dendl;
- int r = ruser->read_stats_async(dpp, std::move(handler));
+ int r = driver->load_stats_async(dpp, owner, std::move(cb));
if (r < 0) {
- ldpp_dout(dpp, 0) << "could not get bucket info for user=" << user << dendl;
+ ldpp_dout(dpp, 0) << "could not read stats for owner=" << owner << dendl;
return r;
}
return 0;
}
-void UserAsyncRefreshHandler::handle_response(int r, const RGWStorageStats& stats)
+void OwnerAsyncRefreshHandler::handle_response(int r, const RGWStorageStats& stats)
{
if (r < 0) {
- cache->async_refresh_fail(user, bucket);
+ cache->async_refresh_fail(owner, bucket);
return;
}
- cache->async_refresh_response(user, bucket, stats);
+ cache->async_refresh_response(owner, bucket, stats);
}
-int RGWUserStatsCache::fetch_stats_from_storage(const rgw_user& _u,
- const rgw_bucket& _b,
- RGWStorageStats& stats,
- optional_yield y,
- const DoutPrefixProvider *dpp)
+int RGWOwnerStatsCache::fetch_stats_from_storage(const rgw_owner& owner,
+ const rgw_bucket& bucket,
+ RGWStorageStats& stats,
+ optional_yield y,
+ const DoutPrefixProvider *dpp)
{
- std::unique_ptr<rgw::sal::User> user = driver->get_user(_u);
- int r = user->read_stats(dpp, y, &stats);
+ ceph::real_time synced; // ignored
+ ceph::real_time updated; // ignored
+ int r = driver->load_stats(dpp, y, owner, stats, synced, updated);
if (r < 0) {
- ldpp_dout(dpp, 0) << "could not get user stats for user=" << user << dendl;
+ ldpp_dout(dpp, 0) << "could not read stats for owner " << owner << dendl;
return r;
}
return 0;
}
-int RGWUserStatsCache::sync_bucket(const rgw_user& _u, rgw_bucket& _b, optional_yield y, const DoutPrefixProvider *dpp)
+int RGWOwnerStatsCache::sync_bucket(const rgw_owner& owner, const rgw_bucket& b,
+ optional_yield y, const DoutPrefixProvider *dpp)
{
- std::unique_ptr<rgw::sal::User> user = driver->get_user(_u);
std::unique_ptr<rgw::sal::Bucket> bucket;
- int r = driver->load_bucket(dpp, _b, &bucket, y);
+ int r = driver->load_bucket(dpp, b, &bucket, y);
if (r < 0) {
- ldpp_dout(dpp, 0) << "could not get bucket info for bucket=" << _b << " r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "could not get bucket info for bucket=" << b << " r=" << r << dendl;
return r;
}
RGWBucketEnt ent;
- r = bucket->sync_user_stats(dpp, y, &ent);
+ r = bucket->sync_owner_stats(dpp, y, &ent);
if (r < 0) {
- ldpp_dout(dpp, 0) << "ERROR: sync_user_stats() for user=" << _u << ", bucket=" << bucket << " returned " << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: sync_owner_stats() for bucket=" << bucket << " returned " << r << dendl;
return r;
}
return bucket->check_bucket_shards(dpp, ent.count, y);
}
-int RGWUserStatsCache::sync_user(const DoutPrefixProvider *dpp, const rgw_user& _u, optional_yield y)
+// for account owners, we need to look up the tenant name by account id
+static int get_owner_tenant(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ rgw::sal::Driver* driver,
+ const rgw_owner& owner,
+ std::string& tenant)
+{
+ return std::visit(fu2::overload(
+ [&] (const rgw_user& user) {
+ tenant = user.tenant;
+ return 0;
+ },
+ [&] (const rgw_account_id& account) {
+ RGWAccountInfo info;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+ int ret = driver->load_account_by_id(dpp, y, account, info, attrs, objv);
+ if (ret >= 0) {
+ tenant = std::move(info.tenant);
+ }
+ return ret;
+ }), owner);
+}
+
+int RGWOwnerStatsCache::sync_owner(const DoutPrefixProvider *dpp,
+ const rgw_owner& owner, optional_yield y)
{
RGWStorageStats stats;
ceph::real_time last_stats_sync;
ceph::real_time last_stats_update;
- std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(_u.to_str()));
- int ret = user->read_stats(dpp, y, &stats, &last_stats_sync, &last_stats_update);
+ int ret = driver->load_stats(dpp, y, owner, stats, last_stats_sync, last_stats_update);
if (ret < 0) {
- ldpp_dout(dpp, 5) << "ERROR: can't read user header: ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "ERROR: can't read owner stats: ret=" << ret << dendl;
return ret;
}
if (!driver->ctx()->_conf->rgw_user_quota_sync_idle_users &&
last_stats_update < last_stats_sync) {
- ldpp_dout(dpp, 20) << "user is idle, not doing a full sync (user=" << user << ")" << dendl;
+ ldpp_dout(dpp, 20) << "owner is idle, not doing a full sync (owner=" << owner << ")" << dendl;
return 0;
}
@@ -613,9 +643,17 @@ int RGWUserStatsCache::sync_user(const DoutPrefixProvider *dpp, const rgw_user&
when_need_full_sync += make_timespan(driver->ctx()->_conf->rgw_user_quota_sync_wait_time);
// check if enough time passed since last full sync
- /* FIXME: missing check? */
+ if (when_need_full_sync > ceph::real_clock::now()) {
+ return 0;
+ }
+
+ std::string tenant;
+ ret = get_owner_tenant(dpp, y, driver, owner, tenant);
+ if (ret < 0) {
+ return ret;
+ }
- ret = rgw_user_sync_all_stats(dpp, driver, user.get(), y);
+ ret = rgw_sync_all_stats(dpp, y, driver, owner, tenant);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed user stats sync, ret=" << ret << dendl;
return ret;
@@ -624,12 +662,11 @@ int RGWUserStatsCache::sync_user(const DoutPrefixProvider *dpp, const rgw_user&
return 0;
}
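
The restored check above (replacing the old "FIXME: missing check?") skips the full sync while the configured wait window since the last sync has not yet elapsed. A minimal stand-alone sketch of that gating (toy durations):

#include <chrono>
#include <iostream>

using Clock = std::chrono::system_clock;

// stand-in for the when_need_full_sync comparison above
bool should_full_sync(Clock::time_point last_sync,
                      std::chrono::seconds wait_time,
                      Clock::time_point now) {
  return now >= last_sync + wait_time;
}

int main() {
  const auto now = Clock::now();
  const auto last = now - std::chrono::seconds(30);
  std::cout << std::boolalpha
            << should_full_sync(last, std::chrono::seconds(60), now) << '\n'   // false
            << should_full_sync(last, std::chrono::seconds(15), now) << '\n';  // true
}
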
-int RGWUserStatsCache::sync_all_users(const DoutPrefixProvider *dpp, optional_yield y)
+int RGWOwnerStatsCache::sync_all_owners(const DoutPrefixProvider *dpp,
+ const std::string& metadata_section)
{
- string key = "user";
void *handle;
-
- int ret = driver->meta_list_keys_init(dpp, key, string(), &handle);
+ int ret = driver->meta_list_keys_init(dpp, metadata_section, string(), &handle);
if (ret < 0) {
ldpp_dout(dpp, 10) << "ERROR: can't get key: ret=" << ret << dendl;
return ret;
@@ -643,30 +680,28 @@ int RGWUserStatsCache::sync_all_users(const DoutPrefixProvider *dpp, optional_yi
ret = driver->meta_list_keys_next(dpp, handle, max, keys, &truncated);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: lists_keys_next(): ret=" << ret << dendl;
- goto done;
+ break;
}
for (list<string>::iterator iter = keys.begin();
iter != keys.end() && !going_down();
++iter) {
- rgw_user user(*iter);
- ldpp_dout(dpp, 20) << "RGWUserStatsCache: sync user=" << user << dendl;
- int ret = sync_user(dpp, user, y);
- if (ret < 0) {
- ldpp_dout(dpp, 5) << "ERROR: sync_user() failed, user=" << user << " ret=" << ret << dendl;
-
- /* continuing to next user */
+ const rgw_owner owner = parse_owner(*iter);
+ ldpp_dout(dpp, 20) << "RGWOwnerStatsCache: sync owner=" << owner << dendl;
+ int r = sync_owner(dpp, owner, null_yield);
+ if (r < 0) {
+ ldpp_dout(dpp, 5) << "ERROR: sync_owner() failed, owner=" << owner
+ << " ret=" << r << dendl;
+ /* continuing to next owner */
continue;
}
}
} while (truncated);
- ret = 0;
-done:
driver->meta_list_keys_complete(handle);
return ret;
}
-void RGWUserStatsCache::data_modified(const rgw_user& user, rgw_bucket& bucket)
+void RGWOwnerStatsCache::data_modified(const rgw_owner& owner, const rgw_bucket& bucket)
{
/* racy, but it's ok */
mutex.lock_shared();
@@ -675,7 +710,7 @@ void RGWUserStatsCache::data_modified(const rgw_user& user, rgw_bucket& bucket)
if (need_update) {
std::unique_lock lock{mutex};
- modified_buckets[bucket] = user;
+ modified_buckets[bucket] = owner;
}
}
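
data_modified() above takes the shared lock for the cheap membership test and retakes the mutex exclusively only when an insert is actually needed; as its comment notes, the window between the two is a benign race. A self-contained sketch of that check-then-upgrade pattern (toy key types):

#include <iostream>
#include <map>
#include <shared_mutex>
#include <string>

std::shared_mutex mtx;
std::map<std::string, std::string> modified;   // bucket -> owner

void mark_modified(const std::string& bucket, const std::string& owner) {
  bool need_update;
  {
    std::shared_lock lock(mtx);   // readers can run in parallel
    need_update = modified.find(bucket) == modified.end();
  }
  if (need_update) {
    std::unique_lock lock(mtx);   // exclusive only for the write
    modified[bucket] = owner;     // may redo a racing insert; harmless
  }
}

int main() {
  mark_modified("photos", "alice");
  mark_modified("photos", "alice");   // second call takes only the shared lock
  std::cout << modified.size() << '\n';   // 1
}
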
@@ -837,7 +872,7 @@ const RGWQuotaInfoApplier& RGWQuotaInfoApplier::get_instance(
class RGWQuotaHandlerImpl : public RGWQuotaHandler {
rgw::sal::Driver* driver;
RGWBucketStatsCache bucket_stats_cache;
- RGWUserStatsCache user_stats_cache;
+ RGWOwnerStatsCache owner_stats_cache;
int check_quota(const DoutPrefixProvider *dpp,
const char * const entity,
@@ -872,12 +907,12 @@ class RGWQuotaHandlerImpl : public RGWQuotaHandler {
public:
RGWQuotaHandlerImpl(const DoutPrefixProvider *dpp, rgw::sal::Driver* _driver, bool quota_threads) : driver(_driver),
bucket_stats_cache(_driver),
- user_stats_cache(dpp, _driver, quota_threads) {}
+ owner_stats_cache(dpp, _driver, quota_threads) {}
int check_quota(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- rgw_bucket& bucket,
- RGWQuota& quota,
+ const rgw_owner& owner,
+ const rgw_bucket& bucket,
+ const RGWQuota& quota,
uint64_t num_objs,
uint64_t size, optional_yield y) override {
@@ -895,7 +930,7 @@ public:
const DoutPrefix dp(driver->ctx(), dout_subsys, "rgw quota handler: ");
if (quota.bucket_quota.enabled) {
RGWStorageStats bucket_stats;
- int ret = bucket_stats_cache.get_stats(user, bucket, bucket_stats, y, &dp);
+ int ret = bucket_stats_cache.get_stats(owner, bucket, bucket_stats, y, &dp);
if (ret < 0) {
return ret;
}
@@ -906,12 +941,12 @@ public:
}
if (quota.user_quota.enabled) {
- RGWStorageStats user_stats;
- int ret = user_stats_cache.get_stats(user, bucket, user_stats, y, &dp);
+ RGWStorageStats owner_stats;
+ int ret = owner_stats_cache.get_stats(owner, bucket, owner_stats, y, &dp);
if (ret < 0) {
return ret;
}
- ret = check_quota(dpp, "user", quota.user_quota, user_stats, num_objs, size);
+ ret = check_quota(dpp, "user", quota.user_quota, owner_stats, num_objs, size);
if (ret < 0) {
return ret;
}
@@ -919,9 +954,9 @@ public:
return 0;
}
- void update_stats(const rgw_user& user, rgw_bucket& bucket, int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) override {
- bucket_stats_cache.adjust_stats(user, bucket, obj_delta, added_bytes, removed_bytes);
- user_stats_cache.adjust_stats(user, bucket, obj_delta, added_bytes, removed_bytes);
+ void update_stats(const rgw_owner& owner, rgw_bucket& bucket, int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) override {
+ bucket_stats_cache.adjust_stats(owner, bucket, obj_delta, added_bytes, removed_bytes);
+ owner_stats_cache.adjust_stats(owner, bucket, obj_delta, added_bytes, removed_bytes);
}
void check_bucket_shards(const DoutPrefixProvider *dpp, uint64_t max_objs_per_shard,
diff --git a/src/rgw/rgw_quota.h b/src/rgw/rgw_quota.h
index 632cb48171b..838fb2439a9 100644
--- a/src/rgw/rgw_quota.h
+++ b/src/rgw/rgw_quota.h
@@ -30,15 +30,15 @@ public:
RGWQuotaHandler() {}
virtual ~RGWQuotaHandler() {
}
- virtual int check_quota(const DoutPrefixProvider *dpp, const rgw_user& bucket_owner, rgw_bucket& bucket,
- RGWQuota& quota,
+ virtual int check_quota(const DoutPrefixProvider *dpp, const rgw_owner& bucket_owner,
+ const rgw_bucket& bucket, const RGWQuota& quota,
uint64_t num_objs, uint64_t size, optional_yield y) = 0;
virtual void check_bucket_shards(const DoutPrefixProvider *dpp, uint64_t max_objs_per_shard,
uint64_t num_shards, uint64_t num_objs, bool is_multisite,
bool& need_resharding, uint32_t *suggested_num_shards) = 0;
- virtual void update_stats(const rgw_user& bucket_owner, rgw_bucket& bucket, int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) = 0;
+ virtual void update_stats(const rgw_owner& bucket_owner, rgw_bucket& bucket, int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) = 0;
static RGWQuotaHandler *generate_handler(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, bool quota_threads);
static void free_handler(RGWQuotaHandler *handler);
diff --git a/src/rgw/rgw_rest.cc b/src/rgw/rgw_rest.cc
index a7c35f3773c..3a0652e82e2 100644
--- a/src/rgw/rgw_rest.cc
+++ b/src/rgw/rgw_rest.cc
@@ -498,17 +498,26 @@ void dump_time(req_state *s, const char *name, real_time t)
s->formatter->dump_string(name, buf);
}
-void dump_owner(req_state *s, const rgw_user& id, const string& name,
+void dump_owner(req_state *s, const std::string& id, const string& name,
const char *section)
{
if (!section)
section = "Owner";
s->formatter->open_object_section(section);
- s->formatter->dump_string("ID", id.to_str());
- s->formatter->dump_string("DisplayName", name);
+ s->formatter->dump_string("ID", id);
+ if (!name.empty()) {
+ s->formatter->dump_string("DisplayName", name);
+ }
s->formatter->close_section();
}
+void dump_owner(req_state *s, const rgw_owner& owner, const string& name,
+ const char *section)
+{
+ std::string id = to_string(owner);
+ dump_owner(s, id, name, section);
+}
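+
+// Note: rgw_owner is the new owner type used throughout this series; it is
+// expected to be a variant over the legacy user id and the new account id,
+// with to_string() dispatching on the held alternative. A minimal sketch
+// under that assumption:
+//
+//   using rgw_account_id = std::string;
+//   using rgw_owner = std::variant<rgw_user, rgw_account_id>;
+//
+//   struct owner_to_string {
+//     std::string operator()(const rgw_user& u) const { return u.to_str(); } // "tenant$uid"
+//     std::string operator()(const rgw_account_id& a) const { return a; }
+//   };
+//   std::string to_string(const rgw_owner& o) {
+//     return std::visit(owner_to_string{}, o);
+//   }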
+
void dump_access_control(req_state *s, const char *origin,
const char *meth,
const char *hdr, const char *exp_hdr,
@@ -581,7 +590,7 @@ void end_header(req_state* s, RGWOp* op, const char *content_type,
dump_trans_id(s);
if ((!s->is_err()) && s->bucket &&
- (s->bucket->get_info().owner != s->user->get_id()) &&
+ (!s->auth.identity->is_owner_of(s->bucket->get_info().owner)) &&
(s->bucket->get_info().requester_pays)) {
dump_header(s, "x-amz-request-charged", "requester");
}
@@ -670,13 +679,13 @@ void abort_early(req_state *s, RGWOp* op, int err_no,
if (op != NULL) {
int new_err_no;
new_err_no = op->error_handler(err_no, &error_content, y);
- ldpp_dout(s, 1) << "op->ERRORHANDLER: err_no=" << err_no
+ ldpp_dout(s, 20) << "op->ERRORHANDLER: err_no=" << err_no
<< " new_err_no=" << new_err_no << dendl;
err_no = new_err_no;
} else if (handler != NULL) {
int new_err_no;
new_err_no = handler->error_handler(err_no, &error_content, y);
- ldpp_dout(s, 1) << "handler->ERRORHANDLER: err_no=" << err_no
+ ldpp_dout(s, 20) << "handler->ERRORHANDLER: err_no=" << err_no
<< " new_err_no=" << new_err_no << dendl;
err_no = new_err_no;
}
@@ -1865,20 +1874,6 @@ static http_op op_from_method(const char *method)
int RGWHandler_REST::init_permissions(RGWOp* op, optional_yield y)
{
if (op->get_type() == RGW_OP_CREATE_BUCKET) {
- // We don't need user policies in case of STS token returned by AssumeRole, hence the check for user type
- if (! s->user->get_id().empty() && s->auth.identity->get_identity_type() != TYPE_ROLE) {
- try {
- if (auto ret = s->user->read_attrs(s, y); ! ret) {
- auto user_policies = get_iam_user_policy_from_attr(s->cct, s->user->get_attrs(), s->user->get_tenant());
- s->iam_user_policies.insert(s->iam_user_policies.end(),
- std::make_move_iterator(user_policies.begin()),
- std::make_move_iterator(user_policies.end()));
-
- }
- } catch (const std::exception& e) {
- ldpp_dout(op, -1) << "Error reading IAM User Policy: " << e.what() << dendl;
- }
- }
rgw_build_iam_environment(driver, s);
return 0;
}
diff --git a/src/rgw/rgw_rest.h b/src/rgw/rgw_rest.h
index 8ee587e7c7b..fae60c50f4d 100644
--- a/src/rgw/rgw_rest.h
+++ b/src/rgw/rgw_rest.h
@@ -705,7 +705,9 @@ extern void end_header(req_state *s,
bool force_no_error = false);
extern void dump_start(req_state *s);
extern void list_all_buckets_start(req_state *s);
-extern void dump_owner(req_state *s, const rgw_user& id,
+extern void dump_owner(req_state *s, const std::string& id,
+ const std::string& name, const char *section = NULL);
+extern void dump_owner(req_state *s, const rgw_owner& id,
const std::string& name, const char *section = NULL);
inline void dump_urlsafe(req_state *s, bool encode_key, const char* key, const std::string& val, bool encode_slash = true) {
if (encode_key) {
diff --git a/src/rgw/rgw_rest_account.cc b/src/rgw/rgw_rest_account.cc
new file mode 100644
index 00000000000..1e1d367c4a7
--- /dev/null
+++ b/src/rgw/rgw_rest_account.cc
@@ -0,0 +1,241 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2020 SUSE LLC
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "rgw_rest_account.h"
+#include "rgw_account.h"
+#include "rgw_process_env.h"
+
+class RGWOp_Account_Create : public RGWRESTOp {
+public:
+ int check_caps(const RGWUserCaps& caps) override {
+ return caps.check_cap("accounts", RGW_CAP_WRITE);
+ }
+
+ void execute(optional_yield y) override;
+
+ const char* name() const override { return "create_account"; }
+};
+
+void RGWOp_Account_Create::execute(optional_yield y)
+{
+ rgw::account::AdminOpState op_state;
+ RESTArgs::get_string(s, "id", "", &op_state.account_id);
+ RESTArgs::get_string(s, "tenant", "", &op_state.tenant);
+ RESTArgs::get_string(s, "name", "", &op_state.account_name);
+ RESTArgs::get_string(s, "email", "", &op_state.email);
+
+ uint32_t max_users = 0;
+ bool has_max_users = false;
+ RESTArgs::get_uint32(s, "max-users", 0, &max_users, &has_max_users);
+ if (has_max_users) {
+ op_state.max_users = max_users;
+ }
+
+ uint32_t max_roles = 0;
+ bool has_max_roles = false;
+ RESTArgs::get_uint32(s, "max-roles", 0, &max_roles, &has_max_roles);
+ if (has_max_roles) {
+ op_state.max_roles = max_roles;
+ }
+
+ uint32_t max_groups = 0;
+ bool has_max_groups = false;
+ RESTArgs::get_uint32(s, "max-groups", 0, &max_groups, &has_max_groups);
+ if (has_max_groups) {
+ op_state.max_groups = max_groups;
+ }
+
+ uint32_t max_access_keys = 0;
+ bool has_max_access_keys = false;
+ RESTArgs::get_uint32(s, "max-access-keys", 0, &max_access_keys, &has_max_access_keys);
+ if (has_max_access_keys) {
+ op_state.max_access_keys = max_access_keys;
+ }
+
+ uint32_t max_buckets = 0;
+ bool has_max_buckets = false;
+ RESTArgs::get_uint32(s, "max-buckets", 0, &max_buckets, &has_max_buckets);
+ if (has_max_buckets) {
+ op_state.max_buckets = max_buckets;
+ }
+
+ if (!driver->is_meta_master()) {
+ bufferlist data;
+ JSONParser parser;
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ &data, &parser, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
+ }
+
+ // the master zone may have generated its own account id, use the same
+ std::string meta_master_id;
+ JSONDecoder::decode_json("id", meta_master_id, &parser);
+ if (meta_master_id.empty()) {
+ ldpp_dout(this, 4) << "forward_request_to_master returned empty account id" << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+ op_state.account_id = meta_master_id;
+ }
+
+ op_ret = rgw::account::create(this, driver, op_state,
+ s->err.message, flusher, y);
+ if (op_ret == -EEXIST) {
+ op_ret = -ERR_ACCOUNT_EXISTS;
+ }
+}
+
+class RGWOp_Account_Modify : public RGWRESTOp {
+public:
+ int check_caps(const RGWUserCaps& caps) override {
+ return caps.check_cap("accounts", RGW_CAP_WRITE);
+ }
+
+ void execute(optional_yield y) override;
+
+ const char* name() const override { return "modify_account"; }
+};
+
+void RGWOp_Account_Modify::execute(optional_yield y)
+{
+ bufferlist data;
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ &data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
+ }
+
+ rgw::account::AdminOpState op_state;
+ RESTArgs::get_string(s, "id", "", &op_state.account_id);
+ RESTArgs::get_string(s, "tenant", "", &op_state.tenant);
+ RESTArgs::get_string(s, "name", "", &op_state.account_name);
+ RESTArgs::get_string(s, "email", "", &op_state.email);
+
+ uint32_t max_users = 0;
+ bool has_max_users = false;
+ RESTArgs::get_uint32(s, "max-users", 0, &max_users, &has_max_users);
+ if (has_max_users) {
+ op_state.max_users = max_users;
+ }
+
+ uint32_t max_roles = 0;
+ bool has_max_roles = false;
+ RESTArgs::get_uint32(s, "max-roles", 0, &max_roles, &has_max_roles);
+ if (has_max_roles) {
+ op_state.max_roles = max_roles;
+ }
+
+ uint32_t max_groups = 0;
+ bool has_max_groups = false;
+ RESTArgs::get_uint32(s, "max-groups", 0, &max_groups, &has_max_groups);
+ if (has_max_groups) {
+ op_state.max_groups = max_groups;
+ }
+
+ uint32_t max_access_keys = 0;
+ bool has_max_access_keys = false;
+ RESTArgs::get_uint32(s, "max-access-keys", 0, &max_access_keys, &has_max_access_keys);
+ if (has_max_access_keys) {
+ op_state.max_access_keys = max_access_keys;
+ }
+
+ uint32_t max_buckets = 0;
+ bool has_max_buckets = false;
+ RESTArgs::get_uint32(s, "max-buckets", 0, &max_buckets, &has_max_buckets);
+ if (has_max_buckets) {
+ op_state.max_buckets = max_buckets;
+ }
+
+ op_ret = rgw::account::modify(this, driver, op_state,
+ s->err.message, flusher, y);
+}
+
+
+class RGWOp_Account_Get : public RGWRESTOp {
+public:
+ int check_caps(const RGWUserCaps& caps) override {
+ return caps.check_cap("account", RGW_CAP_READ);
+ }
+
+ void execute(optional_yield y) override;
+
+ const char* name() const override { return "get_account"; }
+};
+
+void RGWOp_Account_Get::execute(optional_yield y)
+{
+ rgw::account::AdminOpState op_state;
+ RESTArgs::get_string(s, "id", "", &op_state.account_id);
+ RESTArgs::get_string(s, "tenant", "", &op_state.tenant);
+ RESTArgs::get_string(s, "name", "", &op_state.account_name);
+
+ op_ret = rgw::account::info(this, driver, op_state,
+ s->err.message, flusher, y);
+}
+
+class RGWOp_Account_Delete : public RGWRESTOp {
+public:
+ int check_caps(const RGWUserCaps& caps) override {
+ return caps.check_cap("account", RGW_CAP_WRITE);
+ }
+
+ void execute(optional_yield y) override;
+
+ const char* name() const override { return "delete_account"; }
+};
+
+void RGWOp_Account_Delete::execute(optional_yield y)
+{
+ bufferlist data;
+ op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
+ &data, nullptr, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ return;
+ }
+
+ rgw::account::AdminOpState op_state;
+ RESTArgs::get_string(s, "id", "", &op_state.account_id);
+ RESTArgs::get_string(s, "tenant", "", &op_state.tenant);
+ RESTArgs::get_string(s, "name", "", &op_state.account_name);
+
+ op_ret = rgw::account::remove(this, driver, op_state,
+ s->err.message, flusher, y);
+}
+
+RGWOp* RGWHandler_Account::op_post()
+{
+ return new RGWOp_Account_Create;
+}
+
+RGWOp* RGWHandler_Account::op_put()
+{
+ return new RGWOp_Account_Modify;
+}
+
+RGWOp* RGWHandler_Account::op_get()
+{
+ return new RGWOp_Account_Get;
+}
+
+RGWOp* RGWHandler_Account::op_delete()
+{
+ return new RGWOp_Account_Delete;
+}
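+
+// The op_post/op_put/op_get/op_delete overrides above bind the HTTP methods
+// to these ops. Illustrative admin requests, assuming the manager is mounted
+// under the admin entry as "account":
+//
+//   POST   /admin/account?name=acct1&max-users=10   -> create_account
+//   PUT    /admin/account?id=<account-id>&email=e@x -> modify_account
+//   GET    /admin/account?id=<account-id>           -> get_account
+//   DELETE /admin/account?id=<account-id>           -> delete_account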
diff --git a/src/rgw/rgw_rest_account.h b/src/rgw/rgw_rest_account.h
new file mode 100644
index 00000000000..2df07a0efc3
--- /dev/null
+++ b/src/rgw/rgw_rest_account.h
@@ -0,0 +1,46 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright (C) 2020 SUSE LLC
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#pragma once
+
+#include "rgw_rest.h"
+#include "rgw_rest_s3.h"
+
+class RGWHandler_Account : public RGWHandler_Auth_S3 {
+ protected:
+ RGWOp *op_get() override;
+ RGWOp *op_put() override;
+ RGWOp *op_post() override;
+ RGWOp *op_delete() override;
+ public:
+ using RGWHandler_Auth_S3::RGWHandler_Auth_S3;
+ ~RGWHandler_Account() override = default;
+
+ int read_permissions(RGWOp*, optional_yield y) override {
+ return 0;
+ }
+};
+
+class RGWRESTMgr_Account : public RGWRESTMgr {
+ public:
+ RGWRESTMgr_Account() = default;
+ ~RGWRESTMgr_Account() override = default;
+
+ RGWHandler_REST *get_handler(rgw::sal::Driver* driver, struct req_state*,
+ const rgw::auth::StrategyRegistry& auth_registry,
+ const std::string&) override {
+ return new RGWHandler_Account(auth_registry);
+ }
+};
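+
+// Hypothetical wiring sketch (not part of this patch): like the other admin
+// resources, the manager would be registered under the configured admin
+// entry point, e.g.:
+//
+//   RGWRESTMgr* admin_mgr = ...; // manager rooted at rgw_admin_entry
+//   admin_mgr->register_resource("account", new RGWRESTMgr_Account);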
diff --git a/src/rgw/rgw_rest_client.cc b/src/rgw/rgw_rest_client.cc
index 164fcf59e8b..941856e6006 100644
--- a/src/rgw/rgw_rest_client.cc
+++ b/src/rgw/rgw_rest_client.cc
@@ -506,7 +506,7 @@ static void grants_by_type_add_one_grant(map<int, string>& grants_by_type, int p
s.append(", ");
if (const auto user = grant.get_user(); user) {
- s.append("id=\"" + user->id.to_str() + "\"");
+ s.append("id=\"" + to_string(user->id) + "\"");
} else if (const auto email = grant.get_email(); email) {
s.append("emailAddress=\"" + email->address + "\"");
} else if (const auto group = grant.get_group(); group) {
diff --git a/src/rgw/rgw_rest_conn.cc b/src/rgw/rgw_rest_conn.cc
index 2c1ff1a0a3f..39d171dfb52 100644
--- a/src/rgw/rgw_rest_conn.cc
+++ b/src/rgw/rgw_rest_conn.cc
@@ -147,13 +147,13 @@ void RGWRESTConn::set_url_unconnectable(const std::string& endpoint)
ldout(cct, 10) << "set endpoint unconnectable. url=" << endpoint << dendl;
}
-void RGWRESTConn::populate_params(param_vec_t& params, const rgw_user *uid, const string& zonegroup)
+void RGWRESTConn::populate_params(param_vec_t& params, const rgw_owner* uid, const string& zonegroup)
{
populate_uid(params, uid);
populate_zonegroup(params, zonegroup);
}
-int RGWRESTConn::forward(const DoutPrefixProvider *dpp, const rgw_user& uid, const req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y)
+int RGWRESTConn::forward(const DoutPrefixProvider *dpp, const rgw_owner& uid, const req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y)
{
int ret = 0;
@@ -225,9 +225,8 @@ int RGWRESTConn::put_obj_send_init(const rgw_obj& obj, const rgw_http_param_pair
if (ret < 0)
return ret;
- rgw_user uid;
param_vec_t params;
- populate_params(params, &uid, self_zone_group);
+ populate_params(params, nullptr, self_zone_group);
if (extra_params) {
append_param_list(params, extra_params);
@@ -240,7 +239,7 @@ int RGWRESTConn::put_obj_send_init(const rgw_obj& obj, const rgw_http_param_pair
return 0;
}
-int RGWRESTConn::put_obj_async_init(const DoutPrefixProvider *dpp, const rgw_user& uid, const rgw_obj& obj,
+int RGWRESTConn::put_obj_async_init(const DoutPrefixProvider *dpp, const rgw_owner& uid, const rgw_obj& obj,
map<string, bufferlist>& attrs,
RGWRESTStreamS3PutObj **req)
{
@@ -296,7 +295,7 @@ static void set_header(T val, map<string, string>& headers, const string& header
}
-int RGWRESTConn::get_obj(const DoutPrefixProvider *dpp, const rgw_user& uid, req_info *info /* optional */, const rgw_obj& obj,
+int RGWRESTConn::get_obj(const DoutPrefixProvider *dpp, const rgw_owner& uid, req_info *info /* optional */, const rgw_obj& obj,
const real_time *mod_ptr, const real_time *unmod_ptr,
uint32_t mod_zone_id, uint64_t mod_pg_ver,
bool prepend_metadata, bool get_op, bool rgwx_stat,
diff --git a/src/rgw/rgw_rest_conn.h b/src/rgw/rgw_rest_conn.h
index 07ca83212ca..b2d1affb640 100644
--- a/src/rgw/rgw_rest_conn.h
+++ b/src/rgw/rgw_rest_conn.h
@@ -127,10 +127,10 @@ public:
}
size_t get_endpoint_count() const { return endpoints.size(); }
- virtual void populate_params(param_vec_t& params, const rgw_user *uid, const std::string& zonegroup);
+ virtual void populate_params(param_vec_t& params, const rgw_owner* uid, const std::string& zonegroup);
/* sync request */
- int forward(const DoutPrefixProvider *dpp, const rgw_user& uid, const req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y);
+ int forward(const DoutPrefixProvider *dpp, const rgw_owner& uid, const req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y);
/* sync request */
int forward_iam_request(const DoutPrefixProvider *dpp, const req_info& info, obj_version *objv, size_t max_response, bufferlist *inbl, bufferlist *outbl, optional_yield y);
@@ -138,13 +138,13 @@ public:
/* async requests */
int put_obj_send_init(const rgw_obj& obj, const rgw_http_param_pair *extra_params, RGWRESTStreamS3PutObj **req);
- int put_obj_async_init(const DoutPrefixProvider *dpp, const rgw_user& uid, const rgw_obj& obj,
+ int put_obj_async_init(const DoutPrefixProvider *dpp, const rgw_owner& uid, const rgw_obj& obj,
std::map<std::string, bufferlist>& attrs, RGWRESTStreamS3PutObj **req);
int complete_request(RGWRESTStreamS3PutObj *req, std::string& etag,
ceph::real_time *mtime, optional_yield y);
struct get_obj_params {
- rgw_user uid;
+ rgw_owner uid;
req_info *info{nullptr};
const ceph::real_time *mod_ptr{nullptr};
const ceph::real_time *unmod_ptr{nullptr};
@@ -172,7 +172,7 @@ public:
int get_obj(const DoutPrefixProvider *dpp, const rgw_obj& obj, const get_obj_params& params, bool send, RGWRESTStreamRWRequest **req);
- int get_obj(const DoutPrefixProvider *dpp, const rgw_user& uid, req_info *info /* optional */, const rgw_obj& obj,
+ int get_obj(const DoutPrefixProvider *dpp, const rgw_owner& uid, req_info *info /* optional */, const rgw_obj& obj,
const ceph::real_time *mod_ptr, const ceph::real_time *unmod_ptr,
uint32_t mod_zone_id, uint64_t mod_pg_ver,
bool prepend_metadata, bool get_op, bool rgwx_stat, bool sync_manifest,
@@ -221,12 +221,9 @@ private:
params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "zonegroup", zonegroup));
}
}
- void populate_uid(param_vec_t& params, const rgw_user *uid) {
+ void populate_uid(param_vec_t& params, const rgw_owner* uid) {
if (uid) {
- std::string uid_str = uid->to_str();
- if (!uid->empty()){
- params.push_back(param_pair_t(RGW_SYS_PARAM_PREFIX "uid", uid_str));
- }
+ params.emplace_back(RGW_SYS_PARAM_PREFIX "uid", to_string(*uid));
}
}
};
@@ -241,7 +238,8 @@ public:
RGWRESTConn(_cct, _remote_id, endpoints, _cred, _zone_group, _api_name, _host_style) {}
~S3RESTConn() override = default;
- void populate_params(param_vec_t& params, const rgw_user *uid, const std::string& zonegroup) override {
+ void populate_params(param_vec_t& params, const rgw_owner* uid,
+ const std::string& zonegroup) override {
// do not populate any params in S3 REST Connection.
return;
}
diff --git a/src/rgw/rgw_rest_iam.cc b/src/rgw/rgw_rest_iam.cc
index b9e8779c10a..c6f5a584c70 100644
--- a/src/rgw/rgw_rest_iam.cc
+++ b/src/rgw/rgw_rest_iam.cc
@@ -1,6 +1,7 @@
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
+#include <regex>
#include <boost/tokenizer.hpp>
#include "rgw_auth_s3.h"
@@ -9,6 +10,10 @@
#include "rgw_rest_role.h"
#include "rgw_rest_user_policy.h"
#include "rgw_rest_oidc_provider.h"
+#include "rgw_rest_iam_group.h"
+#include "rgw_rest_iam_user.h"
+#include "rgw_rest_conn.h"
+#include "driver/rados/rgw_zone.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw
@@ -26,10 +31,16 @@ static const std::unordered_map<std::string_view, op_generator> op_generators =
{"GetRolePolicy", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWGetRolePolicy;}},
{"ListRolePolicies", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWListRolePolicies;}},
{"DeleteRolePolicy", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWDeleteRolePolicy(bl_post_body);}},
- {"PutUserPolicy", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWPutUserPolicy;}},
+ {"AttachRolePolicy", make_iam_attach_role_policy_op},
+ {"DetachRolePolicy", make_iam_detach_role_policy_op},
+ {"ListAttachedRolePolicies", make_iam_list_attached_role_policies_op},
+ {"PutUserPolicy", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWPutUserPolicy(bl_post_body);}},
{"GetUserPolicy", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWGetUserPolicy;}},
{"ListUserPolicies", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWListUserPolicies;}},
- {"DeleteUserPolicy", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWDeleteUserPolicy;}},
+ {"DeleteUserPolicy", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWDeleteUserPolicy(bl_post_body);}},
+ {"AttachUserPolicy", make_iam_attach_user_policy_op},
+ {"DetachUserPolicy", make_iam_detach_user_policy_op},
+ {"ListAttachedUserPolicies", make_iam_list_attached_user_policies_op},
{"CreateOpenIDConnectProvider", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWCreateOIDCProvider;}},
{"ListOpenIDConnectProviders", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWListOIDCProviders;}},
{"GetOpenIDConnectProvider", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWGetOIDCProvider;}},
@@ -37,7 +48,31 @@ static const std::unordered_map<std::string_view, op_generator> op_generators =
{"TagRole", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWTagRole(bl_post_body);}},
{"ListRoleTags", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWListRoleTags;}},
{"UntagRole", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWUntagRole(bl_post_body);}},
- {"UpdateRole", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWUpdateRole(bl_post_body);}}
+ {"UpdateRole", [](const bufferlist& bl_post_body) -> RGWOp* {return new RGWUpdateRole(bl_post_body);}},
+ {"CreateUser", make_iam_create_user_op},
+ {"GetUser", make_iam_get_user_op},
+ {"UpdateUser", make_iam_update_user_op},
+ {"DeleteUser", make_iam_delete_user_op},
+ {"ListUsers", make_iam_list_users_op},
+ {"CreateAccessKey", make_iam_create_access_key_op},
+ {"UpdateAccessKey", make_iam_update_access_key_op},
+ {"DeleteAccessKey", make_iam_delete_access_key_op},
+ {"ListAccessKeys", make_iam_list_access_keys_op},
+ {"CreateGroup", make_iam_create_group_op},
+ {"GetGroup", make_iam_get_group_op},
+ {"UpdateGroup", make_iam_update_group_op},
+ {"DeleteGroup", make_iam_delete_group_op},
+ {"ListGroups", make_iam_list_groups_op},
+ {"AddUserToGroup", make_iam_add_user_to_group_op},
+ {"RemoveUserFromGroup", make_iam_remove_user_from_group_op},
+ {"ListGroupsForUser", make_iam_list_groups_for_user_op},
+ {"PutGroupPolicy", make_iam_put_group_policy_op},
+ {"GetGroupPolicy", make_iam_get_group_policy_op},
+ {"ListGroupPolicies", make_iam_list_group_policies_op},
+ {"DeleteGroupPolicy", make_iam_delete_group_policy_op},
+ {"AttachGroupPolicy", make_iam_attach_group_policy_op},
+ {"DetachGroupPolicy", make_iam_detach_group_policy_op},
+ {"ListAttachedGroupPolicies", make_iam_list_attached_group_policies_op},
};
bool RGWHandler_REST_IAM::action_exists(const req_state* s)
@@ -88,3 +123,196 @@ RGWRESTMgr_IAM::get_handler(rgw::sal::Driver* driver,
bufferlist bl;
return new RGWHandler_REST_IAM(auth_registry, bl);
}
+
+static constexpr size_t MAX_POLICY_NAME_LEN = 128;
+
+bool validate_iam_policy_name(const std::string& name, std::string& err)
+{
+ if (name.empty()) {
+ err = "Missing required element PolicyName";
+ return false;
+ }
+
+ if (name.size() > MAX_POLICY_NAME_LEN) {
+ err = "PolicyName too long";
+ return false;
+ }
+
+ std::regex regex_policy_name("[A-Za-z0-9:=,.@-]+");
+ if (! std::regex_match(name, regex_policy_name)) {
+ err = "PolicyName contains invalid characters";
+ return false;
+ }
+
+ return true;
+}
+
+bool validate_iam_policy_arn(const std::string& arn, std::string& err)
+{
+ if (arn.empty()) {
+ err = "Missing required element PolicyArn";
+ return false;
+ }
+
+ if (arn.size() > 2048) {
+ err = "PolicyArn must be at most 2048 characters long";
+ return false;
+ }
+
+ if (arn.size() < 20) {
+ err = "PolicyArn must be at least 20 characters long";
+ return false;
+ }
+
+ return true;
+}
+
+static constexpr size_t MAX_USER_NAME_LEN = 64;
+
+bool validate_iam_user_name(const std::string& name, std::string& err)
+{
+ if (name.empty()) {
+ err = "Missing required element UserName";
+ return false;
+ }
+ if (name.size() > MAX_USER_NAME_LEN) {
+ err = "UserName too long";
+ return false;
+ }
+ const std::regex pattern("[\\w+=,.@-]+");
+ if (!std::regex_match(name, pattern)) {
+ err = "UserName contains invalid characters";
+ return false;
+ }
+ return true;
+}
+
+bool validate_iam_role_name(const std::string& name, std::string& err)
+{
+ if (name.empty()) {
+ err = "Missing required element RoleName";
+ return false;
+ }
+ if (name.size() > rgw::sal::RGWRole::MAX_ROLE_NAME_LEN) {
+ err = "RoleName too long";
+ return false;
+ }
+ const std::regex pattern("[\\w+=,.@-]+");
+ if (!std::regex_match(name, pattern)) {
+ err = "RoleName contains invalid characters";
+ return false;
+ }
+ return true;
+}
+
+static constexpr size_t MAX_GROUP_NAME_LEN = 128;
+
+bool validate_iam_group_name(const std::string& name, std::string& err)
+{
+ if (name.empty()) {
+ err = "Missing required element GroupName";
+ return false;
+ }
+ if (name.size() > MAX_GROUP_NAME_LEN) {
+ err = "GroupName too long";
+ return false;
+ }
+ const std::regex pattern("[\\w+=,.@-]+");
+ if (!std::regex_match(name, pattern)) {
+ err = "GroupName contains invalid characters";
+ return false;
+ }
+ return true;
+}
+
+static constexpr size_t MAX_PATH_LEN = 512;
+
+bool validate_iam_path(const std::string& path, std::string& err)
+{
+ if (path.size() > MAX_PATH_LEN) {
+ err = "Path too long";
+ return false;
+ }
+ const std::regex pattern("(/[!-~]+/)|(/)");
+ if (!std::regex_match(path, pattern)) {
+ err = "Path contains invalid characters";
+ return false;
+ }
+ return true;
+}
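+
+// Illustrative inputs for validate_iam_path(): the pattern accepts the root
+// path "/" or a slash-delimited run of printable ASCII, e.g.
+//
+//   validate_iam_path("/", err);                          // ok
+//   validate_iam_path("/division_abc/subdivision/", err); // ok
+//   validate_iam_path("division_abc", err);   // rejected: no leading/trailing '/'
+//   validate_iam_path("/division abc/", err); // rejected: ' ' outside [!-~]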
+
+std::string iam_user_arn(const RGWUserInfo& info)
+{
+ if (info.type == TYPE_ROOT) {
+ return fmt::format("arn:aws:iam::{}:root", info.account_id);
+ }
+ std::string_view acct = !info.account_id.empty()
+ ? info.account_id : info.user_id.tenant;
+ std::string_view path = info.path;
+ if (path.empty()) {
+ path = "/";
+ }
+ return fmt::format("arn:aws:iam::{}:user{}{}",
+ acct, path, info.display_name);
+}
+
+std::string iam_group_arn(const RGWGroupInfo& info)
+{
+ std::string_view path = info.path;
+ if (path.empty()) {
+ path = "/";
+ }
+ return fmt::format("arn:aws:iam::{}:group{}{}",
+ info.account_id, path, info.name);
+}
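+
+// Resulting ARN shapes (account ids and names below are illustrative):
+//
+//   root user:    arn:aws:iam::RGW00000000000000001:root
+//   account user: arn:aws:iam::RGW00000000000000001:user/eng/alice
+//   legacy user:  arn:aws:iam::tenant1:user/alice   (tenant fills the account slot)
+//   group:        arn:aws:iam::RGW00000000000000001:group/eng/devs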
+
+int forward_iam_request_to_master(const DoutPrefixProvider* dpp,
+ const rgw::SiteConfig& site,
+ const RGWUserInfo& user,
+ bufferlist& indata,
+ RGWXMLDecoder::XMLParser& parser,
+ req_info& req, optional_yield y)
+{
+ const auto& period = site.get_period();
+ if (!period) {
+ return 0; // not multisite
+ }
+ if (site.is_meta_master()) {
+ return 0; // don't need to forward metadata requests
+ }
+ const auto& pmap = period->period_map;
+ auto zg = pmap.zonegroups.find(pmap.master_zonegroup);
+ if (zg == pmap.zonegroups.end()) {
+ return -EINVAL;
+ }
+ auto z = zg->second.zones.find(zg->second.master_zone);
+ if (z == zg->second.zones.end()) {
+ return -EINVAL;
+ }
+
+ RGWAccessKey creds;
+ if (auto i = user.access_keys.begin(); i != user.access_keys.end()) {
+ creds.id = i->first;
+ creds.key = i->second.key;
+ }
+
+ // use the master zone's endpoints
+ auto conn = RGWRESTConn{dpp->get_cct(), z->second.id, z->second.endpoints,
+ std::move(creds), zg->second.id, zg->second.api_name};
+ bufferlist outdata;
+ constexpr size_t max_response_size = 128 * 1024; // we expect a very small response
+ int ret = conn.forward_iam_request(dpp, req, nullptr, max_response_size,
+ &indata, &outdata, y);
+ if (ret < 0) {
+ return ret;
+ }
+
+ std::string r = rgw_bl_str(outdata);
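+ // the forwarded body may carry html-escaped quotes; restore them before parsing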
+ boost::replace_all(r, "&quot;", "\"");
+
+ if (!parser.parse(r.c_str(), r.length(), 1)) {
+ ldpp_dout(dpp, 0) << "ERROR: failed to parse response from master zonegroup" << dendl;
+ return -EIO;
+ }
+ return 0;
+}
diff --git a/src/rgw/rgw_rest_iam.h b/src/rgw/rgw_rest_iam.h
index 3e579ab35ce..00f6ff7dfc4 100644
--- a/src/rgw/rgw_rest_iam.h
+++ b/src/rgw/rgw_rest_iam.h
@@ -3,9 +3,93 @@
#pragma once
+#include <concepts>
+
#include "rgw_auth.h"
#include "rgw_auth_filters.h"
#include "rgw_rest.h"
+#include "rgw_role.h"
+#include "rgw_sal.h"
+#include "rgw_xml.h"
+
+
+class DoutPrefixProvider;
+namespace rgw { class SiteConfig; }
+struct RGWUserInfo;
+struct RGWGroupInfo;
+
+bool validate_iam_policy_name(const std::string& name, std::string& err);
+bool validate_iam_policy_arn(const std::string& arn, std::string& err);
+bool validate_iam_user_name(const std::string& name, std::string& err);
+bool validate_iam_role_name(const std::string& name, std::string& err);
+bool validate_iam_group_name(const std::string& name, std::string& err);
+bool validate_iam_path(const std::string& path, std::string& err);
+
+std::string iam_user_arn(const RGWUserInfo& info);
+std::string iam_group_arn(const RGWGroupInfo& info);
+
+int forward_iam_request_to_master(const DoutPrefixProvider* dpp,
+ const rgw::SiteConfig& site,
+ const RGWUserInfo& user,
+ bufferlist& indata,
+ RGWXMLDecoder::XMLParser& parser,
+ req_info& req, optional_yield y);
+
+/// Perform an atomic read-modify-write operation on the given user metadata.
+/// Racing writes are detected here as ECANCELED errors, where we reload the
+/// updated user metadata and retry the operation.
+template <std::invocable<> F>
+int retry_raced_user_write(const DoutPrefixProvider* dpp, optional_yield y,
+ rgw::sal::User* u, const F& f)
+{
+ int r = f();
+ for (int i = 0; i < 10 && r == -ECANCELED; ++i) {
+ u->get_version_tracker().clear();
+ r = u->load_user(dpp, y);
+ if (r >= 0) {
+ r = f();
+ }
+ }
+ return r;
+}
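+
+// Usage sketch: the caller wraps its modify+store step in a lambda; on a
+// racing write (-ECANCELED) the user is reloaded and the lambda re-run, e.g.:
+//
+//   op_ret = retry_raced_user_write(this, y, user.get(), [this, y] {
+//     RGWUserInfo& info = user->get_info();
+//     RGWUserInfo old_info = info;
+//     info.group_ids.insert(group_id);   // the actual modification
+//     constexpr bool exclusive = false;
+//     return user->store_user(this, y, exclusive, &old_info);
+//   });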
+
+/// Perform an atomic read-modify-write operation on the given group metadata.
+/// Racing writes are detected here as ECANCELED errors, where we reload the
+/// updated group metadata and retry the operation.
+template <std::invocable<> F>
+int retry_raced_group_write(const DoutPrefixProvider* dpp, optional_yield y,
+ rgw::sal::Driver* driver, RGWGroupInfo& info,
+ rgw::sal::Attrs& attrs, RGWObjVersionTracker& objv,
+ const F& f)
+{
+ int r = f();
+ for (int i = 0; i < 10 && r == -ECANCELED; ++i) {
+ objv.clear();
+ r = driver->load_group_by_id(dpp, y, info.id, info, attrs, objv);
+ if (r >= 0) {
+ r = f();
+ }
+ }
+ return r;
+}
+
+/// Perform an atomic read-modify-write operation on the given role metadata.
+/// Racing writes are detected here as ECANCELED errors, where we reload the
+/// updated role metadata and retry the operation.
+template <std::invocable<> F>
+int retry_raced_role_write(const DoutPrefixProvider* dpp, optional_yield y,
+ rgw::sal::RGWRole* role, const F& f)
+{
+ int r = f();
+ for (int i = 0; i < 10 && r == -ECANCELED; ++i) {
+ role->get_objv_tracker().clear();
+ r = role->get_by_id(dpp, y);
+ if (r >= 0) {
+ r = f();
+ }
+ }
+ return r;
+}
class RGWHandler_REST_IAM : public RGWHandler_REST {
const rgw::auth::StrategyRegistry& auth_registry;
diff --git a/src/rgw/rgw_rest_iam_group.cc b/src/rgw/rgw_rest_iam_group.cc
new file mode 100644
index 00000000000..0fbe469ee07
--- /dev/null
+++ b/src/rgw/rgw_rest_iam_group.cc
@@ -0,0 +1,2122 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "rgw_rest_iam_group.h"
+
+#include <utility>
+#include "include/buffer.h"
+#include "common/errno.h"
+#include "rgw_arn.h"
+#include "rgw_common.h"
+#include "rgw_iam_managed_policy.h"
+#include "rgw_op.h"
+#include "rgw_process_env.h"
+#include "rgw_rest.h"
+#include "rgw_rest_iam.h"
+
+
+static std::string make_resource_name(const RGWGroupInfo& info)
+{
+ std::string_view path = info.path;
+ if (path.empty()) {
+ path = "/";
+ }
+ return string_cat_reserve(path, info.name);
+}
+
+static void dump_iam_group(const RGWGroupInfo& info, Formatter* f)
+{
+ encode_json("Path", info.path, f);
+ encode_json("GroupName", info.name, f);
+ encode_json("GroupId", info.id, f);
+ encode_json("Arn", iam_group_arn(info), f);
+}
+
+static void dump_iam_user(const RGWUserInfo& info, Formatter* f)
+{
+ encode_json("Path", info.path, f);
+ encode_json("UserName", info.display_name, f);
+ encode_json("UserId", info.user_id, f);
+ encode_json("Arn", iam_user_arn(info), f);
+}
+
+
+// CreateGroup
+class RGWCreateGroup_IAM : public RGWOp {
+ bufferlist post_body;
+ RGWGroupInfo info;
+
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site, std::string& uid);
+ public:
+ explicit RGWCreateGroup_IAM(const ceph::bufferlist& post_body)
+ : post_body(post_body) {}
+
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "create_group"; }
+ RGWOpType get_type() override { return RGW_OP_CREATE_GROUP; }
+};
+
+int RGWCreateGroup_IAM::init_processing(optional_yield y)
+{
+ // use account id from authenticated user/role. with AssumeRole, this may not
+ // match the account of s->user
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ info.account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ info.path = s->info.args.get("Path");
+ if (info.path.empty()) {
+ info.path = "/";
+ } else if (!validate_iam_path(info.path, s->err.message)) {
+ return -EINVAL;
+ }
+
+ info.name = s->info.args.get("GroupName");
+ if (!validate_iam_group_name(info.name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int RGWCreateGroup_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "group", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamCreateGroup, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+int RGWCreateGroup_IAM::forward_to_master(optional_yield y,
+ const rgw::SiteConfig& site,
+ std::string& id)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("GroupName");
+ s->info.args.remove("Path");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+
+ XMLObj* response = parser.find_first("CreateGroupResponse");
+ if (!response) {
+ ldpp_dout(this, 5) << "ERROR: unexpected xml: CreateGroupResponse" << dendl;
+ return -EINVAL;
+ }
+
+ XMLObj* result = response->find_first("CreateGroupResult");
+ if (!result) {
+ ldpp_dout(this, 5) << "ERROR: unexpected xml: CreateGroupResult" << dendl;
+ return -EINVAL;
+ }
+
+ XMLObj* group = result->find_first("Group");
+ if (!group) {
+ ldpp_dout(this, 5) << "ERROR: unexpected xml: Group" << dendl;
+ return -EINVAL;
+ }
+
+ try {
+ RGWXMLDecoder::decode_xml("GroupId", id, group, true);
+ } catch (RGWXMLDecoder::err& err) {
+ ldpp_dout(this, 5) << "ERROR: unexpected xml: GroupId" << dendl;
+ return -EINVAL;
+ }
+
+ ldpp_dout(this, 4) << "group id decoded from forwarded response is " << id << dendl;
+ return 0;
+}
+
+void RGWCreateGroup_IAM::execute(optional_yield y)
+{
+ {
+ // check the current group count against account limit
+ RGWAccountInfo account;
+ rgw::sal::Attrs attrs; // unused
+ RGWObjVersionTracker objv; // unused
+ op_ret = driver->load_account_by_id(this, y, info.account_id,
+ account, attrs, objv);
+ if (op_ret < 0) {
+ ldpp_dout(this, 4) << "failed to load iam account "
+ << info.account_id << ": " << cpp_strerror(op_ret) << dendl;
+ return;
+ }
+
+ if (account.max_groups >= 0) { // max_groups < 0 means unlimited
+ uint32_t count = 0;
+ op_ret = driver->count_account_groups(this, y, info.account_id, count);
+ if (op_ret < 0) {
+ ldpp_dout(this, 4) << "failed to count groups for iam account "
+ << info.account_id << ": " << cpp_strerror(op_ret) << dendl;
+ return;
+ }
+ if (std::cmp_greater_equal(count, account.max_groups)) {
+ s->err.message = fmt::format("Group limit {} exceeded",
+ account.max_groups);
+ op_ret = -ERR_LIMIT_EXCEEDED;
+ return;
+ }
+ }
+ }
+
+ // generate group id. forward_to_master() may overwrite this
+ uuid_d uuid;
+ uuid.generate_random();
+ info.id = uuid.to_string();
+ info.tenant = s->auth.identity->get_tenant();
+
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site, info.id);
+ if (op_ret) {
+ return;
+ }
+ }
+
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+ objv.generate_new_write_ver(get_cct());
+ constexpr bool exclusive = true;
+ op_ret = driver->store_group(this, y, info, attrs, objv, exclusive, nullptr);
+}
+
+void RGWCreateGroup_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "CreateGroupResponse", RGW_REST_IAM_XMLNS};
+ {
+ Formatter::ObjectSection result{*f, "CreateGroupResult"};
+ Formatter::ObjectSection group{*f, "Group"};
+ dump_iam_group(info, f);
+ // /Group
+ // /CreateGroupResult
+ }
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /CreateGroupResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
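+
+// On success the block above renders (values illustrative):
+//
+//   <CreateGroupResponse xmlns="...">
+//     <CreateGroupResult>
+//       <Group>
+//         <Path>/</Path>
+//         <GroupName>devs</GroupName>
+//         <GroupId>...uuid...</GroupId>
+//         <Arn>arn:aws:iam::RGW00000000000000001:group/devs</Arn>
+//       </Group>
+//     </CreateGroupResult>
+//     <ResponseMetadata>
+//       <RequestId>tx0000...</RequestId>
+//     </ResponseMetadata>
+//   </CreateGroupResponse>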
+
+
+// GetGroup
+class RGWGetGroup_IAM : public RGWOp {
+ rgw_account_id account_id;
+ RGWGroupInfo info;
+ std::string marker;
+ int max_items = 100;
+ rgw::sal::UserList listing;
+ public:
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "get_group"; }
+ RGWOpType get_type() override { return RGW_OP_GET_GROUP; }
+};
+
+int RGWGetGroup_IAM::init_processing(optional_yield y)
+{
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ const std::string name = s->info.args.get("GroupName");
+ if (!validate_iam_group_name(name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ marker = s->info.args.get("Marker");
+
+ int r = s->info.args.get_int("MaxItems", &max_items, max_items);
+ if (r < 0 || max_items > 1000) {
+ s->err.message = "Invalid value for MaxItems";
+ return -EINVAL;
+ }
+
+ rgw::sal::Attrs attrs_ignored;
+ RGWObjVersionTracker objv_ignored;
+ r = driver->load_group_by_name(this, y, account_id, name, info,
+ attrs_ignored, objv_ignored);
+ if (r == -ENOENT) {
+ s->err.message = "No such GroupName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWGetGroup_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "group", account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamGetGroup, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+void RGWGetGroup_IAM::execute(optional_yield y)
+{
+ const auto& tenant = s->auth.identity->get_tenant();
+ op_ret = driver->list_group_users(this, y, tenant, info.id,
+ marker, max_items, listing);
+}
+
+void RGWGetGroup_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "GetGroupResponse", RGW_REST_IAM_XMLNS};
+ {
+ Formatter::ObjectSection result{*f, "GetGroupResult"};
+ {
+ Formatter::ObjectSection Group{*f, "Group"};
+ dump_iam_group(info, f);
+ } // /Group
+ {
+ Formatter::ArraySection users{*f, "Users"};
+ for (const auto& user : listing.users) {
+ Formatter::ObjectSection result{*f, "member"};
+ dump_iam_user(user, f);
+ } // /member
+ } // /Users
+ const bool is_truncated = !listing.next_marker.empty();
+ f->dump_bool("IsTruncated", is_truncated);
+ if (is_truncated) {
+ f->dump_string("Marker", listing.next_marker);
+ }
+ // /GetGroupResult
+ }
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /GetGroupResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// UpdateGroup
+class RGWUpdateGroup_IAM : public RGWOp {
+ bufferlist post_body;
+ std::string new_path;
+ std::string new_name;
+ RGWGroupInfo info;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
+ public:
+ explicit RGWUpdateGroup_IAM(const ceph::bufferlist& post_body)
+ : post_body(post_body) {}
+
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "update_group"; }
+ RGWOpType get_type() override { return RGW_OP_UPDATE_GROUP; }
+};
+
+int RGWUpdateGroup_IAM::init_processing(optional_yield y)
+{
+ rgw_account_id account_id;
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ new_path = s->info.args.get("NewPath");
+ if (!new_path.empty() && !validate_iam_path(new_path, s->err.message)) {
+ return -EINVAL;
+ }
+
+ new_name = s->info.args.get("NewGroupName");
+ if (!new_name.empty() &&
+ !validate_iam_group_name(new_name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ const std::string name = s->info.args.get("GroupName");
+ if (name.empty()) {
+ s->err.message = "Missing required element GroupName";
+ return -EINVAL;
+ }
+
+ int r = driver->load_group_by_name(this, y, account_id, name,
+ info, attrs, objv);
+ if (r == -ENOENT) {
+ s->err.message = "No such GroupName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWUpdateGroup_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "group", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamUpdateGroup, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+int RGWUpdateGroup_IAM::forward_to_master(optional_yield y, const rgw::SiteConfig& site)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("NewPath");
+ s->info.args.remove("NewGroupName");
+ s->info.args.remove("GroupName");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+ return 0;
+}
+
+void RGWUpdateGroup_IAM::execute(optional_yield y)
+{
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ if (op_ret) {
+ return;
+ }
+ }
+
+ op_ret = retry_raced_group_write(this, y, driver, info, attrs, objv,
+ [this, y] {
+ const RGWGroupInfo old_info = info;
+
+ if (!new_path.empty()) {
+ info.path = new_path;
+ }
+ if (!new_name.empty()) {
+ info.name = new_name;
+ }
+
+ if (info.path == old_info.path &&
+ info.name == old_info.name) {
+ return 0; // nothing to do, return success
+ }
+
+ constexpr bool exclusive = false;
+ return driver->store_group(this, y, info, attrs, objv,
+ exclusive, &old_info);
+ });
+}
+
+void RGWUpdateGroup_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "UpdateGroupResponse", RGW_REST_IAM_XMLNS};
+ {
+ Formatter::ObjectSection result{*f, "UpdateGroupResult"};
+ Formatter::ObjectSection group{*f, "Group"};
+ dump_iam_group(info, f);
+ // /Group
+ // /UpdateGroupResult
+ }
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /UpdateGroupResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// DeleteGroup
+class RGWDeleteGroup_IAM : public RGWOp {
+ bufferlist post_body;
+ RGWGroupInfo info;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
+ int check_empty(optional_yield y);
+ public:
+ explicit RGWDeleteGroup_IAM(const ceph::bufferlist& post_body)
+ : post_body(post_body) {}
+
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "delete_group"; }
+ RGWOpType get_type() override { return RGW_OP_DELETE_GROUP; }
+};
+
+int RGWDeleteGroup_IAM::init_processing(optional_yield y)
+{
+ rgw_account_id account_id;
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ const std::string name = s->info.args.get("GroupName");
+ if (name.empty()) {
+ s->err.message = "Missing required element GroupName";
+ return -EINVAL;
+ }
+
+ int r = driver->load_group_by_name(this, y, account_id, name,
+ info, attrs, objv);
+ if (r == -ENOENT) {
+ s->err.message = "No such GroupName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWDeleteGroup_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "group", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamDeleteGroup, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+int RGWDeleteGroup_IAM::forward_to_master(optional_yield y, const rgw::SiteConfig& site)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("GroupName");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+ return 0;
+}
+
+int RGWDeleteGroup_IAM::check_empty(optional_yield y)
+{
+ if (!s->penv.site->is_meta_master()) {
+ // only check on the master zone. if a forwarded DeleteGroup request
+ // succeeds on the master zone, it needs to succeed here too
+ return 0;
+ }
+
+ // verify that all policies are removed first
+ if (auto p = attrs.find(RGW_ATTR_IAM_POLICY); p != attrs.end()) {
+ std::map<std::string, std::string> policies;
+ try {
+ decode(policies, p->second);
+ } catch (const buffer::error&) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode group policies" << dendl;
+ return -EIO;
+ }
+
+ if (!policies.empty()) {
+ s->err.message = "The group cannot be deleted until all group policies are removed";
+ return -ERR_DELETE_CONFLICT;
+ }
+ }
+ if (auto p = attrs.find(RGW_ATTR_MANAGED_POLICY); p != attrs.end()) {
+ rgw::IAM::ManagedPolicies policies;
+ try {
+ decode(policies, p->second);
+ } catch (const buffer::error&) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode managed policies" << dendl;
+ return -EIO;
+ }
+
+ if (!policies.arns.empty()) {
+ s->err.message = "The group cannot be deleted until all managed policies are detached";
+ return -ERR_DELETE_CONFLICT;
+ }
+ }
+
+ // check that group has no users
+ const std::string& tenant = s->auth.identity->get_tenant();
+ rgw::sal::UserList listing;
+ int r = driver->list_group_users(this, y, tenant, info.id, "", 1, listing);
+ if (r < 0) {
+ return r;
+ }
+
+ if (!listing.users.empty()) {
+ s->err.message = "The group cannot be deleted until all users are removed";
+ return -ERR_DELETE_CONFLICT;
+ }
+
+ return 0;
+}
+
+void RGWDeleteGroup_IAM::execute(optional_yield y)
+{
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ if (op_ret) {
+ return;
+ }
+ }
+
+ op_ret = retry_raced_group_write(this, y, driver, info, attrs, objv,
+ [this, y] {
+ if (int r = check_empty(y); r < 0) {
+ return r;
+ }
+ return driver->remove_group(this, y, info, objv);
+ });
+
+ if (op_ret == -ENOENT) {
+ if (!site.is_meta_master()) {
+ // delete succeeded on the master, return that success here too
+ op_ret = 0;
+ } else {
+ s->err.message = "No such GroupName in the account";
+ op_ret = -ERR_NO_SUCH_ENTITY;
+ }
+ }
+}
+
+void RGWDeleteGroup_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "DeleteGroupResponse", RGW_REST_IAM_XMLNS};
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /DeleteGroupResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// ListGroups
+class RGWListGroups_IAM : public RGWOp {
+ rgw_account_id account_id;
+ std::string marker;
+ std::string path_prefix;
+ int max_items = 100;
+
+ bool started_response = false;
+ void start_response();
+ void end_response(std::string_view next_marker);
+ void send_response_data(std::span<RGWGroupInfo> groups);
+ public:
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "list_groups"; }
+ RGWOpType get_type() override { return RGW_OP_LIST_GROUPS; }
+};
+
+int RGWListGroups_IAM::init_processing(optional_yield y)
+{
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ marker = s->info.args.get("Marker");
+ path_prefix = s->info.args.get("PathPrefix");
+
+ int r = s->info.args.get_int("MaxItems", &max_items, max_items);
+ if (r < 0 || max_items > 1000) {
+ s->err.message = "Invalid value for MaxItems";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int RGWListGroups_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = "";
+ const rgw::ARN arn{resource_name, "group", account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamListGroups, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+void RGWListGroups_IAM::execute(optional_yield y)
+{
+ rgw::sal::GroupList listing;
+ listing.next_marker = marker;
+
+ op_ret = driver->list_account_groups(this, y, account_id,
+ path_prefix, listing.next_marker,
+ max_items, listing);
+ if (op_ret == -ENOENT) {
+ op_ret = 0;
+ } else if (op_ret < 0) {
+ return;
+ }
+
+ send_response_data(listing.groups);
+
+ if (!started_response) {
+ started_response = true;
+ start_response();
+ }
+ end_response(listing.next_marker);
+}
+
+void RGWListGroups_IAM::start_response()
+{
+ const int64_t proposed_content_length =
+ op_ret ? NO_CONTENT_LENGTH : CHUNKED_TRANSFER_ENCODING;
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this, to_mime_type(s->format), proposed_content_length);
+
+ if (op_ret) {
+ return;
+ }
+
+ dump_start(s); // <?xml block ?>
+ s->formatter->open_object_section_in_ns("ListGroupsResponse", RGW_REST_IAM_XMLNS);
+ s->formatter->open_object_section("ListGroupsResult");
+ s->formatter->open_array_section("Groups");
+}
+
+void RGWListGroups_IAM::end_response(std::string_view next_marker)
+{
+ s->formatter->close_section(); // Groups
+
+ const bool truncated = !next_marker.empty();
+ s->formatter->dump_bool("IsTruncated", truncated);
+ if (truncated) {
+ s->formatter->dump_string("Marker", next_marker);
+ }
+
+ s->formatter->close_section(); // ListGroupsResult
+ s->formatter->close_section(); // ListGroupsResponse
+ rgw_flush_formatter_and_reset(s, s->formatter);
+}
+
+void RGWListGroups_IAM::send_response_data(std::span<RGWGroupInfo> groups)
+{
+ if (!started_response) {
+ started_response = true;
+ start_response();
+ }
+
+ for (const auto& info : groups) {
+ s->formatter->open_object_section("member");
+ dump_iam_group(info, s->formatter);
+ s->formatter->close_section(); // member
+ }
+
+ // flush after each chunk
+ rgw_flush_formatter(s, s->formatter);
+}
+
+void RGWListGroups_IAM::send_response()
+{
+ if (!started_response) { // errored out before execute() wrote anything
+ start_response();
+ }
+}
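+
+// ListGroups streams its response: execute() writes the listed page through
+// send_response_data() (which opens the response on first use and flushes
+// after each chunk), then end_response() closes the Groups array and dumps
+// IsTruncated/Marker. send_response() only covers the early-error path where
+// nothing has been written yet.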
+
+
+// AddUserToGroup
+class RGWAddUserToGroup_IAM : public RGWOp {
+ bufferlist post_body;
+ RGWGroupInfo group;
+ std::unique_ptr<rgw::sal::User> user;
+
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
+ public:
+ explicit RGWAddUserToGroup_IAM(const ceph::bufferlist& post_body)
+ : post_body(post_body) {}
+
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "add_user_to_group"; }
+ RGWOpType get_type() override { return RGW_OP_ADD_USER_TO_GROUP; }
+};
+
+int RGWAddUserToGroup_IAM::init_processing(optional_yield y)
+{
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ group.account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ const std::string name = s->info.args.get("GroupName");
+ if (!validate_iam_group_name(name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ const std::string username = s->info.args.get("UserName");
+ if (!validate_iam_user_name(username, s->err.message)) {
+ return -EINVAL;
+ }
+
+ // look up group by GroupName
+ rgw::sal::Attrs attrs_ignored;
+ RGWObjVersionTracker objv_ignored;
+ int r = driver->load_group_by_name(this, y, group.account_id, name,
+ group, attrs_ignored, objv_ignored);
+ if (r == -ENOENT) {
+ s->err.message = "No such GroupName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ if (r < 0) {
+ return r;
+ }
+
+ // look up user by UserName
+ const std::string& tenant = s->auth.identity->get_tenant();
+ r = driver->load_account_user_by_name(this, y, group.account_id,
+ tenant, username, &user);
+ if (r == -ENOENT) {
+ s->err.message = "No such UserName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWAddUserToGroup_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = make_resource_name(group);
+ const rgw::ARN arn{resource_name, "group", group.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamAddUserToGroup, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+int RGWAddUserToGroup_IAM::forward_to_master(optional_yield y,
+ const rgw::SiteConfig& site)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("GroupName");
+ s->info.args.remove("UserName");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+ return 0;
+}
+
+void RGWAddUserToGroup_IAM::execute(optional_yield y)
+{
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ if (op_ret) {
+ return;
+ }
+ }
+
+ op_ret = retry_raced_user_write(this, y, user.get(),
+ [this, y] {
+ RGWUserInfo& info = user->get_info();
+ RGWUserInfo old_info = info;
+
+ if (!info.group_ids.insert(group.id).second) {
+ return 0; // nothing to do, return success
+ }
+
+ constexpr bool exclusive = false;
+ return user->store_user(this, y, exclusive, &old_info);
+ });
+}
+
+void RGWAddUserToGroup_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "AddUserToGroupResponse", RGW_REST_IAM_XMLNS};
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /AddUserToGroupResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// RemoveUserFromGroup
+class RGWRemoveUserFromGroup_IAM : public RGWOp {
+ bufferlist post_body;
+ RGWGroupInfo group;
+ std::unique_ptr<rgw::sal::User> user;
+
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
+ public:
+ explicit RGWRemoveUserFromGroup_IAM(const ceph::bufferlist& post_body)
+ : post_body(post_body) {}
+
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "remove_user_from_group"; }
+ RGWOpType get_type() override { return RGW_OP_REMOVE_USER_FROM_GROUP; }
+};
+
+int RGWRemoveUserFromGroup_IAM::init_processing(optional_yield y)
+{
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ group.account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ const std::string name = s->info.args.get("GroupName");
+ if (!validate_iam_group_name(name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ const std::string username = s->info.args.get("UserName");
+ if (!validate_iam_user_name(username, s->err.message)) {
+ return -EINVAL;
+ }
+
+ // look up group by GroupName
+ rgw::sal::Attrs attrs_ignored;
+ RGWObjVersionTracker objv_ignored;
+ int r = driver->load_group_by_name(this, y, group.account_id, name,
+ group, attrs_ignored, objv_ignored);
+ if (r == -ENOENT) {
+ s->err.message = "No such GroupName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ if (r < 0) {
+ return r;
+ }
+
+ // look up user by UserName
+ const std::string& tenant = s->auth.identity->get_tenant();
+ r = driver->load_account_user_by_name(this, y, group.account_id,
+ tenant, username, &user);
+ if (r == -ENOENT) {
+ s->err.message = "No such UserName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWRemoveUserFromGroup_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = make_resource_name(group);
+ const rgw::ARN arn{resource_name, "group", group.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamRemoveUserFromGroup, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+int RGWRemoveUserFromGroup_IAM::forward_to_master(optional_yield y,
+ const rgw::SiteConfig& site)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("GroupName");
+ s->info.args.remove("UserName");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+ return 0;
+}
+
+void RGWRemoveUserFromGroup_IAM::execute(optional_yield y)
+{
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ if (op_ret) {
+ return;
+ }
+ }
+
+ op_ret = retry_raced_user_write(this, y, user.get(),
+ [this, y] {
+ RGWUserInfo& info = user->get_info();
+ RGWUserInfo old_info = info;
+
+ auto id = info.group_ids.find(group.id);
+ if (id == info.group_ids.end()) {
+ return 0; // nothing to do, return success
+ }
+ info.group_ids.erase(id);
+
+ constexpr bool exclusive = false;
+ return user->store_user(this, y, exclusive, &old_info);
+ });
+}
+
+void RGWRemoveUserFromGroup_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "RemoveUserFromGroupResponse", RGW_REST_IAM_XMLNS};
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /RemoveUserFromGroupResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// ListGroupsForUser
+class RGWListGroupsForUser_IAM : public RGWOp {
+ rgw_account_id account_id;
+ std::string marker;
+ int max_items = 100;
+ std::unique_ptr<rgw::sal::User> user;
+
+ public:
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "list_groups_for_user"; }
+ RGWOpType get_type() override { return RGW_OP_LIST_GROUPS_FOR_USER; }
+};
+
+int RGWListGroupsForUser_IAM::init_processing(optional_yield y)
+{
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ marker = s->info.args.get("Marker");
+
+ int r = s->info.args.get_int("MaxItems", &max_items, max_items);
+ if (r < 0 || max_items > 1000) {
+ s->err.message = "Invalid value for MaxItems";
+ return -EINVAL;
+ }
+
+ const std::string username = s->info.args.get("UserName");
+ if (!validate_iam_user_name(username, s->err.message)) {
+ return -EINVAL;
+ }
+
+ // look up user by UserName
+ const std::string& tenant = s->auth.identity->get_tenant();
+ r = driver->load_account_user_by_name(this, y, account_id,
+ tenant, username, &user);
+ if (r == -ENOENT) {
+ s->err.message = "No such UserName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWListGroupsForUser_IAM::verify_permission(optional_yield y)
+{
+ const RGWUserInfo& info = user->get_info();
+ const std::string resource_name = string_cat_reserve(info.path, info.display_name);
+ const rgw::ARN arn{resource_name, "user", account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamListGroupsForUser, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+void RGWListGroupsForUser_IAM::execute(optional_yield y)
+{
+ rgw::sal::GroupList listing;
+ listing.next_marker = marker;
+
+ op_ret = user->list_groups(this, y, marker, max_items, listing);
+ if (op_ret == -ENOENT) {
+ op_ret = 0;
+ } else if (op_ret < 0) {
+ return;
+ }
+
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "ListGroupsForUserResponse", RGW_REST_IAM_XMLNS};
+ {
+ Formatter::ObjectSection result{*f, "ListGroupsForUserResult"};
+ {
+ Formatter::ArraySection groups{*f, "Groups"};
+ for (const auto& info : listing.groups) {
+ Formatter::ObjectSection result{*f, "member"};
+ dump_iam_group(info, s->formatter);
+ } // /member
+ } // /Groups
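+    // a non-empty next_marker means the listing stopped at max_items; expose
+    // it as Marker so the client can resume the next page from there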
+ const bool truncated = !listing.next_marker.empty();
+ f->dump_bool("IsTruncated", truncated);
+ if (truncated) {
+ f->dump_string("Marker", listing.next_marker);
+ }
+ } // /ListGroupsForUserResult
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /ListGroupsForUserResponse
+}
+
+void RGWListGroupsForUser_IAM::send_response()
+{
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// PutGroupPolicy
+class RGWPutGroupPolicy_IAM : public RGWOp {
+ bufferlist post_body;
+ std::string policy_name;
+ std::string policy_document;
+ RGWGroupInfo info;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
+ public:
+ explicit RGWPutGroupPolicy_IAM(const ceph::bufferlist& post_body)
+ : post_body(post_body) {}
+
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "put_group_policy"; }
+ RGWOpType get_type() override { return RGW_OP_PUT_GROUP_POLICY; }
+};
+
+int RGWPutGroupPolicy_IAM::init_processing(optional_yield y)
+{
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ info.account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ const std::string name = s->info.args.get("GroupName");
+ if (!validate_iam_group_name(name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ policy_name = s->info.args.get("PolicyName");
+ if (!validate_iam_policy_name(policy_name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ policy_document = s->info.args.get("PolicyDocument");
+ if (policy_document.empty()) {
+ s->err.message = "Missing required element PolicyDocument";
+ return -EINVAL;
+ }
+
+ // look up group by GroupName
+ int r = driver->load_group_by_name(this, y, info.account_id, name,
+ info, attrs, objv);
+ if (r == -ENOENT) {
+ s->err.message = "No such GroupName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWPutGroupPolicy_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "group", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamPutGroupPolicy, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+int RGWPutGroupPolicy_IAM::forward_to_master(optional_yield y,
+ const rgw::SiteConfig& site)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("GroupName");
+ s->info.args.remove("PolicyName");
+ s->info.args.remove("PolicyDocument");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+ return 0;
+}
+
+void RGWPutGroupPolicy_IAM::execute(optional_yield y)
+{
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ if (op_ret) {
+ return;
+ }
+ }
+
+ try {
+ // validate the document
+ const rgw::IAM::Policy p(
+ s->cct, nullptr, policy_document,
+ s->cct->_conf.get_val<bool>("rgw_policy_reject_invalid_principals"));
+ } catch (rgw::IAM::PolicyParseException& e) {
+ s->err.message = std::move(e.msg);
+ op_ret = -ERR_MALFORMED_DOC;
+ return;
+ }
+
+ op_ret = retry_raced_group_write(this, y, driver, info, attrs, objv,
+ [this, y] {
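+    // inline group policies are kept as a PolicyName -> document map encoded
+    // into the group's RGW_ATTR_IAM_POLICY xattr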
+ std::map<std::string, std::string> policies;
+ if (auto p = attrs.find(RGW_ATTR_IAM_POLICY); p != attrs.end()) try {
+ decode(policies, p->second);
+ } catch (const buffer::error& err) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode group policies" << dendl;
+ return -EIO;
+ }
+
+ policies[policy_name] = policy_document;
+
+ constexpr size_t GROUP_POLICIES_MAX_NUM = 100;
+ if (policies.size() > GROUP_POLICIES_MAX_NUM) {
+ s->err.message = fmt::format("Group policy limit {} exceeded",
+ GROUP_POLICIES_MAX_NUM);
+ return -ERR_LIMIT_EXCEEDED;
+ }
+
+ bufferlist bl;
+ encode(policies, bl);
+ attrs[RGW_ATTR_IAM_POLICY] = std::move(bl);
+
+ constexpr bool exclusive = false;
+ return driver->store_group(this, y, info, attrs, objv, exclusive, &info);
+ });
+}
+
+void RGWPutGroupPolicy_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "PutGroupPolicyResponse", RGW_REST_IAM_XMLNS};
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /PutGroupPolicyResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// GetGroupPolicy
+class RGWGetGroupPolicy_IAM : public RGWOp {
+ std::string policy_name;
+ RGWGroupInfo info;
+ rgw::sal::Attrs attrs;
+
+ public:
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "get_group_policy"; }
+ RGWOpType get_type() override { return RGW_OP_GET_GROUP_POLICY; }
+};
+
+int RGWGetGroupPolicy_IAM::init_processing(optional_yield y)
+{
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ info.account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ const std::string name = s->info.args.get("GroupName");
+ if (!validate_iam_group_name(name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ policy_name = s->info.args.get("PolicyName");
+ if (!validate_iam_policy_name(policy_name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ // look up group by GroupName
+ RGWObjVersionTracker objv_ignored;
+ int r = driver->load_group_by_name(this, y, info.account_id, name,
+ info, attrs, objv_ignored);
+ if (r == -ENOENT) {
+ s->err.message = "No such GroupName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWGetGroupPolicy_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "group", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamGetGroupPolicy, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+void RGWGetGroupPolicy_IAM::execute(optional_yield y)
+{
+ std::map<std::string, std::string> policies;
+ if (auto p = attrs.find(RGW_ATTR_IAM_POLICY); p != attrs.end()) try {
+ decode(policies, p->second);
+ } catch (const buffer::error& err) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode group policies" << dendl;
+ op_ret = -EIO;
+ return;
+ }
+
+ auto policy = policies.find(policy_name);
+ if (policy == policies.end()) {
+ s->err.message = "No such PolicyName on the group";
+ op_ret = -ERR_NO_SUCH_ENTITY;
+ return;
+ }
+
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "GetGroupPolicyResponse", RGW_REST_IAM_XMLNS};
+ {
+ Formatter::ObjectSection result{*f, "GetGroupPolicyResult"};
+ encode_json("GroupName", info.name, f);
+ encode_json("PolicyName", policy_name, f);
+ encode_json("PolicyDocument", policy->second, f);
+ // /GetGroupPolicyResult
+ }
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /GetGroupPolicyResponse
+}
+
+void RGWGetGroupPolicy_IAM::send_response()
+{
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// DeleteGroupPolicy
+class RGWDeleteGroupPolicy_IAM : public RGWOp {
+ bufferlist post_body;
+ std::string policy_name;
+ RGWGroupInfo info;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
+ public:
+ explicit RGWDeleteGroupPolicy_IAM(const ceph::bufferlist& post_body)
+ : post_body(post_body) {}
+
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+  const char* name() const override { return "delete_group_policy"; }
+  RGWOpType get_type() override { return RGW_OP_DELETE_GROUP_POLICY; }
+};
+
+int RGWDeleteGroupPolicy_IAM::init_processing(optional_yield y)
+{
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ info.account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ const std::string name = s->info.args.get("GroupName");
+ if (!validate_iam_group_name(name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ policy_name = s->info.args.get("PolicyName");
+ if (!validate_iam_policy_name(policy_name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ // look up group by GroupName
+ int r = driver->load_group_by_name(this, y, info.account_id, name,
+ info, attrs, objv);
+ if (r == -ENOENT) {
+ s->err.message = "No such GroupName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWDeleteGroupPolicy_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "group", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamDeleteGroupPolicy, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+int RGWDeleteGroupPolicy_IAM::forward_to_master(optional_yield y,
+ const rgw::SiteConfig& site)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("GroupName");
+ s->info.args.remove("PolicyName");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+ return 0;
+}
+
+void RGWDeleteGroupPolicy_IAM::execute(optional_yield y)
+{
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ if (op_ret) {
+ return;
+ }
+ }
+
+ op_ret = retry_raced_group_write(this, y, driver, info, attrs, objv,
+ [this, y, &site] {
+ std::map<std::string, std::string> policies;
+ if (auto it = attrs.find(RGW_ATTR_IAM_POLICY); it != attrs.end()) try {
+ decode(policies, it->second);
+    } catch (const buffer::error& err) {
+      ldpp_dout(this, 0) << "ERROR: failed to decode group policies" << dendl;
+ return -EIO;
+ }
+
+ auto i = policies.find(policy_name);
+ if (i == policies.end()) {
+ if (!site.is_meta_master()) {
+ return 0; // delete succeeded on the master
+ }
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ policies.erase(i);
+
+ bufferlist bl;
+ encode(policies, bl);
+ attrs[RGW_ATTR_IAM_POLICY] = std::move(bl);
+
+ constexpr bool exclusive = false;
+ return driver->store_group(this, y, info, attrs, objv, exclusive, &info);
+ });
+}
+
+void RGWDeleteGroupPolicy_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "DeleteGroupPolicyResponse", RGW_REST_IAM_XMLNS};
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /DeleteGroupPolicyResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// ListGroupPolicies
+class RGWListGroupPolicies_IAM : public RGWOp {
+ std::string marker;
+ int max_items = 100;
+ RGWGroupInfo info;
+ rgw::sal::Attrs attrs;
+
+ public:
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "list_group_policies"; }
+ RGWOpType get_type() override { return RGW_OP_LIST_GROUP_POLICIES; }
+};
+
+int RGWListGroupPolicies_IAM::init_processing(optional_yield y)
+{
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ info.account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ const std::string name = s->info.args.get("GroupName");
+ if (!validate_iam_group_name(name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ marker = s->info.args.get("Marker");
+
+ int r = s->info.args.get_int("MaxItems", &max_items, max_items);
+ if (r < 0 || max_items > 1000) {
+ s->err.message = "Invalid value for MaxItems";
+ return -EINVAL;
+ }
+
+ // look up group by GroupName
+ RGWObjVersionTracker objv_ignored;
+ r = driver->load_group_by_name(this, y, info.account_id, name,
+ info, attrs, objv_ignored);
+ if (r == -ENOENT) {
+ s->err.message = "No such GroupName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWListGroupPolicies_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "group", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamListGroupPolicies, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+void RGWListGroupPolicies_IAM::execute(optional_yield y)
+{
+ std::map<std::string, std::string> policies;
+ if (auto p = attrs.find(RGW_ATTR_IAM_POLICY); p != attrs.end()) try {
+ decode(policies, p->second);
+ } catch (const buffer::error& err) {
+    ldpp_dout(this, 0) << "ERROR: failed to decode group policies" << dendl;
+    op_ret = -EIO;
+    return;
+  }
+
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "ListGroupPoliciesResponse", RGW_REST_IAM_XMLNS};
+ {
+ Formatter::ObjectSection result{*f, "ListGroupPoliciesResult"};
+ auto policy = policies.lower_bound(marker);
+ {
+ Formatter::ArraySection names{*f, "PolicyNames"};
+ for (; policy != policies.end() && max_items > 0; ++policy, --max_items) {
+ encode_json("member", policy->first, f);
+ }
+ } // /PolicyNames
+ const bool is_truncated = (policy != policies.end());
+ encode_json("IsTruncated", is_truncated, f);
+ if (is_truncated) {
+ encode_json("Marker", policy->first, f);
+ }
+  } // /ListGroupPoliciesResult
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /ListGroupPoliciesResponse
+}
+
+void RGWListGroupPolicies_IAM::send_response()
+{
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// AttachGroupPolicy
+class RGWAttachGroupPolicy_IAM : public RGWOp {
+ bufferlist post_body;
+ std::string policy_arn;
+ RGWGroupInfo info;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
+ public:
+ explicit RGWAttachGroupPolicy_IAM(const ceph::bufferlist& post_body)
+ : post_body(post_body) {}
+
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "attach_group_policy"; }
+ RGWOpType get_type() override { return RGW_OP_ATTACH_GROUP_POLICY; }
+};
+
+int RGWAttachGroupPolicy_IAM::init_processing(optional_yield y)
+{
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ info.account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ const std::string name = s->info.args.get("GroupName");
+ if (!validate_iam_group_name(name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ policy_arn = s->info.args.get("PolicyArn");
+ if (!validate_iam_policy_arn(policy_arn, s->err.message)) {
+ return -EINVAL;
+ }
+
+ // look up group by GroupName
+ int r = driver->load_group_by_name(this, y, info.account_id, name,
+ info, attrs, objv);
+ if (r == -ENOENT) {
+ s->err.message = "No such GroupName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWAttachGroupPolicy_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "group", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamAttachGroupPolicy, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+int RGWAttachGroupPolicy_IAM::forward_to_master(optional_yield y,
+ const rgw::SiteConfig& site)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("GroupName");
+ s->info.args.remove("PolicyArn");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+ return 0;
+}
+
+void RGWAttachGroupPolicy_IAM::execute(optional_yield y)
+{
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ if (op_ret) {
+ return;
+ }
+ }
+
+ // validate the policy arn
+ try {
+ const auto p = rgw::IAM::get_managed_policy(s->cct, policy_arn);
+ if (!p) {
+ op_ret = -ERR_NO_SUCH_ENTITY;
+ s->err.message = "The requested PolicyArn is not recognized";
+ return;
+ }
+ } catch (const rgw::IAM::PolicyParseException& e) {
+ ldpp_dout(this, 5) << "failed to parse policy: " << e.what() << dendl;
+ s->err.message = e.what();
+ op_ret = -ERR_MALFORMED_DOC;
+ return;
+ }
+
+ op_ret = retry_raced_group_write(this, y, driver, info, attrs, objv,
+ [this, y] {
+ rgw::IAM::ManagedPolicies policies;
+ if (auto it = attrs.find(RGW_ATTR_MANAGED_POLICY); it != attrs.end()) try {
+ decode(policies, it->second);
+    } catch (const buffer::error& err) {
+      ldpp_dout(this, 0) << "ERROR: failed to decode managed policies" << dendl;
+ return -EIO;
+ }
+
+ if (!policies.arns.insert(policy_arn).second) {
+ return 0; // nothing to do, return success
+ }
+
+ bufferlist bl;
+ encode(policies, bl);
+ attrs[RGW_ATTR_MANAGED_POLICY] = std::move(bl);
+
+ constexpr bool exclusive = false;
+ return driver->store_group(this, y, info, attrs, objv, exclusive, &info);
+ });
+}
+
+void RGWAttachGroupPolicy_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "AttachGroupPolicyResponse", RGW_REST_IAM_XMLNS};
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /AttachGroupPolicyResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// DetachGroupPolicy
+class RGWDetachGroupPolicy_IAM : public RGWOp {
+ bufferlist post_body;
+ std::string policy_arn;
+ RGWGroupInfo info;
+ rgw::sal::Attrs attrs;
+ RGWObjVersionTracker objv;
+
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
+ public:
+ explicit RGWDetachGroupPolicy_IAM(const ceph::bufferlist& post_body)
+ : post_body(post_body) {}
+
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "detach_group_policy"; }
+ RGWOpType get_type() override { return RGW_OP_DETACH_GROUP_POLICY; }
+};
+
+int RGWDetachGroupPolicy_IAM::init_processing(optional_yield y)
+{
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ info.account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ const std::string name = s->info.args.get("GroupName");
+ if (!validate_iam_group_name(name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ policy_arn = s->info.args.get("PolicyArn");
+ if (!validate_iam_policy_arn(policy_arn, s->err.message)) {
+ return -EINVAL;
+ }
+
+ // look up group by GroupName
+ int r = driver->load_group_by_name(this, y, info.account_id, name,
+ info, attrs, objv);
+ if (r == -ENOENT) {
+ s->err.message = "No such GroupName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWDetachGroupPolicy_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "group", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamDetachGroupPolicy, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+int RGWDetachGroupPolicy_IAM::forward_to_master(optional_yield y,
+ const rgw::SiteConfig& site)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("GroupName");
+ s->info.args.remove("PolicyArn");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+ return 0;
+}
+
+void RGWDetachGroupPolicy_IAM::execute(optional_yield y)
+{
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ if (op_ret) {
+ return;
+ }
+ }
+
+ op_ret = retry_raced_group_write(this, y, driver, info, attrs, objv,
+ [this, y, &site] {
+ rgw::IAM::ManagedPolicies policies;
+ if (auto it = attrs.find(RGW_ATTR_MANAGED_POLICY); it != attrs.end()) try {
+ decode(policies, it->second);
+ } catch (const buffer::error& err) {
+      ldpp_dout(this, 0) << "ERROR: failed to decode managed policies" << dendl;
+ return -EIO;
+ }
+
+ auto i = policies.arns.find(policy_arn);
+ if (i == policies.arns.end()) {
+ if (!site.is_meta_master()) {
+ return 0; // delete succeeded on the master
+ }
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ policies.arns.erase(i);
+
+ bufferlist bl;
+ encode(policies, bl);
+ attrs[RGW_ATTR_MANAGED_POLICY] = std::move(bl);
+
+ constexpr bool exclusive = false;
+ return driver->store_group(this, y, info, attrs, objv, exclusive, &info);
+ });
+}
+
+void RGWDetachGroupPolicy_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "DetachGroupPolicyResponse", RGW_REST_IAM_XMLNS};
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /DetachGroupPolicyResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// ListAttachedGroupPolicies
+class RGWListAttachedGroupPolicies_IAM : public RGWOp {
+ RGWGroupInfo info;
+ rgw::sal::Attrs attrs;
+ std::string marker;
+ int max_items = 100;
+
+ public:
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "list_attached_group_policies"; }
+ RGWOpType get_type() override { return RGW_OP_LIST_ATTACHED_GROUP_POLICIES; }
+};
+
+int RGWListAttachedGroupPolicies_IAM::init_processing(optional_yield y)
+{
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ info.account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ const std::string name = s->info.args.get("GroupName");
+ if (!validate_iam_group_name(name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ marker = s->info.args.get("Marker");
+
+ int r = s->info.args.get_int("MaxItems", &max_items, max_items);
+ if (r < 0 || max_items > 1000) {
+ s->err.message = "Invalid value for MaxItems";
+ return -EINVAL;
+ }
+
+ // look up group by GroupName
+ RGWObjVersionTracker objv_ignored;
+ r = driver->load_group_by_name(this, y, info.account_id, name,
+ info, attrs, objv_ignored);
+ if (r == -ENOENT) {
+ s->err.message = "No such GroupName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWListAttachedGroupPolicies_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "group", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamListAttachedGroupPolicies, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+void RGWListAttachedGroupPolicies_IAM::execute(optional_yield y)
+{
+ rgw::IAM::ManagedPolicies policies;
+ if (auto p = attrs.find(RGW_ATTR_MANAGED_POLICY); p != attrs.end()) try {
+ decode(policies, p->second);
+ } catch (const buffer::error& err) {
+    ldpp_dout(this, 0) << "ERROR: failed to decode managed policies" << dendl;
+    op_ret = -EIO;
+    return;
+  }
+
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "ListAttachedGroupPoliciesResponse", RGW_REST_IAM_XMLNS};
+ {
+ Formatter::ObjectSection result{*f, "ListAttachedGroupPoliciesResult"};
+
+ auto policy = policies.arns.lower_bound(marker);
+ {
+ Formatter::ArraySection arr{*f, "AttachedPolicies"};
+ for (; policy != policies.arns.end() && max_items > 0; ++policy, --max_items) {
+ Formatter::ObjectSection result{*f, "member"};
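+        // managed policy arns look like arn:aws:iam::<account>:policy/<name>,
+        // so the part after the first '/' doubles as the PolicyName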
+ std::string_view arn = *policy;
+ if (auto p = arn.find('/'); p != arn.npos) {
+ encode_json("PolicyName", arn.substr(p + 1), f);
+ }
+ encode_json("PolicyArn", arn, f);
+ }
+ } // /AttachedPolicies
+ const bool is_truncated = (policy != policies.arns.end());
+ encode_json("IsTruncated", is_truncated, f);
+ if (is_truncated) {
+ encode_json("Marker", *policy, f);
+ }
+    // /ListAttachedGroupPoliciesResult
+ }
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /ListAttachedGroupPoliciesResponse
+}
+
+void RGWListAttachedGroupPolicies_IAM::send_response()
+{
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+RGWOp* make_iam_create_group_op(const ceph::bufferlist& post_body) {
+ return new RGWCreateGroup_IAM(post_body);
+}
+RGWOp* make_iam_get_group_op(const ceph::bufferlist&) {
+ return new RGWGetGroup_IAM;
+}
+RGWOp* make_iam_update_group_op(const ceph::bufferlist& post_body) {
+ return new RGWUpdateGroup_IAM(post_body);
+}
+RGWOp* make_iam_delete_group_op(const ceph::bufferlist& post_body) {
+ return new RGWDeleteGroup_IAM(post_body);
+}
+RGWOp* make_iam_list_groups_op(const ceph::bufferlist&) {
+ return new RGWListGroups_IAM;
+}
+
+RGWOp* make_iam_add_user_to_group_op(const ceph::bufferlist& post_body) {
+ return new RGWAddUserToGroup_IAM(post_body);
+}
+RGWOp* make_iam_remove_user_from_group_op(const ceph::bufferlist& post_body) {
+ return new RGWRemoveUserFromGroup_IAM(post_body);
+}
+RGWOp* make_iam_list_groups_for_user_op(const ceph::bufferlist& unused) {
+ return new RGWListGroupsForUser_IAM;
+}
+
+RGWOp* make_iam_put_group_policy_op(const ceph::bufferlist& post_body) {
+ return new RGWPutGroupPolicy_IAM(post_body);
+}
+RGWOp* make_iam_get_group_policy_op(const ceph::bufferlist& unused) {
+ return new RGWGetGroupPolicy_IAM;
+}
+RGWOp* make_iam_delete_group_policy_op(const ceph::bufferlist& post_body) {
+ return new RGWDeleteGroupPolicy_IAM(post_body);
+}
+RGWOp* make_iam_list_group_policies_op(const ceph::bufferlist& unused) {
+ return new RGWListGroupPolicies_IAM;
+}
+RGWOp* make_iam_attach_group_policy_op(const ceph::bufferlist& post_body) {
+ return new RGWAttachGroupPolicy_IAM(post_body);
+}
+RGWOp* make_iam_detach_group_policy_op(const ceph::bufferlist& post_body) {
+ return new RGWDetachGroupPolicy_IAM(post_body);
+}
+RGWOp* make_iam_list_attached_group_policies_op(const ceph::bufferlist& unused) {
+  return new RGWListAttachedGroupPolicies_IAM;
+}
+
diff --git a/src/rgw/rgw_rest_iam_group.h b/src/rgw/rgw_rest_iam_group.h
new file mode 100644
index 00000000000..861b7e0e3c5
--- /dev/null
+++ b/src/rgw/rgw_rest_iam_group.h
@@ -0,0 +1,40 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#pragma once
+
+#include "include/buffer_fwd.h"
+
+class RGWOp;
+
+// IAM Group op factory functions
+RGWOp* make_iam_create_group_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_get_group_op(const ceph::bufferlist& unused);
+RGWOp* make_iam_update_group_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_delete_group_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_list_groups_op(const ceph::bufferlist& unused);
+
+RGWOp* make_iam_add_user_to_group_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_remove_user_from_group_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_list_groups_for_user_op(const ceph::bufferlist& unused);
+
+// IAM GroupPolicy op factory functions
+RGWOp* make_iam_put_group_policy_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_get_group_policy_op(const ceph::bufferlist& unused);
+RGWOp* make_iam_delete_group_policy_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_list_group_policies_op(const ceph::bufferlist& unused);
+RGWOp* make_iam_attach_group_policy_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_detach_group_policy_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_list_attached_group_policies_op(const ceph::bufferlist& unused);
diff --git a/src/rgw/rgw_rest_iam_user.cc b/src/rgw/rgw_rest_iam_user.cc
new file mode 100644
index 00000000000..ae413e6d185
--- /dev/null
+++ b/src/rgw/rgw_rest_iam_user.cc
@@ -0,0 +1,1468 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#include "rgw_rest_iam_user.h"
+
+#include <utility>
+#include "include/buffer.h"
+#include "common/errno.h"
+#include "rgw_arn.h"
+#include "rgw_common.h"
+#include "rgw_iam_managed_policy.h"
+#include "rgw_op.h"
+#include "rgw_process_env.h"
+#include "rgw_rest.h"
+#include "rgw_rest_iam.h"
+
+
+static std::string make_resource_name(const RGWUserInfo& info)
+{
+ std::string_view path = info.path;
+ if (path.empty()) {
+ path = "/";
+ }
+ return string_cat_reserve(path, info.display_name);
+}
+
+static void dump_iam_user(const RGWUserInfo& info, Formatter* f)
+{
+ encode_json("Path", info.path, f);
+ encode_json("UserName", info.display_name, f);
+ encode_json("UserId", info.user_id, f);
+ encode_json("Arn", iam_user_arn(info), f);
+ encode_json("CreateDate", info.create_date, f);
+}
+
+
+// CreateUser
+class RGWCreateUser_IAM : public RGWOp {
+ bufferlist post_body;
+ RGWUserInfo info;
+
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site, std::string& uid);
+ public:
+ explicit RGWCreateUser_IAM(const ceph::bufferlist& post_body)
+ : post_body(post_body) {}
+
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "create_user"; }
+ RGWOpType get_type() override { return RGW_OP_CREATE_USER; }
+};
+
+int RGWCreateUser_IAM::init_processing(optional_yield y)
+{
+ // use account id from authenticated user/role. with AssumeRole, this may not
+ // match the account of s->user
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ info.account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ info.path = s->info.args.get("Path");
+ if (info.path.empty()) {
+ info.path = "/";
+ } else if (!validate_iam_path(info.path, s->err.message)) {
+ return -EINVAL;
+ }
+
+ info.display_name = s->info.args.get("UserName");
+ if (!validate_iam_user_name(info.display_name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ // TODO: Tags
+ return 0;
+}
+
+int RGWCreateUser_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "user", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamCreateUser, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+int RGWCreateUser_IAM::forward_to_master(optional_yield y,
+ const rgw::SiteConfig& site,
+ std::string& uid)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("UserName");
+ s->info.args.remove("Path");
+ s->info.args.remove("PermissionsBoundary");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+ auto& params = s->info.args.get_params();
+ if (auto lower = params.lower_bound("Tags.member."); lower != params.end()) {
+ auto upper = params.upper_bound("Tags.member.");
+ params.erase(lower, upper);
+ }
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+
+  XMLObj* response = parser.find_first("CreateUserResponse");
+ if (!response) {
+ ldpp_dout(this, 5) << "ERROR: unexpected xml: CreateUserResponse" << dendl;
+ return -EINVAL;
+ }
+
+ XMLObj* result = response->find_first("CreateUserResult");
+ if (!result) {
+ ldpp_dout(this, 5) << "ERROR: unexpected xml: CreateUserResult" << dendl;
+ return -EINVAL;
+ }
+
+ XMLObj* user = result->find_first("User");
+ if (!user) {
+ ldpp_dout(this, 5) << "ERROR: unexpected xml: User" << dendl;
+ return -EINVAL;
+ }
+
+ try {
+ RGWXMLDecoder::decode_xml("UserId", uid, user, true);
+ } catch (RGWXMLDecoder::err& err) {
+ ldpp_dout(this, 5) << "ERROR: unexpected xml: UserId" << dendl;
+ return -EINVAL;
+ }
+
+ ldpp_dout(this, 4) << "user_id decoded from forwarded response is " << uid << dendl;
+ return 0;
+}
+
+void RGWCreateUser_IAM::execute(optional_yield y)
+{
+ // check the current user count against account limit
+ RGWAccountInfo account;
+ rgw::sal::Attrs attrs; // unused
+ RGWObjVersionTracker objv; // unused
+ op_ret = driver->load_account_by_id(this, y, info.account_id,
+ account, attrs, objv);
+ if (op_ret < 0) {
+ ldpp_dout(this, 4) << "failed to load iam account "
+ << info.account_id << ": " << cpp_strerror(op_ret) << dendl;
+ return;
+ }
+
+ if (account.max_users >= 0) { // max_users < 0 means unlimited
+ uint32_t count = 0;
+ op_ret = driver->count_account_users(this, y, info.account_id, count);
+ if (op_ret < 0) {
+ ldpp_dout(this, 4) << "failed to count users for iam account "
+ << info.account_id << ": " << cpp_strerror(op_ret) << dendl;
+ return;
+ }
+ if (std::cmp_greater_equal(count, account.max_users)) {
+ s->err.message = fmt::format("User limit {} exceeded",
+ account.max_users);
+      op_ret = -ERR_LIMIT_EXCEEDED;
+ return;
+ }
+ }
+
+ // generate user id. forward_to_master() may overwrite this
+ uuid_d uuid;
+ uuid.generate_random();
+ info.user_id.id = uuid.to_string();
+ info.user_id.tenant = s->auth.identity->get_tenant();
+
+ info.create_date = ceph::real_clock::now();
+
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site, info.user_id.id);
+ if (op_ret) {
+ return;
+ }
+ }
+
+ std::unique_ptr<rgw::sal::User> user = driver->get_user(info.user_id);
+ user->get_info() = info;
+
+ constexpr bool exclusive = true;
+ op_ret = user->store_user(this, y, exclusive, nullptr);
+}
+
+void RGWCreateUser_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "CreateUserResponse", RGW_REST_IAM_XMLNS};
+ {
+ Formatter::ObjectSection result{*f, "CreateUserResult"};
+ Formatter::ObjectSection user{*f, "User"};
+ dump_iam_user(info, f);
+ // /User
+ // /CreateUserResult
+ }
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /CreateUserResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// GetUser
+class RGWGetUser_IAM : public RGWOp {
+ rgw_account_id account_id;
+ std::unique_ptr<rgw::sal::User> user;
+ public:
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "get_user"; }
+ RGWOpType get_type() override { return RGW_OP_GET_USER; }
+};
+
+int RGWGetUser_IAM::init_processing(optional_yield y)
+{
+ // use account id from authenticated user/role. with AssumeRole, this may not
+ // match the account of s->user
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ const std::string username = s->info.args.get("UserName");
+ if (username.empty()) {
+ // If you do not specify a user name, IAM determines the user name
+ // implicitly based on the AWS access key ID signing the request.
+ // This operation works for access keys under the AWS account.
+ // Consequently, you can use this operation to manage AWS account
+ // root user credentials.
+ user = s->user->clone();
+ return 0;
+ }
+
+ // look up user by UserName
+ const std::string& tenant = s->auth.identity->get_tenant();
+ int r = driver->load_account_user_by_name(this, y, account_id,
+ tenant, username, &user);
+ // root user is hidden from user apis
+ const bool is_root = (user && user->get_type() == TYPE_ROOT);
+ if (r == -ENOENT || is_root) {
+ s->err.message = "No such UserName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWGetUser_IAM::verify_permission(optional_yield y)
+{
+ const RGWUserInfo& info = user->get_info();
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "user", account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamGetUser, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+void RGWGetUser_IAM::execute(optional_yield y)
+{
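+  // nothing to do: init_processing() already loaded the user and
+  // send_response() dumps it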
+}
+
+void RGWGetUser_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "GetUserResponse", RGW_REST_IAM_XMLNS};
+ {
+ Formatter::ObjectSection result{*f, "GetUserResult"};
+ Formatter::ObjectSection User{*f, "User"};
+ dump_iam_user(user->get_info(), f);
+ // /User
+ // /GetUserResult
+ }
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /GetUserResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// UpdateUser
+class RGWUpdateUser_IAM : public RGWOp {
+ bufferlist post_body;
+ std::string new_path;
+ std::string new_username;
+ std::unique_ptr<rgw::sal::User> user;
+
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
+ public:
+ explicit RGWUpdateUser_IAM(const ceph::bufferlist& post_body)
+ : post_body(post_body) {}
+
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "update_user"; }
+ RGWOpType get_type() override { return RGW_OP_UPDATE_USER; }
+};
+
+int RGWUpdateUser_IAM::init_processing(optional_yield y)
+{
+ // use account id from authenticated user/role. with AssumeRole, this may not
+ // match the account of s->user
+ rgw_account_id account_id;
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ new_path = s->info.args.get("NewPath");
+ if (!new_path.empty() && !validate_iam_path(new_path, s->err.message)) {
+ return -EINVAL;
+ }
+
+ new_username = s->info.args.get("NewUserName");
+ if (!new_username.empty() &&
+ !validate_iam_user_name(new_username, s->err.message)) {
+ return -EINVAL;
+ }
+
+ const std::string username = s->info.args.get("UserName");
+ if (username.empty()) {
+ s->err.message = "Missing required element UserName";
+ return -EINVAL;
+ }
+
+ // look up user by UserName
+ const std::string& tenant = s->auth.identity->get_tenant();
+ int r = driver->load_account_user_by_name(this, y, account_id,
+ tenant, username, &user);
+ // root user is hidden from user apis
+ const bool is_root = (user && user->get_type() == TYPE_ROOT);
+ if (r == -ENOENT || is_root) {
+ s->err.message = "No such UserName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWUpdateUser_IAM::verify_permission(optional_yield y)
+{
+ const RGWUserInfo& info = user->get_info();
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "user", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamUpdateUser, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+int RGWUpdateUser_IAM::forward_to_master(optional_yield y, const rgw::SiteConfig& site)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("NewPath");
+ s->info.args.remove("NewUserName");
+ s->info.args.remove("UserName");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+ return 0;
+}
+
+void RGWUpdateUser_IAM::execute(optional_yield y)
+{
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ if (op_ret) {
+ return;
+ }
+ }
+
+ op_ret = retry_raced_user_write(this, y, user.get(),
+ [this, y] {
+ RGWUserInfo& info = user->get_info();
+ RGWUserInfo old_info = info;
+
+ if (!new_path.empty()) {
+ info.path = new_path;
+ }
+ if (!new_username.empty()) {
+ info.display_name = new_username;
+ }
+
+ if (info.path == old_info.path &&
+ info.display_name == old_info.display_name) {
+ return 0; // no changes to write
+ }
+
+ constexpr bool exclusive = false;
+ return user->store_user(this, y, exclusive, &old_info);
+ });
+}
+
+void RGWUpdateUser_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "UpdateUserResponse", RGW_REST_IAM_XMLNS};
+ {
+ Formatter::ObjectSection result{*f, "UpdateUserResult"};
+ Formatter::ObjectSection User{*f, "User"};
+ dump_iam_user(user->get_info(), f);
+ // /User
+ // /UpdateUserResult
+ }
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /UpdateUserResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// DeleteUser
+class RGWDeleteUser_IAM : public RGWOp {
+ bufferlist post_body;
+ std::unique_ptr<rgw::sal::User> user;
+
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
+ int check_empty();
+ public:
+ explicit RGWDeleteUser_IAM(const ceph::bufferlist& post_body)
+ : post_body(post_body) {}
+
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "delete_user"; }
+ RGWOpType get_type() override { return RGW_OP_DELETE_USER; }
+};
+
+int RGWDeleteUser_IAM::init_processing(optional_yield y)
+{
+ // use account id from authenticated user/role. with AssumeRole, this may not
+ // match the account of s->user
+ rgw_account_id account_id;
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ const std::string username = s->info.args.get("UserName");
+ if (username.empty()) {
+ s->err.message = "Missing required element UserName";
+ return -EINVAL;
+ }
+
+ // look up user by UserName
+ const std::string& tenant = s->auth.identity->get_tenant();
+ int r = driver->load_account_user_by_name(this, y, account_id,
+ tenant, username, &user);
+ // root user is hidden from user apis
+ const bool is_root = (user && user->get_type() == TYPE_ROOT);
+ if (r == -ENOENT || is_root) {
+ s->err.message = "No such UserName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWDeleteUser_IAM::verify_permission(optional_yield y)
+{
+ const RGWUserInfo& info = user->get_info();
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "user", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamDeleteUser, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+int RGWDeleteUser_IAM::forward_to_master(optional_yield y, const rgw::SiteConfig& site)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("UserName");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+ return 0;
+}
+
+int RGWDeleteUser_IAM::check_empty()
+{
+ if (!s->penv.site->is_meta_master()) {
+ // only check on the master zone. if a forwarded DeleteUser request
+ // succeeds on the master zone, it needs to succeed here too
+ return 0;
+ }
+
+ // verify that all user resources are removed first
+ const RGWUserInfo& info = user->get_info();
+ if (!info.access_keys.empty()) {
+ s->err.message = "The user cannot be deleted until its AccessKeys are removed";
+ return -ERR_DELETE_CONFLICT;
+ }
+
+ const auto& attrs = user->get_attrs();
+ if (auto p = attrs.find(RGW_ATTR_USER_POLICY); p != attrs.end()) {
+ std::map<std::string, std::string> policies;
+ try {
+ decode(policies, p->second);
+ } catch (const buffer::error&) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode user policies" << dendl;
+ return -EIO;
+ }
+
+ if (!policies.empty()) {
+ s->err.message = "The user cannot be deleted until all user policies are removed";
+ return -ERR_DELETE_CONFLICT;
+ }
+ }
+ if (auto p = attrs.find(RGW_ATTR_MANAGED_POLICY); p != attrs.end()) {
+ rgw::IAM::ManagedPolicies policies;
+ try {
+ decode(policies, p->second);
+ } catch (const buffer::error&) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode managed policies" << dendl;
+ return -EIO;
+ }
+
+ if (!policies.arns.empty()) {
+ s->err.message = "The user cannot be deleted until all managed policies are detached";
+ return -ERR_DELETE_CONFLICT;
+ }
+ }
+
+ return 0;
+}
+
+void RGWDeleteUser_IAM::execute(optional_yield y)
+{
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ } else {
+ op_ret = check_empty();
+ }
+ if (op_ret) {
+ return;
+ }
+
+ op_ret = user->remove_user(this, y);
+
+ if (op_ret == -ENOENT) {
+ if (!site.is_meta_master()) {
+ // delete succeeded on the master, return that success here too
+ op_ret = 0;
+ } else {
+ s->err.message = "No such UserName in the account";
+ op_ret = -ERR_NO_SUCH_ENTITY;
+ }
+ }
+}
+
+void RGWDeleteUser_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "DeleteUserResponse", RGW_REST_IAM_XMLNS};
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /DeleteUserResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// ListUsers
+class RGWListUsers_IAM : public RGWOp {
+ rgw_account_id account_id;
+ std::string marker;
+ std::string path_prefix;
+ int max_items = 100;
+
+ bool started_response = false;
+ void start_response();
+ void end_response(std::string_view next_marker);
+ void send_response_data(std::span<RGWUserInfo> users);
+ public:
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "list_users"; }
+ RGWOpType get_type() override { return RGW_OP_LIST_USERS; }
+};
+
+int RGWListUsers_IAM::init_processing(optional_yield y)
+{
+ // use account id from authenticated user/role. with AssumeRole, this may not
+ // match the account of s->user
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ marker = s->info.args.get("Marker");
+ path_prefix = s->info.args.get("PathPrefix");
+
+ int r = s->info.args.get_int("MaxItems", &max_items, max_items);
+ if (r < 0 || max_items > 1000) {
+ s->err.message = "Invalid value for MaxItems";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int RGWListUsers_IAM::verify_permission(optional_yield y)
+{
+ const std::string resource_name = "";
+ const rgw::ARN arn{resource_name, "user", account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamListUsers, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+void RGWListUsers_IAM::execute(optional_yield y)
+{
+ const std::string& tenant = s->auth.identity->get_tenant();
+
+ rgw::sal::UserList listing;
+ listing.next_marker = marker;
+
+ op_ret = driver->list_account_users(this, y, account_id, tenant,
+ path_prefix, listing.next_marker,
+ max_items, listing);
+ if (op_ret == -ENOENT) {
+ op_ret = 0;
+ } else if (op_ret < 0) {
+ return;
+ }
+
+ send_response_data(listing.users);
+
+ if (!started_response) {
+ started_response = true;
+ start_response();
+ }
+ end_response(listing.next_marker);
+}
+
+void RGWListUsers_IAM::start_response()
+{
+ const int64_t proposed_content_length =
+ op_ret ? NO_CONTENT_LENGTH : CHUNKED_TRANSFER_ENCODING;
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this, to_mime_type(s->format), proposed_content_length);
+
+ if (op_ret) {
+ return;
+ }
+
+ dump_start(s); // <?xml block ?>
+ s->formatter->open_object_section_in_ns("ListUsersResponse", RGW_REST_IAM_XMLNS);
+ s->formatter->open_object_section("ListUsersResult");
+ s->formatter->open_array_section("Users");
+}
+
+void RGWListUsers_IAM::end_response(std::string_view next_marker)
+{
+ s->formatter->close_section(); // Users
+
+ const bool truncated = !next_marker.empty();
+ s->formatter->dump_bool("IsTruncated", truncated);
+ if (truncated) {
+ s->formatter->dump_string("Marker", next_marker);
+ }
+
+ s->formatter->close_section(); // ListUsersResult
+ s->formatter->close_section(); // ListUsersResponse
+ rgw_flush_formatter_and_reset(s, s->formatter);
+}
+
+void RGWListUsers_IAM::send_response_data(std::span<RGWUserInfo> users)
+{
+ if (!started_response) {
+ started_response = true;
+ start_response();
+ }
+
+ for (const auto& info : users) {
+ if (info.type == TYPE_ROOT) {
+ continue; // root user is hidden from user apis
+ }
+ s->formatter->open_object_section("member");
+ dump_iam_user(info, s->formatter);
+ s->formatter->close_section(); // member
+ }
+
+ // flush after each chunk
+ rgw_flush_formatter(s, s->formatter);
+}
+
+void RGWListUsers_IAM::send_response()
+{
+ if (!started_response) { // errored out before execute() wrote anything
+ start_response();
+ }
+}
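
Aside: the `started_response` guard above lets headers and the opening XML go out exactly once, whether the first writer is the data path in `execute()` or the error path in `send_response()`, with a flush after every page. A toy standalone sketch of that guard pattern (illustrative class and plain-text output, not the real req_state formatter API):

```cpp
#include <iostream>
#include <string>
#include <vector>

class StreamingList {
  bool started = false;
  void start() { std::cout << "[headers + opening tags]\n"; }
 public:
  void send_data(const std::vector<std::string>& page) {
    if (!started) { started = true; start(); }  // first writer wins
    for (const auto& name : page)
      std::cout << "<member>" << name << "</member>\n";
    std::cout.flush();  // flush after each chunk, as in send_response_data()
  }
  void finish(const std::string& next_marker) {
    if (!started) { started = true; start(); }  // errored before any data
    std::cout << "<IsTruncated>" << (next_marker.empty() ? "false" : "true")
              << "</IsTruncated>\n[closing tags]\n";
  }
};

int main() {
  StreamingList out;
  out.send_data({"alice", "bob"});
  out.finish("");  // empty next marker: not truncated
}
```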
+
+
+void dump_access_key(const RGWAccessKey& key, Formatter* f)
+{
+ encode_json("AccessKeyId", key.id, f);
+ encode_json("Status", key.active ? "Active" : "Inactive", f);
+ encode_json("CreateDate", key.create_date, f);
+}
+
+// CreateAccessKey
+class RGWCreateAccessKey_IAM : public RGWOp {
+ bufferlist post_body;
+ std::unique_ptr<rgw::sal::User> user;
+ RGWAccessKey key;
+
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site,
+ RGWAccessKey& cred);
+ public:
+ explicit RGWCreateAccessKey_IAM(const ceph::bufferlist& post_body)
+ : post_body(post_body) {}
+
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "create_access_key"; }
+ RGWOpType get_type() override { return RGW_OP_CREATE_ACCESS_KEY; }
+};
+
+int RGWCreateAccessKey_IAM::init_processing(optional_yield y)
+{
+ // use account id from authenticated user/role. with AssumeRole, this may not
+ // match the account of s->user
+ rgw_account_id account_id;
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ const std::string username = s->info.args.get("UserName");
+ if (username.empty()) {
+ // If you do not specify a user name, IAM determines the user name
+ // implicitly based on the AWS access key ID signing the request.
+ // This operation works for access keys under the AWS account.
+ // Consequently, you can use this operation to manage AWS account
+ // root user credentials.
+ user = s->user->clone();
+ return 0;
+ }
+ if (!validate_iam_user_name(username, s->err.message)) {
+ return -EINVAL;
+ }
+
+ // look up user by UserName
+ const std::string& tenant = s->auth.identity->get_tenant();
+ int r = driver->load_account_user_by_name(this, y, account_id,
+ tenant, username, &user);
+ // root user is hidden from user apis
+ const bool is_root = (user && user->get_type() == TYPE_ROOT);
+ if (r == -ENOENT || is_root) {
+ s->err.message = "No such UserName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWCreateAccessKey_IAM::verify_permission(optional_yield y)
+{
+ const RGWUserInfo& info = user->get_info();
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "user", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamCreateAccessKey, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+int RGWCreateAccessKey_IAM::forward_to_master(optional_yield y,
+ const rgw::SiteConfig& site,
+ RGWAccessKey& cred)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("UserName");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+
+  XMLObj* response = parser.find_first("CreateAccessKeyResponse");
+ if (!response) {
+ ldpp_dout(this, 5) << "ERROR: unexpected xml: CreateAccessKeyResponse" << dendl;
+ return -EINVAL;
+ }
+
+ XMLObj* result = response->find_first("CreateAccessKeyResult");
+ if (!result) {
+ ldpp_dout(this, 5) << "ERROR: unexpected xml: CreateAccessKeyResult" << dendl;
+ return -EINVAL;
+ }
+
+ XMLObj* access_key = result->find_first("AccessKey");
+  if (!access_key) {
+ ldpp_dout(this, 5) << "ERROR: unexpected xml: AccessKey" << dendl;
+ return -EINVAL;
+ }
+
+ try {
+ RGWXMLDecoder::decode_xml("AccessKeyId", cred.id, access_key, true);
+ RGWXMLDecoder::decode_xml("SecretAccessKey", cred.key, access_key, true);
+ RGWXMLDecoder::decode_xml("CreateDate", cred.create_date, access_key);
+ } catch (RGWXMLDecoder::err& err) {
+ ldpp_dout(this, 5) << "ERROR: unexpected xml: AccessKey" << dendl;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void RGWCreateAccessKey_IAM::execute(optional_yield y)
+{
+ std::optional<int> max_keys;
+ {
+ // read account's access key limit
+ RGWAccountInfo account;
+ rgw::sal::Attrs attrs; // unused
+ RGWObjVersionTracker objv; // unused
+ op_ret = driver->load_account_by_id(this, y, user->get_info().account_id,
+ account, attrs, objv);
+ if (op_ret < 0) {
+ ldpp_dout(this, 4) << "failed to load iam account "
+ << user->get_info().account_id << ": " << cpp_strerror(op_ret) << dendl;
+ return;
+ }
+ if (account.max_access_keys >= 0) { // max < 0 means unlimited
+ max_keys = account.max_access_keys;
+ }
+ }
+
+ // generate the key. forward_to_master() may overwrite this
+ if (rgw_generate_access_key(this, y, driver, key.id) < 0) {
+ s->err.message = "failed to generate s3 access key";
+ op_ret = -ERR_INTERNAL_ERROR;
+ return;
+ }
+ rgw_generate_secret_key(get_cct(), key.key);
+ key.create_date = ceph::real_clock::now();
+
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site, key);
+ if (op_ret) {
+ return;
+ }
+ }
+
+ op_ret = retry_raced_user_write(this, y, user.get(),
+ [this, y, &max_keys] {
+ RGWUserInfo& info = user->get_info();
+ RGWUserInfo old_info = info;
+
+ info.access_keys[key.id] = key;
+
+ // check the current count against account limit
+ if (max_keys && std::cmp_greater(info.access_keys.size(), *max_keys)) {
+ s->err.message = fmt::format("Access key limit {} exceeded", *max_keys);
+ return -ERR_LIMIT_EXCEEDED;
+ }
+
+ constexpr bool exclusive = false;
+ return user->store_user(this, y, exclusive, &old_info);
+ });
+}
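
The limit check above uses C++20 `std::cmp_greater` because `access_keys.size()` is unsigned while the configured maximum is a signed `int`; a plain `>` would first convert the signed side to unsigned. A minimal standalone demonstration of the difference:

```cpp
#include <cstdio>
#include <utility>

int main() {
  std::size_t nkeys = 3;  // container sizes are unsigned
  int limit = -1;         // negative sentinel, e.g. "unlimited"

  // The cast turns -1 into SIZE_MAX, so the naive test is false here
  // regardless of how many keys exist.
  bool naive = nkeys > static_cast<std::size_t>(limit);  // false

  // std::cmp_greater compares the mathematical values: 3 > -1 is true.
  bool safe = std::cmp_greater(nkeys, limit);            // true

  std::printf("naive: %d, cmp_greater: %d\n", naive, safe);
}
```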
+
+void RGWCreateAccessKey_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "CreateAccessKeyResponse", RGW_REST_IAM_XMLNS};
+ {
+ Formatter::ObjectSection result{*f, "CreateAccessKeyResult"};
+ Formatter::ObjectSection accesskey{*f, "AccessKey"};
+ encode_json("UserName", user->get_display_name(), f);
+ dump_access_key(key, f);
+ encode_json("SecretAccessKey", key.key, f);
+ // /AccessKey
+ // /CreateAccessKeyResult
+ }
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /CreateAccessKeyResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// UpdateAccessKey
+class RGWUpdateAccessKey_IAM : public RGWOp {
+ bufferlist post_body;
+ std::string access_key_id;
+ bool new_status = false;
+ std::unique_ptr<rgw::sal::User> user;
+
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
+ public:
+ explicit RGWUpdateAccessKey_IAM(const ceph::bufferlist& post_body)
+ : post_body(post_body) {}
+
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "update_access_key"; }
+ RGWOpType get_type() override { return RGW_OP_UPDATE_ACCESS_KEY; }
+};
+
+int RGWUpdateAccessKey_IAM::init_processing(optional_yield y)
+{
+ // use account id from authenticated user/role. with AssumeRole, this may not
+ // match the account of s->user
+ rgw_account_id account_id;
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ access_key_id = s->info.args.get("AccessKeyId");
+ if (access_key_id.empty()) {
+ s->err.message = "Missing required element AccessKeyId";
+ return -EINVAL;
+ }
+
+ const std::string status = s->info.args.get("Status");
+ if (status == "Active") {
+ new_status = true;
+ } else if (status == "Inactive") {
+ new_status = false;
+ } else {
+ if (status.empty()) {
+ s->err.message = "Missing required element Status";
+ } else {
+ s->err.message = "Invalid value for Status";
+ }
+ return -EINVAL;
+ }
+
+ const std::string username = s->info.args.get("UserName");
+ if (username.empty()) {
+ // If you do not specify a user name, IAM determines the user name
+ // implicitly based on the AWS access key ID signing the request.
+ // This operation works for access keys under the AWS account.
+ // Consequently, you can use this operation to manage AWS account
+ // root user credentials.
+ user = s->user->clone();
+ return 0;
+ }
+ if (!validate_iam_user_name(username, s->err.message)) {
+ return -EINVAL;
+ }
+
+ // look up user by UserName
+ const std::string& tenant = s->auth.identity->get_tenant();
+ int r = driver->load_account_user_by_name(this, y, account_id,
+ tenant, username, &user);
+ // root user is hidden from user apis
+ const bool is_root = (user && user->get_type() == TYPE_ROOT);
+ if (r == -ENOENT || is_root) {
+ s->err.message = "No such UserName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWUpdateAccessKey_IAM::verify_permission(optional_yield y)
+{
+ const RGWUserInfo& info = user->get_info();
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "user", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamUpdateAccessKey, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+int RGWUpdateAccessKey_IAM::forward_to_master(optional_yield y,
+ const rgw::SiteConfig& site)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("AccessKeyId");
+ s->info.args.remove("Status");
+ s->info.args.remove("UserName");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+ return 0;
+}
+
+void RGWUpdateAccessKey_IAM::execute(optional_yield y)
+{
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ if (op_ret) {
+ return;
+ }
+ }
+
+ op_ret = retry_raced_user_write(this, y, user.get(),
+ [this, y] {
+ RGWUserInfo& info = user->get_info();
+ RGWUserInfo old_info = info;
+
+ auto key = info.access_keys.find(access_key_id);
+ if (key == info.access_keys.end()) {
+ s->err.message = "No such AccessKeyId in the user";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+
+ if (key->second.active == new_status) {
+ return 0; // nothing to do, return success
+ }
+
+ key->second.active = new_status;
+
+ constexpr bool exclusive = false;
+ return user->store_user(this, y, exclusive, &old_info);
+ });
+}
+
+void RGWUpdateAccessKey_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "UpdateAccessKeyResponse", RGW_REST_IAM_XMLNS};
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /UpdateAccessKeyResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+// DeleteAccessKey
+class RGWDeleteAccessKey_IAM : public RGWOp {
+ bufferlist post_body;
+ std::string access_key_id;
+ std::unique_ptr<rgw::sal::User> user;
+
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
+ public:
+ explicit RGWDeleteAccessKey_IAM(const ceph::bufferlist& post_body)
+ : post_body(post_body) {}
+
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "delete_access_key"; }
+ RGWOpType get_type() override { return RGW_OP_DELETE_ACCESS_KEY; }
+};
+
+int RGWDeleteAccessKey_IAM::init_processing(optional_yield y)
+{
+ // use account id from authenticated user/role. with AssumeRole, this may not
+ // match the account of s->user
+ rgw_account_id account_id;
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ access_key_id = s->info.args.get("AccessKeyId");
+ if (access_key_id.empty()) {
+ s->err.message = "Missing required element AccessKeyId";
+ return -EINVAL;
+ }
+
+ const std::string username = s->info.args.get("UserName");
+ if (username.empty()) {
+ // If you do not specify a user name, IAM determines the user name
+ // implicitly based on the AWS access key ID signing the request.
+ // This operation works for access keys under the AWS account.
+ // Consequently, you can use this operation to manage AWS account
+ // root user credentials.
+ user = s->user->clone();
+ return 0;
+ }
+ if (!validate_iam_user_name(username, s->err.message)) {
+ return -EINVAL;
+ }
+
+ // look up user by UserName
+ const std::string& tenant = s->auth.identity->get_tenant();
+ int r = driver->load_account_user_by_name(this, y, account_id,
+ tenant, username, &user);
+ // root user is hidden from user apis
+ const bool is_root = (user && user->get_type() == TYPE_ROOT);
+ if (r == -ENOENT || is_root) {
+ s->err.message = "No such UserName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWDeleteAccessKey_IAM::verify_permission(optional_yield y)
+{
+ const RGWUserInfo& info = user->get_info();
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "user", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamDeleteAccessKey, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+int RGWDeleteAccessKey_IAM::forward_to_master(optional_yield y,
+ const rgw::SiteConfig& site)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("AccessKeyId");
+ s->info.args.remove("UserName");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+ return 0;
+}
+
+void RGWDeleteAccessKey_IAM::execute(optional_yield y)
+{
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ if (op_ret) {
+ return;
+ }
+ }
+
+ op_ret = retry_raced_user_write(this, y, user.get(),
+ [this, y, &site] {
+ RGWUserInfo& info = user->get_info();
+ RGWUserInfo old_info = info;
+
+ auto key = info.access_keys.find(access_key_id);
+ if (key == info.access_keys.end()) {
+ if (!site.is_meta_master()) {
+ return 0; // delete succeeded on the master
+ }
+ s->err.message = "No such AccessKeyId in the user";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+
+ info.access_keys.erase(key);
+
+ constexpr bool exclusive = false;
+ return user->store_user(this, y, exclusive, &old_info);
+ });
+}
+
+void RGWDeleteAccessKey_IAM::send_response()
+{
+ if (!op_ret) {
+ dump_start(s); // <?xml block ?>
+ Formatter* f = s->formatter;
+ Formatter::ObjectSection response{*f, "DeleteAccessKeyResponse", RGW_REST_IAM_XMLNS};
+ Formatter::ObjectSection metadata{*f, "ResponseMetadata"};
+ f->dump_string("RequestId", s->trans_id);
+ // /ResponseMetadata
+ // /DeleteAccessKeyResponse
+ }
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this);
+}
+
+
+// ListAccessKeys
+class RGWListAccessKeys_IAM : public RGWOp {
+ std::unique_ptr<rgw::sal::User> user;
+ std::string marker;
+ int max_items = 100;
+
+ bool started_response = false;
+ void start_response();
+ public:
+ int init_processing(optional_yield y) override;
+ int verify_permission(optional_yield y) override;
+ void execute(optional_yield y) override;
+ void send_response() override;
+
+ const char* name() const override { return "list_access_keys"; }
+ RGWOpType get_type() override { return RGW_OP_LIST_ACCESS_KEYS; }
+};
+
+int RGWListAccessKeys_IAM::init_processing(optional_yield y)
+{
+ // use account id from authenticated user/role. with AssumeRole, this may not
+ // match the account of s->user
+ rgw_account_id account_id;
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ } else {
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ marker = s->info.args.get("Marker");
+
+ int r = s->info.args.get_int("MaxItems", &max_items, max_items);
+ if (r < 0 || max_items > 1000) {
+ s->err.message = "Invalid value for MaxItems";
+ return -EINVAL;
+ }
+
+ const std::string username = s->info.args.get("UserName");
+ if (username.empty()) {
+ // If you do not specify a user name, IAM determines the user name
+ // implicitly based on the AWS access key ID signing the request.
+ // This operation works for access keys under the AWS account.
+ // Consequently, you can use this operation to manage AWS account
+ // root user credentials.
+ user = s->user->clone();
+ return 0;
+ }
+ if (!validate_iam_user_name(username, s->err.message)) {
+ return -EINVAL;
+ }
+
+ // look up user by UserName
+ const std::string& tenant = s->auth.identity->get_tenant();
+ r = driver->load_account_user_by_name(this, y, account_id,
+ tenant, username, &user);
+ // root user is hidden from user apis
+ const bool is_root = (user && user->get_type() == TYPE_ROOT);
+ if (r == -ENOENT || is_root) {
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ return r;
+}
+
+int RGWListAccessKeys_IAM::verify_permission(optional_yield y)
+{
+ const RGWUserInfo& info = user->get_info();
+ const std::string resource_name = make_resource_name(info);
+ const rgw::ARN arn{resource_name, "user", info.account_id, true};
+ if (verify_user_permission(this, s, arn, rgw::IAM::iamListAccessKeys, true)) {
+ return 0;
+ }
+ return -EACCES;
+}
+
+void RGWListAccessKeys_IAM::execute(optional_yield y)
+{
+ start_response();
+ started_response = true;
+
+ dump_start(s); // <?xml block ?>
+
+ Formatter* f = s->formatter;
+ f->open_object_section_in_ns("ListAccessKeysResponse", RGW_REST_IAM_XMLNS);
+ f->open_object_section("ListAccessKeysResult");
+ encode_json("UserName", user->get_display_name(), f);
+ f->open_array_section("AccessKeyMetadata");
+
+ const RGWUserInfo& info = user->get_info();
+
+ auto key = info.access_keys.lower_bound(marker);
+ for (int i = 0; i < max_items && key != info.access_keys.end(); ++i, ++key) {
+ f->open_object_section("member");
+ encode_json("UserName", user->get_display_name(), f);
+ dump_access_key(key->second, f);
+ f->close_section(); // member
+ }
+
+ f->close_section(); // AccessKeyMetadata
+
+ const bool truncated = (key != info.access_keys.end());
+ f->dump_bool("IsTruncated", truncated);
+ if (truncated) {
+ f->dump_string("Marker", key->second.id);
+ }
+
+ f->close_section(); // ListAccessKeysResult
+ f->close_section(); // ListAccessKeysResponse
+ rgw_flush_formatter_and_reset(s, f);
+}
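
The loop above implements Marker/MaxItems paging directly over the sorted key map: resume at `lower_bound(marker)`, emit up to `max_items` entries, and report the first unreturned key as the next Marker. A self-contained sketch of the same scheme (toy data, not the RGW types):

```cpp
#include <iostream>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> keys{
    {"AKIA01", 0}, {"AKIA02", 0}, {"AKIA03", 0}, {"AKIA04", 0}, {"AKIA05", 0}};

  std::string marker;  // empty marker starts at the beginning
  const int max_items = 2;

  for (;;) {
    auto it = keys.lower_bound(marker);  // resume at first key >= marker
    for (int i = 0; i < max_items && it != keys.end(); ++i, ++it) {
      std::cout << "  " << it->first << '\n';
    }
    const bool truncated = (it != keys.end());
    std::cout << "IsTruncated=" << std::boolalpha << truncated << '\n';
    if (!truncated) break;
    marker = it->first;  // first unreturned key becomes the next Marker
  }
}
```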
+
+void RGWListAccessKeys_IAM::start_response()
+{
+ const int64_t proposed_content_length =
+ op_ret ? NO_CONTENT_LENGTH : CHUNKED_TRANSFER_ENCODING;
+
+ set_req_state_err(s, op_ret);
+ dump_errno(s);
+ end_header(s, this, to_mime_type(s->format), proposed_content_length);
+}
+
+void RGWListAccessKeys_IAM::send_response()
+{
+ if (!started_response) { // errored out before execute() wrote anything
+ start_response();
+ }
+}
+
+
+RGWOp* make_iam_create_user_op(const ceph::bufferlist& post_body) {
+ return new RGWCreateUser_IAM(post_body);
+}
+RGWOp* make_iam_get_user_op(const ceph::bufferlist&) {
+ return new RGWGetUser_IAM;
+}
+RGWOp* make_iam_update_user_op(const ceph::bufferlist& post_body) {
+ return new RGWUpdateUser_IAM(post_body);
+}
+RGWOp* make_iam_delete_user_op(const ceph::bufferlist& post_body) {
+ return new RGWDeleteUser_IAM(post_body);
+}
+RGWOp* make_iam_list_users_op(const ceph::bufferlist&) {
+ return new RGWListUsers_IAM;
+}
+
+RGWOp* make_iam_create_access_key_op(const ceph::bufferlist& post_body) {
+ return new RGWCreateAccessKey_IAM(post_body);
+}
+RGWOp* make_iam_update_access_key_op(const ceph::bufferlist& post_body) {
+ return new RGWUpdateAccessKey_IAM(post_body);
+}
+RGWOp* make_iam_delete_access_key_op(const ceph::bufferlist& post_body) {
+ return new RGWDeleteAccessKey_IAM(post_body);
+}
+RGWOp* make_iam_list_access_keys_op(const ceph::bufferlist&) {
+ return new RGWListAccessKeys_IAM;
+}
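
These factories are presumably wired up by an Action-name dispatcher in the IAM REST handler (e.g. `Action=CreateUser` selecting `make_iam_create_user_op`). A hypothetical standalone sketch of such a dispatch table, with all Ceph types stubbed out so it compiles on its own; the real wiring in rgw_rest_iam.cc may differ:

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct bufferlist {};  // stand-in for ceph::bufferlist
struct RGWOp {
  virtual ~RGWOp() = default;
  virtual const char* name() const = 0;
};
struct CreateUserOp : RGWOp {
  const char* name() const override { return "create_user"; }
};
struct ListUsersOp : RGWOp {
  const char* name() const override { return "list_users"; }
};

// stubbed factories mirroring the signatures above
RGWOp* make_iam_create_user_op(const bufferlist&) { return new CreateUserOp; }
RGWOp* make_iam_list_users_op(const bufferlist&) { return new ListUsersOp; }

int main() {
  using OpFactory = RGWOp* (*)(const bufferlist&);
  const std::map<std::string, OpFactory> actions = {
    {"CreateUser", make_iam_create_user_op},
    {"ListUsers", make_iam_list_users_op},
  };

  bufferlist post_body;  // would carry the POST payload for forwarding
  if (auto it = actions.find("ListUsers"); it != actions.end()) {
    std::unique_ptr<RGWOp> op{it->second(post_body)};
    std::cout << "dispatched to " << op->name() << '\n';
  }
}
```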
diff --git a/src/rgw/rgw_rest_iam_user.h b/src/rgw/rgw_rest_iam_user.h
new file mode 100644
index 00000000000..d3ba77a54a1
--- /dev/null
+++ b/src/rgw/rgw_rest_iam_user.h
@@ -0,0 +1,33 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ *
+ */
+
+#pragma once
+
+#include "include/buffer_fwd.h"
+
+class RGWOp;
+
+// IAM User op factory functions
+RGWOp* make_iam_create_user_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_get_user_op(const ceph::bufferlist& unused);
+RGWOp* make_iam_update_user_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_delete_user_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_list_users_op(const ceph::bufferlist& unused);
+
+// AccessKey op factory functions
+RGWOp* make_iam_create_access_key_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_update_access_key_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_delete_access_key_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_list_access_keys_op(const ceph::bufferlist& unused);
diff --git a/src/rgw/rgw_rest_metadata.cc b/src/rgw/rgw_rest_metadata.cc
index d7881c22890..6630cf053f4 100644
--- a/src/rgw/rgw_rest_metadata.cc
+++ b/src/rgw/rgw_rest_metadata.cc
@@ -69,8 +69,7 @@ void RGWOp_Metadata_Get::execute(optional_yield y) {
}
void RGWOp_Metadata_Get_Myself::execute(optional_yield y) {
- const std::string owner_id = s->owner.id.to_str();
- s->info.args.append("key", owner_id);
+ s->info.args.append("key", to_string(s->owner.id));
return RGWOp_Metadata_Get::execute(y);
}
diff --git a/src/rgw/rgw_rest_oidc_provider.cc b/src/rgw/rgw_rest_oidc_provider.cc
index c50b067d58d..70191879ad1 100644
--- a/src/rgw/rgw_rest_oidc_provider.cc
+++ b/src/rgw/rgw_rest_oidc_provider.cc
@@ -13,7 +13,7 @@
#include "rgw_common.h"
#include "rgw_op.h"
#include "rgw_rest.h"
-#include "rgw_role.h"
+#include "rgw_rest_iam.h"
#include "rgw_rest_oidc_provider.h"
#include "rgw_oidc_provider.h"
#include "rgw_sal.h"
@@ -24,32 +24,16 @@ using namespace std;
int RGWRestOIDCProvider::verify_permission(optional_yield y)
{
- if (s->auth.identity->is_anonymous()) {
- return -EACCES;
+ if (verify_user_permission(this, s, resource, action)) {
+ return 0;
}
- provider_arn = s->info.args.get("OpenIDConnectProviderArn");
- if (provider_arn.empty()) {
- ldpp_dout(this, 20) << "ERROR: Provider ARN is empty"<< dendl;
- return -EINVAL;
- }
-
- auto ret = check_caps(s->user->get_caps());
- if (ret == 0) {
- return ret;
- }
-
- uint64_t op = get_op();
- auto rgw_arn = rgw::ARN::parse(provider_arn, true);
- if (rgw_arn) {
- if (!verify_user_permission(this, s, *rgw_arn, op)) {
- return -EACCES;
- }
- } else {
- return -EACCES;
- }
+ return RGWRESTOp::verify_permission(y);
+}
- return 0;
+int RGWRestOIDCProvider::check_caps(const RGWUserCaps& caps)
+{
+ return caps.check_cap("roles", perm);
}
void RGWRestOIDCProvider::send_response()
@@ -61,101 +45,198 @@ void RGWRestOIDCProvider::send_response()
end_header(s, this);
}
-int RGWRestOIDCProviderRead::check_caps(const RGWUserCaps& caps)
+
+static std::string format_creation_date(ceph::real_time now)
{
- return caps.check_cap("oidc-provider", RGW_CAP_READ);
+ struct timeval tv;
+ real_clock::to_timeval(now, tv);
+
+ struct tm result;
+ gmtime_r(&tv.tv_sec, &result);
+ char buf[30];
+ strftime(buf,30,"%Y-%m-%dT%H:%M:%S", &result);
+ sprintf(buf + strlen(buf),".%03dZ",(int)tv.tv_usec/1000);
+ return buf;
}
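
`format_creation_date()` builds the ISO-8601 string in two steps because `strftime()` has no sub-second conversion. A standalone equivalent, using `snprintf()` for the millisecond suffix as a bounds-checked alternative to the patch's `sprintf()` (illustrative only, not a proposed change):

```cpp
#include <cstddef>
#include <cstdio>
#include <ctime>
#include <sys/time.h>

static void format_now(char* buf, std::size_t len) {
  struct timeval tv;
  gettimeofday(&tv, nullptr);

  struct tm result;
  gmtime_r(&tv.tv_sec, &result);  // thread-safe UTC conversion

  // "%Y-%m-%dT%H:%M:%S" is 19 bytes; ".mmmZ" adds 5 more plus the NUL
  std::size_t n = strftime(buf, len, "%Y-%m-%dT%H:%M:%S", &result);
  snprintf(buf + n, len - n, ".%03dZ", static_cast<int>(tv.tv_usec / 1000));
}

int main() {
  char buf[32];
  format_now(buf, sizeof(buf));
  std::puts(buf);  // e.g. 2024-05-01T12:34:56.789Z
}
```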
-int RGWRestOIDCProviderWrite::check_caps(const RGWUserCaps& caps)
+
+RGWCreateOIDCProvider::RGWCreateOIDCProvider()
+ : RGWRestOIDCProvider(rgw::IAM::iamCreateOIDCProvider, RGW_CAP_WRITE)
{
- return caps.check_cap("oidc-provider", RGW_CAP_WRITE);
}
-int RGWCreateOIDCProvider::verify_permission(optional_yield y)
-{
- if (s->auth.identity->is_anonymous()) {
- return -EACCES;
- }
+inline constexpr int MAX_OIDC_NUM_CLIENT_IDS = 100;
+inline constexpr int MAX_OIDC_CLIENT_ID_LEN = 255;
+inline constexpr int MAX_OIDC_NUM_THUMBPRINTS = 5;
+inline constexpr int MAX_OIDC_THUMBPRINT_LEN = 40;
+inline constexpr int MAX_OIDC_URL_LEN = 255;
- auto ret = check_caps(s->user->get_caps());
- if (ret == 0) {
- return ret;
+int RGWCreateOIDCProvider::init_processing(optional_yield y)
+{
+ info.provider_url = s->info.args.get("Url");
+ if (info.provider_url.empty()) {
+ s->err.message = "Missing required element Url";
+ return -EINVAL;
}
-
- string idp_url = url_remove_prefix(provider_url);
- if (!verify_user_permission(this,
- s,
- rgw::ARN(idp_url,
- "oidc-provider",
- s->user->get_tenant(), true),
- get_op())) {
- return -EACCES;
+ if (info.provider_url.size() > MAX_OIDC_URL_LEN) {
+ s->err.message = "Url cannot exceed the maximum length of "
+ + std::to_string(MAX_OIDC_URL_LEN);
+ return -EINVAL;
}
- return 0;
-}
-
-int RGWCreateOIDCProvider::get_params()
-{
- provider_url = s->info.args.get("Url");
auto val_map = s->info.args.get_params();
for (auto& it : val_map) {
- if (it.first.find("ClientIDList.member.") != string::npos) {
- client_ids.emplace_back(it.second);
+ if (it.first.find("ClientIDList.member.") != string::npos) {
+ if (it.second.size() > MAX_OIDC_CLIENT_ID_LEN) {
+ s->err.message = "ClientID cannot exceed the maximum length of "
+ + std::to_string(MAX_OIDC_CLIENT_ID_LEN);
+ return -EINVAL;
}
- if (it.first.find("ThumbprintList.member.") != string::npos) {
- thumbprints.emplace_back(it.second);
+ info.client_ids.emplace_back(it.second);
+ }
+ if (it.first.find("ThumbprintList.member.") != string::npos) {
+ if (it.second.size() > MAX_OIDC_THUMBPRINT_LEN) {
+ s->err.message = "Thumbprint cannot exceed the maximum length of "
+ + std::to_string(MAX_OIDC_THUMBPRINT_LEN);
+ return -EINVAL;
}
+ info.thumbprints.emplace_back(it.second);
+ }
+ }
+
+ if (info.thumbprints.empty()) {
+ s->err.message = "Missing required element ThumbprintList";
+ return -EINVAL;
+ }
+ if (info.thumbprints.size() > MAX_OIDC_NUM_THUMBPRINTS) {
+ s->err.message = "ThumbprintList cannot exceed the maximum size of "
+ + std::to_string(MAX_OIDC_NUM_THUMBPRINTS);
+ return -EINVAL;
}
- if (provider_url.empty() || thumbprints.empty()) {
- ldpp_dout(this, 20) << "ERROR: one of url or thumbprints is empty" << dendl;
+ if (info.client_ids.size() > MAX_OIDC_NUM_CLIENT_IDS) {
+ s->err.message = "ClientIDList cannot exceed the maximum size of "
+ + std::to_string(MAX_OIDC_NUM_CLIENT_IDS);
return -EINVAL;
}
+ if (const auto& acc = s->auth.identity->get_account(); acc) {
+ info.tenant = acc->id;
+ } else {
+ info.tenant = s->user->get_tenant();
+ }
+ resource = rgw::ARN(url_remove_prefix(info.provider_url),
+ "oidc-provider/", info.tenant, true);
+ info.arn = resource.to_string();
+ info.creation_date = format_creation_date(real_clock::now());
+
return 0;
}
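
The `ClientIDList.member.N` / `ThumbprintList.member.N` keys follow the AWS query convention of flattening lists into numbered parameters, and the code simply prefix-matches each key. A toy standalone version of that scan, with a plain `std::map` standing in for `s->info.args`:

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

int main() {
  std::map<std::string, std::string> args{
    {"Url", "https://idp.example.com"},
    {"ThumbprintList.member.1", "aaaa"},
    {"ThumbprintList.member.2", "bbbb"},
    {"ClientIDList.member.1", "app-1"},
  };

  std::vector<std::string> thumbprints, client_ids;
  for (const auto& [key, value] : args) {
    // match on the list prefix, ignoring the member index
    if (key.find("ThumbprintList.member.") != std::string::npos)
      thumbprints.push_back(value);
    else if (key.find("ClientIDList.member.") != std::string::npos)
      client_ids.push_back(value);
  }
  std::cout << thumbprints.size() << " thumbprints, "
            << client_ids.size() << " client ids\n";
}
```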
void RGWCreateOIDCProvider::execute(optional_yield y)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
-
- std::unique_ptr<rgw::sal::RGWOIDCProvider> provider = driver->get_oidc_provider();
- provider->set_url(provider_url);
- provider->set_tenant(s->user->get_tenant());
- provider->set_client_ids(client_ids);
- provider->set_thumbprints(thumbprints);
- op_ret = provider->create(s, true, y);
-
+ constexpr bool exclusive = true;
+ op_ret = driver->store_oidc_provider(this, y, info, exclusive);
if (op_ret == 0) {
- s->formatter->open_object_section("CreateOpenIDConnectProviderResponse");
+ s->formatter->open_object_section_in_ns("CreateOpenIDConnectProviderResponse", RGW_REST_IAM_XMLNS);
s->formatter->open_object_section("CreateOpenIDConnectProviderResult");
- provider->dump(s->formatter);
+ encode_json("OpenIDConnectProviderArn", info.arn, s->formatter);
s->formatter->close_section();
s->formatter->open_object_section("ResponseMetadata");
s->formatter->dump_string("RequestId", s->trans_id);
s->formatter->close_section();
s->formatter->close_section();
}
+}
+
+
+static int validate_provider_arn(const std::string& provider_arn,
+ std::string_view tenant,
+ rgw::ARN& resource, std::string& url,
+ std::string& message)
+{
+ if (provider_arn.empty()) {
+ message = "Missing required element OpenIDConnectProviderArn";
+ return -EINVAL;
+ }
+
+ // teuthology runs keycloak on localhost:8080, and rgw::ARN::parse() rejects
+ // that extra colon. aws docs say "The URL should not contain a port number."
+ // but we'll be less strict about parsing
+ std::string_view str = provider_arn;
+
+ constexpr std::string_view arn_prefix = "arn:";
+ if (!str.starts_with(arn_prefix)) {
+ message = "Invalid value for OpenIDConnectProviderArn";
+ return -EINVAL;
+ }
+ str.remove_prefix(arn_prefix.size());
+
+ constexpr std::string_view partition = "aws:";
+ if (!str.starts_with(partition)) {
+ message = "OpenIDConnectProviderArn partition must be aws";
+ return -EINVAL;
+ }
+ resource.partition = rgw::Partition::aws;
+ str.remove_prefix(partition.size());
+
+ constexpr std::string_view service = "iam::";
+ if (!str.starts_with(service)) {
+ message = "OpenIDConnectProviderArn service must be iam";
+ return -EINVAL;
+ }
+ resource.service = rgw::Service::iam;
+ str.remove_prefix(service.size());
+
+ if (!str.starts_with(tenant)) {
+ message = "OpenIDConnectProviderArn account must match user tenant";
+ return -EINVAL;
+ }
+ resource.account = tenant;
+ str.remove_prefix(tenant.size());
+
+ constexpr std::string_view resource_prefix = ":oidc-provider/";
+ if (!str.starts_with(resource_prefix)) {
+ message = "Invalid ARN resource for OpenIDConnectProviderArn";
+ return -EINVAL;
+ }
+ resource.resource = str.substr(1); // trim leading :
+ str.remove_prefix(resource_prefix.size());
+ url = str;
+
+ return 0;
+}
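
A self-contained sketch of the prefix-walking parse above, kept permissive about `host:port` in the trailing URL, which is the stated reason for bypassing the stricter `rgw::ARN::parse()`. Simplified relative to the patch: the per-prefix error messages and ARN fields are dropped and only the URL is extracted:

```cpp
#include <iostream>
#include <string>
#include <string_view>

static bool parse_oidc_arn(std::string_view str, std::string_view tenant,
                           std::string& url) {
  // walk the fixed ARN prefixes: arn:aws:iam::<tenant>:oidc-provider/<url>
  for (std::string_view prefix : {"arn:", "aws:", "iam::"}) {
    if (!str.starts_with(prefix)) return false;
    str.remove_prefix(prefix.size());
  }
  if (!str.starts_with(tenant)) return false;
  str.remove_prefix(tenant.size());

  constexpr std::string_view resource_prefix = ":oidc-provider/";
  if (!str.starts_with(resource_prefix)) return false;
  str.remove_prefix(resource_prefix.size());

  url = str;  // whatever remains, ports and all
  return true;
}

int main() {
  std::string url;
  if (parse_oidc_arn("arn:aws:iam::acct123:oidc-provider/localhost:8080/auth",
                     "acct123", url)) {
    std::cout << "url = " << url << '\n';  // localhost:8080/auth
  }
}
```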
+
+
+RGWDeleteOIDCProvider::RGWDeleteOIDCProvider()
+ : RGWRestOIDCProvider(rgw::IAM::iamDeleteOIDCProvider, RGW_CAP_WRITE)
+{
+}
+
+int RGWDeleteOIDCProvider::init_processing(optional_yield y)
+{
+ std::string_view account;
+ if (const auto& acc = s->auth.identity->get_account(); acc) {
+ account = acc->id;
+ } else {
+ account = s->user->get_tenant();
+ }
+ std::string provider_arn = s->info.args.get("OpenIDConnectProviderArn");
+ return validate_provider_arn(provider_arn, account,
+ resource, url, s->err.message);
}
void RGWDeleteOIDCProvider::execute(optional_yield y)
{
- std::unique_ptr<rgw::sal::RGWOIDCProvider> provider = driver->get_oidc_provider();
- provider->set_arn(provider_arn);
- provider->set_tenant(s->user->get_tenant());
- op_ret = provider->delete_obj(s, y);
+ op_ret = driver->delete_oidc_provider(this, y, resource.account, url);
if (op_ret < 0 && op_ret != -ENOENT && op_ret != -EINVAL) {
op_ret = ERR_INTERNAL_ERROR;
}
if (op_ret == 0) {
- s->formatter->open_object_section("DeleteOpenIDConnectProviderResponse");
+ s->formatter->open_object_section_in_ns("DeleteOpenIDConnectProviderResponse", RGW_REST_IAM_XMLNS);
s->formatter->open_object_section("ResponseMetadata");
s->formatter->dump_string("RequestId", s->trans_id);
s->formatter->close_section();
@@ -163,56 +244,80 @@ void RGWDeleteOIDCProvider::execute(optional_yield y)
}
}
+RGWGetOIDCProvider::RGWGetOIDCProvider()
+ : RGWRestOIDCProvider(rgw::IAM::iamGetOIDCProvider, RGW_CAP_READ)
+{
+}
+
+int RGWGetOIDCProvider::init_processing(optional_yield y)
+{
+ std::string_view account;
+ if (const auto& acc = s->auth.identity->get_account(); acc) {
+ account = acc->id;
+ } else {
+ account = s->user->get_tenant();
+ }
+ std::string provider_arn = s->info.args.get("OpenIDConnectProviderArn");
+ return validate_provider_arn(provider_arn, account,
+ resource, url, s->err.message);
+}
+
+static void dump_oidc_provider(const RGWOIDCProviderInfo& info, Formatter *f)
+{
+ f->open_object_section("ClientIDList");
+ for (const auto& it : info.client_ids) {
+ encode_json("member", it, f);
+ }
+ f->close_section();
+ encode_json("CreateDate", info.creation_date, f);
+ f->open_object_section("ThumbprintList");
+ for (const auto& it : info.thumbprints) {
+ encode_json("member", it, f);
+ }
+ f->close_section();
+ encode_json("Url", info.provider_url, f);
+}
+
void RGWGetOIDCProvider::execute(optional_yield y)
{
- std::unique_ptr<rgw::sal::RGWOIDCProvider> provider = driver->get_oidc_provider();
- provider->set_arn(provider_arn);
- provider->set_tenant(s->user->get_tenant());
- op_ret = provider->get(s, y);
+ RGWOIDCProviderInfo info;
+ op_ret = driver->load_oidc_provider(this, y, resource.account, url, info);
if (op_ret < 0 && op_ret != -ENOENT && op_ret != -EINVAL) {
op_ret = ERR_INTERNAL_ERROR;
}
if (op_ret == 0) {
- s->formatter->open_object_section("GetOpenIDConnectProviderResponse");
+ s->formatter->open_object_section_in_ns("GetOpenIDConnectProviderResponse", RGW_REST_IAM_XMLNS);
s->formatter->open_object_section("ResponseMetadata");
s->formatter->dump_string("RequestId", s->trans_id);
s->formatter->close_section();
s->formatter->open_object_section("GetOpenIDConnectProviderResult");
- provider->dump_all(s->formatter);
+ dump_oidc_provider(info, s->formatter);
s->formatter->close_section();
s->formatter->close_section();
}
}
-int RGWListOIDCProviders::verify_permission(optional_yield y)
-{
- if (s->auth.identity->is_anonymous()) {
- return -EACCES;
- }
-
- if (int ret = check_caps(s->user->get_caps()); ret == 0) {
- return ret;
- }
-
- if (!verify_user_permission(this,
- s,
- rgw::ARN(),
- get_op())) {
- return -EACCES;
- }
- return 0;
+RGWListOIDCProviders::RGWListOIDCProviders()
+ : RGWRestOIDCProvider(rgw::IAM::iamListOIDCProviders, RGW_CAP_READ)
+{
}
void RGWListOIDCProviders::execute(optional_yield y)
{
- vector<std::unique_ptr<rgw::sal::RGWOIDCProvider>> result;
- op_ret = driver->get_oidc_providers(s, s->user->get_tenant(), result, y);
+ std::string_view account;
+ if (const auto& acc = s->auth.identity->get_account(); acc) {
+ account = acc->id;
+ } else {
+ account = s->user->get_tenant();
+ }
+ vector<RGWOIDCProviderInfo> result;
+ op_ret = driver->get_oidc_providers(this, y, account, result);
if (op_ret == 0) {
- s->formatter->open_array_section("ListOpenIDConnectProvidersResponse");
+ s->formatter->open_object_section_in_ns("ListOpenIDConnectProvidersResponse", RGW_REST_IAM_XMLNS);
s->formatter->open_object_section("ResponseMetadata");
s->formatter->dump_string("RequestId", s->trans_id);
s->formatter->close_section();
@@ -220,9 +325,7 @@ void RGWListOIDCProviders::execute(optional_yield y)
s->formatter->open_array_section("OpenIDConnectProviderList");
for (const auto& it : result) {
s->formatter->open_object_section("member");
- auto& arn = it->get_arn();
- ldpp_dout(s, 0) << "ARN: " << arn << dendl;
- s->formatter->dump_string("Arn", arn);
+ s->formatter->dump_string("Arn", it.arn);
s->formatter->close_section();
}
s->formatter->close_section();
@@ -230,4 +333,3 @@ void RGWListOIDCProviders::execute(optional_yield y)
s->formatter->close_section();
}
}
-
diff --git a/src/rgw/rgw_rest_oidc_provider.h b/src/rgw/rgw_rest_oidc_provider.h
index 33535c6b512..192906ab498 100644
--- a/src/rgw/rgw_rest_oidc_provider.h
+++ b/src/rgw/rgw_rest_oidc_provider.h
@@ -7,65 +7,58 @@
#include "rgw_oidc_provider.h"
class RGWRestOIDCProvider : public RGWRESTOp {
+ const uint64_t action;
+ const uint32_t perm;
protected:
- std::vector<std::string> client_ids;
- std::vector<std::string> thumbprints;
- std::string provider_url; //'iss' field in JWT
- std::string provider_arn;
-public:
- int verify_permission(optional_yield y) override;
- void send_response() override;
- virtual uint64_t get_op() = 0;
-};
+ rgw::ARN resource; // must be initialized before verify_permission()
-class RGWRestOIDCProviderRead : public RGWRestOIDCProvider {
-public:
- RGWRestOIDCProviderRead() = default;
int check_caps(const RGWUserCaps& caps) override;
-};
-class RGWRestOIDCProviderWrite : public RGWRestOIDCProvider {
+ RGWRestOIDCProvider(uint64_t action, uint32_t perm)
+ : action(action), perm(perm) {}
public:
- RGWRestOIDCProviderWrite() = default;
- int check_caps(const RGWUserCaps& caps) override;
+ int verify_permission(optional_yield y) override;
+ void send_response() override;
};
-class RGWCreateOIDCProvider : public RGWRestOIDCProviderWrite {
-public:
- RGWCreateOIDCProvider() = default;
- int verify_permission(optional_yield y) override;
+class RGWCreateOIDCProvider : public RGWRestOIDCProvider {
+ RGWOIDCProviderInfo info;
+ public:
+ RGWCreateOIDCProvider();
+
+ int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "create_oidc_provider"; }
RGWOpType get_type() override { return RGW_OP_CREATE_OIDC_PROVIDER; }
- uint64_t get_op() override { return rgw::IAM::iamCreateOIDCProvider; }
};
-class RGWDeleteOIDCProvider : public RGWRestOIDCProviderWrite {
-public:
- RGWDeleteOIDCProvider() = default;
+class RGWDeleteOIDCProvider : public RGWRestOIDCProvider {
+ std::string url;
+ public:
+ RGWDeleteOIDCProvider();
+
+ int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
const char* name() const override { return "delete_oidc_provider"; }
RGWOpType get_type() override { return RGW_OP_DELETE_OIDC_PROVIDER; }
- uint64_t get_op() override { return rgw::IAM::iamDeleteOIDCProvider; }
};
-class RGWGetOIDCProvider : public RGWRestOIDCProviderRead {
-public:
- RGWGetOIDCProvider() = default;
+class RGWGetOIDCProvider : public RGWRestOIDCProvider {
+ std::string url;
+ public:
+ RGWGetOIDCProvider();
+
+ int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
const char* name() const override { return "get_oidc_provider"; }
RGWOpType get_type() override { return RGW_OP_GET_OIDC_PROVIDER; }
- uint64_t get_op() override { return rgw::IAM::iamGetOIDCProvider; }
};
-class RGWListOIDCProviders : public RGWRestOIDCProviderRead {
-public:
- RGWListOIDCProviders() = default;
- int verify_permission(optional_yield y) override;
+class RGWListOIDCProviders : public RGWRestOIDCProvider {
+ public:
+ RGWListOIDCProviders();
+
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "list_oidc_providers"; }
RGWOpType get_type() override { return RGW_OP_LIST_OIDC_PROVIDERS; }
- uint64_t get_op() override { return rgw::IAM::iamListOIDCProviders; }
};
diff --git a/src/rgw/rgw_rest_pubsub.cc b/src/rgw/rgw_rest_pubsub.cc
index 431a8321338..bf72baac13e 100644
--- a/src/rgw/rgw_rest_pubsub.cc
+++ b/src/rgw/rgw_rest_pubsub.cc
@@ -5,6 +5,7 @@
#include <boost/tokenizer.hpp>
#include <optional>
#include <regex>
+#include "include/function2.hpp"
#include "rgw_iam_policy.h"
#include "rgw_rest_pubsub.h"
#include "rgw_pubsub_push.h"
@@ -23,8 +24,6 @@
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw
-static const char* AWS_SNS_NS("https://sns.amazonaws.com/doc/2010-03-31/");
-
bool verify_transport_security(CephContext *cct, const RGWEnv& env) {
const auto is_secure = rgw_transport_is_secure(cct, env);
if (!is_secure && g_conf().get_val<bool>("rgw_allow_notification_secrets_in_cleartext")) {
@@ -37,24 +36,26 @@ bool verify_transport_security(CephContext *cct, const RGWEnv& env) {
// make sure that endpoint is a valid URL
// make sure that if user/password are passed inside URL, it is over secure connection
// update rgw_pubsub_dest to indicate that a password is stored in the URL
-bool validate_and_update_endpoint_secret(rgw_pubsub_dest& dest, CephContext *cct, const RGWEnv& env) {
+bool validate_and_update_endpoint_secret(rgw_pubsub_dest& dest, CephContext *cct,
+ const RGWEnv& env, std::string& message)
+{
if (dest.push_endpoint.empty()) {
- return true;
+ return true;
}
std::string user;
std::string password;
if (!rgw::parse_url_userinfo(dest.push_endpoint, user, password)) {
- ldout(cct, 1) << "endpoint validation error: malformed endpoint URL:" << dest.push_endpoint << dendl;
+ message = "Malformed URL for push-endpoint";
return false;
}
// this should be verified inside parse_url()
ceph_assert(user.empty() == password.empty());
if (!user.empty()) {
- dest.stored_secret = true;
- if (!verify_transport_security(cct, env)) {
- ldout(cct, 1) << "endpoint validation error: sending secrets over insecure transport" << dendl;
- return false;
- }
+ dest.stored_secret = true;
+ if (!verify_transport_security(cct, env)) {
+ message = "Topic contains secrets that must be transmitted over a secure transport";
+ return false;
+ }
}
return true;
}
@@ -76,6 +77,29 @@ bool validate_topic_name(const std::string& name, std::string& message)
return true;
}
+auto validate_topic_arn(const std::string& str, std::string& message)
+ -> boost::optional<rgw::ARN>
+{
+ if (str.empty()) {
+ message = "Missing required element TopicArn";
+ return boost::none;
+ }
+ auto arn = rgw::ARN::parse(str);
+ if (!arn || arn->resource.empty()) {
+ message = "Invalid value for TopicArn";
+ return boost::none;
+ }
+ return arn;
+}
+
+const std::string& get_account_or_tenant(const rgw_owner& owner)
+{
+ return std::visit(fu2::overload(
+ [] (const rgw_user& u) -> const std::string& { return u.tenant; },
+ [] (const rgw_account_id& a) -> const std::string& { return a; }
+ ), owner);
+}
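
`fu2::overload` comes from the bundled function2 library; the same visitor can be built in standard C++17 with the well-known `overloaded` helper. A runnable sketch with stand-in owner types:

```cpp
#include <iostream>
#include <string>
#include <variant>

struct rgw_user { std::string tenant, id; };  // stand-in for the real type
using rgw_account_id = std::string;
using rgw_owner = std::variant<rgw_user, rgw_account_id>;

// aggregate that inherits every lambda's call operator
template <class... Ts> struct overloaded : Ts... { using Ts::operator()...; };
template <class... Ts> overloaded(Ts...) -> overloaded<Ts...>;

const std::string& get_account_or_tenant(const rgw_owner& owner) {
  return std::visit(overloaded{
    [](const rgw_user& u) -> const std::string& { return u.tenant; },
    [](const rgw_account_id& a) -> const std::string& { return a; }
  }, owner);
}

int main() {
  rgw_owner a = rgw_account_id{"RGW1234567890"};
  rgw_owner b = rgw_user{"tenant1", "alice"};
  std::cout << get_account_or_tenant(a) << ' '
            << get_account_or_tenant(b) << '\n';
}
```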
+
bool topic_has_endpoint_secret(const rgw_pubsub_topic& topic) {
return topic.dest.stored_secret;
}
@@ -87,56 +111,107 @@ bool topics_has_endpoint_secret(const rgw_pubsub_topics& topics) {
return false;
}
-std::optional<rgw::IAM::Policy> get_policy_from_text(req_state* const s,
- std::string& policy_text) {
- const auto bl = bufferlist::static_from_string(policy_text);
+static bool topic_needs_queue(const rgw_pubsub_dest& dest)
+{
+ return !dest.push_endpoint.empty() && dest.persistent;
+}
+
+auto get_policy_from_text(req_state* const s, const std::string& policy_text)
+ -> boost::optional<rgw::IAM::Policy>
+{
try {
return rgw::IAM::Policy(
- s->cct, s->owner.id.tenant, bl,
+ s->cct, nullptr, policy_text,
s->cct->_conf.get_val<bool>("rgw_policy_reject_invalid_principals"));
} catch (rgw::IAM::PolicyParseException& e) {
ldout(s->cct, 1) << "failed to parse policy: '" << policy_text
<< "' with error: " << e.what() << dendl;
s->err.message = e.what();
- return std::nullopt;
+ return boost::none;
}
}
-int verify_topic_owner_or_policy(req_state* const s,
- const rgw_pubsub_topic& topic,
- const std::string& zonegroup_name,
- const uint64_t op) {
- if (topic.user == s->owner.id) {
- return 0;
+using rgw::IAM::Effect;
+using rgw::IAM::Policy;
+
+bool verify_topic_permission(const DoutPrefixProvider* dpp, req_state* s,
+ const rgw_owner& owner, const rgw::ARN& arn,
+ const boost::optional<Policy>& policy,
+ uint64_t op)
+{
+ if (s->auth.identity->get_account()) {
+ const bool account_root = (s->auth.identity->get_identity_type() == TYPE_ROOT);
+ if (!s->auth.identity->is_owner_of(owner)) {
+ ldpp_dout(dpp, 4) << "cross-account request for resource owner "
+ << owner << " != " << s->owner.id << dendl;
+ // cross-account requests evaluate the identity-based policies separately
+ // from the resource-based policies and require Allow from both
+ const auto identity_res = evaluate_iam_policies(
+ dpp, s->env, *s->auth.identity, account_root, op, arn,
+ {}, s->iam_identity_policies, s->session_policies);
+ if (identity_res == Effect::Deny) {
+ return false;
+ }
+ const auto resource_res = evaluate_iam_policies(
+ dpp, s->env, *s->auth.identity, false, op, arn,
+ policy, {}, {});
+ return identity_res == Effect::Allow && resource_res == Effect::Allow;
+ } else {
+ // require an Allow from either identity- or resource-based policy
+ return Effect::Allow == evaluate_iam_policies(
+ dpp, s->env, *s->auth.identity, account_root, op, arn,
+ policy, s->iam_identity_policies, s->session_policies);
+ }
+ }
+
+ constexpr bool account_root = false;
+ const auto effect = evaluate_iam_policies(
+ dpp, s->env, *s->auth.identity, account_root, op, arn,
+ policy, s->iam_identity_policies, s->session_policies);
+ if (effect == Effect::Deny) {
+ return false;
+ }
+ if (effect == Effect::Allow) {
+ return true;
+ }
+
+ if (s->auth.identity->is_owner_of(owner)) {
+ ldpp_dout(dpp, 10) << __func__ << ": granted to resource owner" << dendl;
+ return true;
}
- // no policy set.
- if (topic.policy_text.empty()) {
- // if rgw_topic_require_publish_policy is "false" dont validate "publish" policies
- if (op == rgw::IAM::snsPublish && !s->cct->_conf->rgw_topic_require_publish_policy) {
- return 0;
+
+ if (!policy) {
+ if (op == rgw::IAM::snsPublish &&
+ !s->cct->_conf->rgw_topic_require_publish_policy) {
+ return true;
}
- if (topic.user.empty()) {
+
+ if (std::visit([] (const auto& o) { return o.empty(); }, owner)) {
// if we don't know the original user and there is no policy
// we will not reject the request.
// this is for compatibility with versions that did not store the user in the topic
- return 0;
+ return true;
}
- s->err.message = "Topic was created by another user.";
- return -EACCES;
}
- // bufferlist::static_from_string wants non const string
- std::string policy_text(topic.policy_text);
- const auto p = get_policy_from_text(s, policy_text);
- rgw::IAM::PolicyPrincipal princ_type = rgw::IAM::PolicyPrincipal::Other;
- const rgw::ARN arn(rgw::Partition::aws, rgw::Service::sns, zonegroup_name,
- s->user->get_tenant(), topic.name);
- if (!p || p->eval(s->env, *s->auth.identity, op, arn, princ_type) !=
- rgw::IAM::Effect::Allow) {
- ldout(s->cct, 1) << "topic policy failed validation, topic policy: " << p
- << dendl;
- return -EACCES;
+
+ s->err.message = "Topic was created by another user.";
+ return false;
+}
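
Compressing the branches above into a table: a Deny from any evaluated policy wins; cross-account requests then need an Allow from both the identity policies and the topic's resource policy, while same-account requests need an Allow from either. A toy sketch of just that combination logic; it deliberately omits the legacy owner and empty-policy fallbacks in the non-account path:

```cpp
#include <iostream>

enum class Effect { Allow, Deny, Pass };  // mirrors rgw::IAM::Effect

static bool allowed(bool cross_account, Effect identity, Effect resource) {
  if (identity == Effect::Deny || resource == Effect::Deny)
    return false;  // explicit Deny always wins
  if (cross_account)
    return identity == Effect::Allow && resource == Effect::Allow;
  return identity == Effect::Allow || resource == Effect::Allow;
}

int main() {
  std::cout << std::boolalpha
            << allowed(true,  Effect::Allow, Effect::Pass) << '\n'   // false
            << allowed(false, Effect::Allow, Effect::Pass) << '\n'   // true
            << allowed(false, Effect::Pass,  Effect::Deny) << '\n';  // false
}
```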
+
+// parse topic policy if present and evaluate permissions
+bool verify_topic_permission(const DoutPrefixProvider* dpp, req_state* s,
+ const rgw_pubsub_topic& topic,
+ const rgw::ARN& arn, uint64_t op)
+{
+ boost::optional<Policy> policy;
+ if (!topic.policy_text.empty()) {
+ policy = get_policy_from_text(s, topic.policy_text);
+ if (!policy) {
+ return false;
+ }
}
- return 0;
+
+ return verify_topic_permission(dpp, s, topic.owner, arn, policy, op);
}
// command (AWS compliant):
@@ -146,8 +221,9 @@ class RGWPSCreateTopicOp : public RGWOp {
private:
bufferlist bl_post_body;
std::string topic_name;
+ rgw::ARN topic_arn;
+ std::optional<rgw_pubsub_topic> topic;
rgw_pubsub_dest dest;
- std::string topic_arn;
std::string opaque_data;
std::string policy_text;
@@ -165,7 +241,7 @@ class RGWPSCreateTopicOp : public RGWOp {
s->info.args.get_int("max_retries", reinterpret_cast<int *>(&dest.max_retries), rgw::notify::DEFAULT_GLOBAL_VALUE);
s->info.args.get_int("retry_sleep_duration", reinterpret_cast<int *>(&dest.retry_sleep_duration), rgw::notify::DEFAULT_GLOBAL_VALUE);
- if (!validate_and_update_endpoint_secret(dest, s->cct, *(s->info.env))) {
+ if (!validate_and_update_endpoint_secret(dest, s->cct, *s->info.env, s->err.message)) {
return -EINVAL;
}
// Store topic Policy.
@@ -201,10 +277,9 @@ class RGWPSCreateTopicOp : public RGWOp {
// dest object only stores endpoint info
dest.arn_topic = topic_name;
// the topic ARN will be sent in the reply
- const rgw::ARN arn(rgw::Partition::aws, rgw::Service::sns,
+ topic_arn = rgw::ARN{rgw::Partition::aws, rgw::Service::sns,
driver->get_zone()->get_zonegroup().get_name(),
- s->user->get_tenant(), topic_name);
- topic_arn = arn.to_string();
+ get_account_or_tenant(s->owner.id), topic_name};
return 0;
}
@@ -212,36 +287,59 @@ class RGWPSCreateTopicOp : public RGWOp {
explicit RGWPSCreateTopicOp(bufferlist bl_post_body)
: bl_post_body(std::move(bl_post_body)) {}
- int verify_permission(optional_yield y) override {
- auto ret = get_params();
+ int init_processing(optional_yield y) override {
+ int ret = get_params();
+ if (ret < 0) {
+ return ret;
+ }
+ ret = RGWOp::init_processing(y);
if (ret < 0) {
return ret;
}
- const RGWPubSub ps(driver, s->owner.id.tenant, *s->penv.site);
+ // account users require the notification_v2 format to index the topic metadata
+ if (s->auth.identity->get_account() &&
+ !rgw::all_zonegroups_support(*s->penv.site, rgw::zone_features::notification_v2)) {
+ s->err.message = "The 'notification_v2' zone feature must be enabled "
+ "to create topics in an account";
+ return -EINVAL;
+ }
+
+ // try to load existing topic for owner and policy
+ const RGWPubSub ps(driver, get_account_or_tenant(s->owner.id), *s->penv.site);
rgw_pubsub_topic result;
ret = ps.get_topic(this, topic_name, result, y, nullptr);
if (ret == -ENOENT) {
// topic not present
- return 0;
- }
- if (ret == 0) {
- ret = verify_topic_owner_or_policy(
- s, result, driver->get_zone()->get_zonegroup().get_name(),
- rgw::IAM::snsCreateTopic);
- if (ret == 0)
- {
- return 0;
- }
+ } else if (ret < 0) {
+ ldpp_dout(this, 1) << "failed to read topic '" << topic_name
+ << "', with error:" << ret << dendl;
+ return ret;
+ } else {
+ topic = std::move(result);
+ }
+ return 0;
+ }
- ldpp_dout(this, 1) << "no permission to modify topic '" << topic_name
- << "', topic already exist." << dendl;
- return -EACCES;
+ int verify_permission(optional_yield y) override {
+ if (topic) {
+ // consult topic policy for overwrite permission
+ if (!verify_topic_permission(this, s, *topic, topic_arn,
+ rgw::IAM::snsCreateTopic)) {
+ return -ERR_AUTHORIZATION;
+ }
+ } else {
+ // if no topic policy exists, just check identity policies for denies
+ // account users require an Allow, non-account users just check for Deny
+ const bool mandatory_policy{s->auth.identity->get_account()};
+ if (!verify_user_permission(this, s, topic_arn,
+ rgw::IAM::snsCreateTopic,
+ mandatory_policy)) {
+ return -ERR_AUTHORIZATION;
+ }
}
- ldpp_dout(this, 1) << "failed to read topic '" << topic_name
- << "', with error:" << ret << dendl;
- return ret;
- }
+ return 0;
+ }
void pre_exec() override {
rgw_bucket_object_pre_exec(s);
@@ -264,9 +362,9 @@ class RGWPSCreateTopicOp : public RGWOp {
}
const auto f = s->formatter;
- f->open_object_section_in_ns("CreateTopicResponse", AWS_SNS_NS);
+ f->open_object_section_in_ns("CreateTopicResponse", RGW_REST_SNS_XMLNS);
f->open_object_section("CreateTopicResult");
- encode_xml("TopicArn", topic_arn, f);
+ encode_xml("TopicArn", topic_arn.to_string(), f);
f->close_section(); // CreateTopicResult
f->open_object_section("ResponseMetadata");
encode_xml("RequestId", s->req_id, f);
@@ -280,16 +378,24 @@ void RGWPSCreateTopicOp::execute(optional_yield y) {
// master request will replicate the topic creation.
if (!driver->is_meta_master()) {
op_ret = rgw_forward_request_to_master(
- this, *s->penv.site, s->user->get_id(), &bl_post_body, nullptr, s->info, y);
+ this, *s->penv.site, s->owner.id, &bl_post_body, nullptr, s->info, y);
if (op_ret < 0) {
- ldpp_dout(this, 1)
+ ldpp_dout(this, 4)
<< "CreateTopic forward_request_to_master returned ret = " << op_ret
<< dendl;
return;
}
}
- if (!dest.push_endpoint.empty() && dest.persistent) {
- op_ret = rgw::notify::add_persistent_topic(topic_name, s->yield);
+
+ // don't add a persistent queue if we already have one
+ const bool already_persistent = topic && topic_needs_queue(topic->dest);
+ if (!already_persistent && topic_needs_queue(dest)) {
+ // initialize the persistent queue's location, using ':' as the namespace
+ // delimiter because its inclusion in a TopicName would break ARNs
+ dest.persistent_queue = string_cat_reserve(
+ get_account_or_tenant(s->owner.id), ":", topic_name);
+
+ op_ret = rgw::notify::add_persistent_topic(dest.persistent_queue, s->yield);
if (op_ret < 0) {
ldpp_dout(this, 1) << "CreateTopic Action failed to create queue for "
"persistent topics. error:"
@@ -297,11 +403,11 @@ void RGWPSCreateTopicOp::execute(optional_yield y) {
return;
}
}
- const RGWPubSub ps(driver, s->owner.id.tenant, *s->penv.site);
- op_ret = ps.create_topic(this, topic_name, dest, topic_arn, opaque_data,
- s->owner.id, policy_text, y);
+ const RGWPubSub ps(driver, get_account_or_tenant(s->owner.id), *s->penv.site);
+ op_ret = ps.create_topic(this, topic_name, dest, topic_arn.to_string(),
+ opaque_data, s->owner.id, policy_text, y);
if (op_ret < 0) {
- ldpp_dout(this, 1) << "failed to create topic '" << topic_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 4) << "failed to create topic '" << topic_name << "', ret=" << op_ret << dendl;
return;
}
ldpp_dout(this, 20) << "successfully created topic '" << topic_name << "'" << dendl;
@@ -317,6 +423,12 @@ private:
public:
int verify_permission(optional_yield) override {
+ // check account permissions up front
+ if (s->auth.identity->get_account() &&
+ !verify_user_permission(this, s, {}, rgw::IAM::snsListTopics)) {
+ return -ERR_AUTHORIZATION;
+ }
+
return 0;
}
void pre_exec() override {
@@ -340,7 +452,7 @@ public:
}
const auto f = s->formatter;
- f->open_object_section_in_ns("ListTopicsResponse", AWS_SNS_NS);
+ f->open_object_section_in_ns("ListTopicsResponse", RGW_REST_SNS_XMLNS);
f->open_object_section("ListTopicsResult");
encode_xml("Topics", result, f);
f->close_section(); // ListTopicsResult
@@ -358,7 +470,7 @@ public:
void RGWPSListTopicsOp::execute(optional_yield y) {
const std::string start_token = s->info.args.get("NextToken");
- const RGWPubSub ps(driver, s->owner.id.tenant, *s->penv.site);
+ const RGWPubSub ps(driver, get_account_or_tenant(s->owner.id), *s->penv.site);
constexpr int max_items = 100;
op_ret = ps.get_topics(this, start_token, max_items, result, next_token, y);
// if there are no topics it is not considered an error
@@ -368,44 +480,75 @@ void RGWPSListTopicsOp::execute(optional_yield y) {
return;
}
if (topics_has_endpoint_secret(result) && !verify_transport_security(s->cct, *(s->info.env))) {
- ldpp_dout(this, 1) << "topics contain secrets and cannot be sent over insecure transport" << dendl;
+ s->err.message = "Topic contains secrets that must be transmitted over a secure transport";
op_ret = -EPERM;
return;
}
+
+ ldpp_dout(this, 20) << "successfully got topics" << dendl;
+
+ // non-account users filter out topics they aren't permitted to see
+ if (s->auth.identity->get_account()) {
+ return;
+ }
for (auto it = result.topics.cbegin(); it != result.topics.cend();) {
- if (verify_topic_owner_or_policy(
- s, it->second, driver->get_zone()->get_zonegroup().get_name(),
- rgw::IAM::snsGetTopicAttributes) != 0) {
+ const auto arn = rgw::ARN::parse(it->second.arn);
+ if (!arn || !verify_topic_permission(this, s, it->second, *arn,
+ rgw::IAM::snsGetTopicAttributes)) {
result.topics.erase(it++);
} else {
++it;
}
}
- ldpp_dout(this, 20) << "successfully got topics" << dendl;
}
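// [Editor's note, not part of this change] result.topics is an associative
// container, so the loop above uses the erase(it++) idiom: the iterator is
// advanced before the erased node is invalidated. Since C++11 an
// equivalent form uses erase()'s return value (should_drop() is a
// placeholder predicate):
//
//   for (auto it = m.begin(); it != m.end();) {
//     if (should_drop(it->second)) {
//       it = m.erase(it);
//     } else {
//       ++it;
//     }
//   }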
// command (extension to AWS):
// POST
// Action=GetTopic&TopicArn=<topic-arn>
class RGWPSGetTopicOp : public RGWOp {
- private:
+ private:
+ rgw::ARN topic_arn;
std::string topic_name;
rgw_pubsub_topic result;
int get_params() {
- const auto topic_arn = rgw::ARN::parse((s->info.args.get("TopicArn")));
-
- if (!topic_arn || topic_arn->resource.empty()) {
- ldpp_dout(this, 1) << "GetTopic Action 'TopicArn' argument is missing or invalid" << dendl;
- return -EINVAL;
+ auto arn = validate_topic_arn(s->info.args.get("TopicArn"), s->err.message);
+ if (!arn) {
+ return -EINVAL;
}
-
- topic_name = topic_arn->resource;
+ topic_arn = std::move(*arn);
+ topic_name = topic_arn.resource;
return 0;
}
- public:
+ public:
+ int init_processing(optional_yield y) override {
+ int ret = get_params();
+ if (ret < 0) {
+ return ret;
+ }
+ const RGWPubSub ps(driver, get_account_or_tenant(s->owner.id), *s->penv.site);
+ ret = ps.get_topic(this, topic_name, result, y, nullptr);
+ if (ret < 0) {
+ ldpp_dout(this, 4) << "failed to get topic '" << topic_name << "', ret=" << ret << dendl;
+ if (ret == -ENOENT) {
+ s->err.message = "No such TopicArn";
+ return -ERR_NOT_FOUND; // return NotFound instead of NoSuchKey
+ }
+ return ret;
+ }
+ if (topic_has_endpoint_secret(result) && !verify_transport_security(s->cct, *(s->info.env))) {
+ s->err.message = "Topic contains secrets that must be transmitted over a secure transport";
+ return -EPERM;
+ }
+ return RGWOp::init_processing(y);
+ }
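+ // [Editor's note, not part of this change] This refactoring moves argument
+ // parsing and the metadata read into init_processing(), so that
+ // verify_permission() can evaluate the topic's resource policy against the
+ // already-loaded topic, leaving execute() with nothing but logging. The
+ // assumed lifecycle, with illustrative names:
+ //
+ //   int handle(RGWOp& op, optional_yield y) {
+ //     int r = op.init_processing(y);  // parse args, load topic metadata
+ //     if (r < 0) return r;
+ //     r = op.verify_permission(y);    // policy check against loaded state
+ //     if (r < 0) return r;
+ //     op.execute(y);                  // side effects / response only
+ //     return 0;
+ //   }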
+
int verify_permission(optional_yield y) override {
+ if (!verify_topic_permission(this, s, result, topic_arn,
+ rgw::IAM::snsGetTopicAttributes)) {
+ return -ERR_AUTHORIZATION;
+ }
return 0;
}
void pre_exec() override {
@@ -442,54 +585,56 @@ class RGWPSGetTopicOp : public RGWOp {
};
void RGWPSGetTopicOp::execute(optional_yield y) {
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
- const RGWPubSub ps(driver, s->owner.id.tenant, *s->penv.site);
- op_ret = ps.get_topic(this, topic_name, result, y, nullptr);
- if (op_ret < 0) {
- ldpp_dout(this, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl;
- return;
- }
- if (topic_has_endpoint_secret(result) && !verify_transport_security(s->cct, *(s->info.env))) {
- ldpp_dout(this, 1) << "topic '" << topic_name << "' contain secret and cannot be sent over insecure transport" << dendl;
- op_ret = -EPERM;
- return;
- }
- op_ret = verify_topic_owner_or_policy(
- s, result, driver->get_zone()->get_zonegroup().get_name(),
- rgw::IAM::snsGetTopicAttributes);
- if (op_ret != 0) {
- ldpp_dout(this, 1) << "no permission to get topic '" << topic_name
- << "'" << dendl;
- return;
- }
- ldpp_dout(this, 1) << "successfully got topic '" << topic_name << "'" << dendl;
+ ldpp_dout(this, 4) << "successfully got topic '" << topic_name << "'" << dendl;
}
// command (AWS compliant):
// POST
// Action=GetTopicAttributes&TopicArn=<topic-arn>
class RGWPSGetTopicAttributesOp : public RGWOp {
- private:
+ private:
+ rgw::ARN topic_arn;
std::string topic_name;
rgw_pubsub_topic result;
int get_params() {
- const auto topic_arn = rgw::ARN::parse((s->info.args.get("TopicArn")));
-
- if (!topic_arn || topic_arn->resource.empty()) {
- ldpp_dout(this, 1) << "GetTopicAttribute Action 'TopicArn' argument is missing or invalid" << dendl;
- return -EINVAL;
+ auto arn = validate_topic_arn(s->info.args.get("TopicArn"), s->err.message);
+ if (!arn) {
+ return -EINVAL;
}
+ topic_arn = std::move(*arn);
+ topic_name = topic_arn.resource;
+ return 0;
+ }
- topic_name = topic_arn->resource;
+ public:
+ int init_processing(optional_yield y) override {
+ int ret = get_params();
+ if (ret < 0) {
+ return ret;
+ }
+ const RGWPubSub ps(driver, get_account_or_tenant(s->owner.id), *s->penv.site);
+ ret = ps.get_topic(this, topic_name, result, y, nullptr);
+ if (ret < 0) {
+ ldpp_dout(this, 4) << "failed to get topic '" << topic_name << "', ret=" << ret << dendl;
+ if (ret == -ENOENT) {
+ s->err.message = "No such TopicArn";
+ return -ERR_NOT_FOUND; // return NotFound instead of NoSuchKey
+ }
+ return ret;
+ }
+ if (topic_has_endpoint_secret(result) && !verify_transport_security(s->cct, *(s->info.env))) {
+ s->err.message = "Topic contains secrets that must be transmitted over a secure transport";
+ return -EPERM;
+ }
return 0;
}
- public:
int verify_permission(optional_yield y) override {
+ if (!verify_topic_permission(this, s, result, topic_arn,
+ rgw::IAM::snsGetTopicAttributes)) {
+ return -ERR_AUTHORIZATION;
+ }
return 0;
}
void pre_exec() override {
@@ -513,7 +658,7 @@ class RGWPSGetTopicAttributesOp : public RGWOp {
}
const auto f = s->formatter;
- f->open_object_section_in_ns("GetTopicAttributesResponse", AWS_SNS_NS);
+ f->open_object_section_in_ns("GetTopicAttributesResponse", RGW_REST_SNS_XMLNS);
f->open_object_section("GetTopicAttributesResult");
result.dump_xml_as_attributes(f);
f->close_section(); // GetTopicAttributesResult
@@ -526,30 +671,7 @@ class RGWPSGetTopicAttributesOp : public RGWOp {
};
void RGWPSGetTopicAttributesOp::execute(optional_yield y) {
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
- const RGWPubSub ps(driver, s->owner.id.tenant, *s->penv.site);
- op_ret = ps.get_topic(this, topic_name, result, y, nullptr);
- if (op_ret < 0) {
- ldpp_dout(this, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl;
- return;
- }
- if (topic_has_endpoint_secret(result) && !verify_transport_security(s->cct, *(s->info.env))) {
- ldpp_dout(this, 1) << "topic '" << topic_name << "' contain secret and cannot be sent over insecure transport" << dendl;
- op_ret = -EPERM;
- return;
- }
- op_ret = verify_topic_owner_or_policy(
- s, result, driver->get_zone()->get_zonegroup().get_name(),
- rgw::IAM::snsGetTopicAttributes);
- if (op_ret != 0) {
- ldpp_dout(this, 1) << "no permission to get topic '" << topic_name
- << "'" << dendl;
- return;
- }
- ldpp_dout(this, 1) << "successfully got topic '" << topic_name << "'" << dendl;
+ ldpp_dout(this, 4) << "successfully got topic '" << topic_name << "'" << dendl;
}
// command (AWS compliant):
@@ -558,31 +680,26 @@ void RGWPSGetTopicAttributesOp::execute(optional_yield y) {
class RGWPSSetTopicAttributesOp : public RGWOp {
private:
bufferlist bl_post_body;
+ rgw::ARN topic_arn;
std::string topic_name;
- std::string topic_arn;
+ rgw_pubsub_topic result;
std::string opaque_data;
std::string policy_text;
rgw_pubsub_dest dest;
- rgw_user topic_owner;
+ rgw_owner topic_owner;
std::string attribute_name;
int get_params() {
- const auto arn = rgw::ARN::parse((s->info.args.get("TopicArn")));
-
- if (!arn || arn->resource.empty()) {
- ldpp_dout(this, 1) << "SetTopicAttribute Action 'TopicArn' argument is "
- "missing or invalid"
- << dendl;
+ auto arn = validate_topic_arn(s->info.args.get("TopicArn"), s->err.message);
+ if (!arn) {
return -EINVAL;
}
- topic_arn = arn->to_string();
- topic_name = arn->resource;
+ topic_arn = std::move(*arn);
+ topic_name = topic_arn.resource;
+
attribute_name = s->info.args.get("AttributeName");
if (attribute_name.empty()) {
- ldpp_dout(this, 1)
- << "SetTopicAttribute Action 'AttributeName' argument is "
- "missing or invalid"
- << dendl;
+ s->err.message = "Missing required element AttributeName";
return -EINVAL;
}
return 0;
@@ -612,7 +729,7 @@ class RGWPSSetTopicAttributesOp : public RGWOp {
rgw::notify::DEFAULT_GLOBAL_VALUE);
} else if (attribute_name == "push-endpoint") {
dest.push_endpoint = s->info.args.get("AttributeValue");
- if (!validate_and_update_endpoint_secret(dest, s->cct, *(s->info.env))) {
+ if (!validate_and_update_endpoint_secret(dest, s->cct, *s->info.env, s->err.message)) {
return -EINVAL;
}
} else if (attribute_name == "Policy") {
@@ -636,17 +753,15 @@ class RGWPSSetTopicAttributesOp : public RGWOp {
: end_pos;
push_endpoint_args.replace(pos, end_pos - pos, replaced_str);
};
- const std::unordered_set<std::string> push_endpoint_args = {
+ static constexpr std::initializer_list<const char*> args = {
"verify-ssl", "use-ssl", "ca-location", "amqp-ack-level",
"amqp-exchange", "kafka-ack-level", "mechanism", "cloudevents"};
- if (push_endpoint_args.count(attribute_name) == 1) {
+ if (std::find(args.begin(), args.end(), attribute_name) != args.end()) {
replace_str(attribute_name, s->info.args.get("AttributeValue"));
return 0;
}
- ldpp_dout(this, 1)
- << "SetTopicAttribute Action 'AttributeName' argument is "
- "invalid: 'AttributeName' = "
- << attribute_name << dendl;
+ s->err.message = fmt::format("Invalid value for AttributeName '{}'",
+ attribute_name);
return -EINVAL;
}
return 0;
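// [Editor's note, not part of this change] For a handful of short literal
// keys, a linear std::find over a static constexpr list avoids building a
// std::unordered_set (and its heap allocations) on every call. The lookup
// in isolation:
//
//   #include <algorithm>
//   #include <initializer_list>
//   #include <string>
//
//   bool is_endpoint_arg(const std::string& name) {
//     static constexpr std::initializer_list<const char*> args = {
//       "verify-ssl", "use-ssl", "ca-location", "amqp-ack-level",
//       "amqp-exchange", "kafka-ack-level", "mechanism", "cloudevents"};
//     return std::find(args.begin(), args.end(), name) != args.end();
//   }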
@@ -656,30 +771,39 @@ class RGWPSSetTopicAttributesOp : public RGWOp {
explicit RGWPSSetTopicAttributesOp(bufferlist bl_post_body)
: bl_post_body(std::move(bl_post_body)) {}
- int verify_permission(optional_yield y) override {
- auto ret = get_params();
+ int init_processing(optional_yield y) override {
+ int ret = get_params();
if (ret < 0) {
return ret;
}
- rgw_pubsub_topic result;
- const RGWPubSub ps(driver, s->owner.id.tenant, *s->penv.site);
+
+ const RGWPubSub ps(driver, get_account_or_tenant(s->owner.id), *s->penv.site);
ret = ps.get_topic(this, topic_name, result, y, nullptr);
if (ret < 0) {
- ldpp_dout(this, 1) << "failed to get topic '" << topic_name
+ ldpp_dout(this, 4) << "failed to get topic '" << topic_name
<< "', ret=" << ret << dendl;
+ if (ret == -ENOENT) {
+ s->err.message = "No such TopicArn";
+ return -ERR_NOT_FOUND; // return NotFound instead of NoSuchKey
+ }
return ret;
}
- topic_owner = result.user;
- ret = verify_topic_owner_or_policy(
- s, result, driver->get_zone()->get_zonegroup().get_name(),
- rgw::IAM::snsSetTopicAttributes);
- if (ret != 0) {
- ldpp_dout(this, 1) << "no permission to set attributes for topic '" << topic_name
- << "'" << dendl;
+ topic_owner = result.owner;
+
+ ret = map_attributes(result);
+ if (ret < 0) {
return ret;
}
- return map_attributes(result);
+ return RGWOp::init_processing(y);
+ }
+
+ int verify_permission(optional_yield y) override {
+ if (!verify_topic_permission(this, s, result, topic_arn,
+ rgw::IAM::snsSetTopicAttributes)) {
+ return -ERR_AUTHORIZATION;
+ }
+ return 0;
}
void pre_exec() override { rgw_bucket_object_pre_exec(s); }
@@ -701,7 +825,7 @@ class RGWPSSetTopicAttributesOp : public RGWOp {
}
const auto f = s->formatter;
- f->open_object_section_in_ns("SetTopicAttributesResponse", AWS_SNS_NS);
+ f->open_object_section_in_ns("SetTopicAttributesResponse", RGW_REST_SNS_XMLNS);
f->open_object_section("ResponseMetadata");
encode_xml("RequestId", s->req_id, f);
f->close_section(); // ResponseMetadata
@@ -713,37 +837,45 @@ class RGWPSSetTopicAttributesOp : public RGWOp {
void RGWPSSetTopicAttributesOp::execute(optional_yield y) {
if (!driver->is_meta_master()) {
op_ret = rgw_forward_request_to_master(
- this, *s->penv.site, s->user->get_id(), &bl_post_body, nullptr, s->info, y);
+ this, *s->penv.site, s->owner.id, &bl_post_body, nullptr, s->info, y);
if (op_ret < 0) {
- ldpp_dout(this, 1)
+ ldpp_dout(this, 4)
<< "SetTopicAttributes forward_request_to_master returned ret = "
<< op_ret << dendl;
return;
}
}
- if (!dest.push_endpoint.empty() && dest.persistent) {
- op_ret = rgw::notify::add_persistent_topic(topic_name, s->yield);
+ // don't add a persistent queue if we already have one
+ const bool already_persistent = topic_needs_queue(result.dest);
+ if (!already_persistent && topic_needs_queue(dest)) {
+ // initialize the persistent queue's location, using ':' as the namespace
+ // delimiter; ':' is safe because no valid TopicName can contain one
+ // (its inclusion in a TopicName would break the ARN format)
+ dest.persistent_queue = string_cat_reserve(
+ get_account_or_tenant(s->owner.id), ":", topic_name);
+
+ op_ret = rgw::notify::add_persistent_topic(dest.persistent_queue, s->yield);
if (op_ret < 0) {
- ldpp_dout(this, 1)
+ ldpp_dout(this, 4)
<< "SetTopicAttributes Action failed to create queue for "
"persistent topics. error:"
<< op_ret << dendl;
return;
}
- } else { // changing the persistent topic to non-persistent.
- op_ret = rgw::notify::remove_persistent_topic(topic_name, s->yield);
+ } else if (already_persistent) {
+ // changing the persistent topic to non-persistent.
+ op_ret = rgw::notify::remove_persistent_topic(result.dest.persistent_queue, s->yield);
if (op_ret != -ENOENT && op_ret < 0) {
- ldpp_dout(this, 1) << "SetTopicAttributes Action failed to remove queue "
+ ldpp_dout(this, 4) << "SetTopicAttributes Action failed to remove queue "
"for persistent topics. error:"
<< op_ret << dendl;
return;
}
}
- const RGWPubSub ps(driver, s->owner.id.tenant, *s->penv.site);
- op_ret = ps.create_topic(this, topic_name, dest, topic_arn, opaque_data,
- topic_owner, policy_text, y);
+ const RGWPubSub ps(driver, get_account_or_tenant(s->owner.id), *s->penv.site);
+ op_ret = ps.create_topic(this, topic_name, dest, topic_arn.to_string(),
+ opaque_data, topic_owner, policy_text, y);
if (op_ret < 0) {
- ldpp_dout(this, 1) << "failed to SetAttributes for topic '" << topic_name
+ ldpp_dout(this, 4) << "failed to SetAttributes for topic '" << topic_name
<< "', ret=" << op_ret << dendl;
return;
}
@@ -757,17 +889,17 @@ void RGWPSSetTopicAttributesOp::execute(optional_yield y) {
class RGWPSDeleteTopicOp : public RGWOp {
private:
bufferlist bl_post_body;
+ rgw::ARN topic_arn;
std::string topic_name;
+ std::optional<rgw_pubsub_topic> topic;
int get_params() {
- const auto topic_arn = rgw::ARN::parse((s->info.args.get("TopicArn")));
-
- if (!topic_arn || topic_arn->resource.empty()) {
- ldpp_dout(this, 1) << "DeleteTopic Action 'TopicArn' argument is missing or invalid" << dendl;
+ auto arn = validate_topic_arn(s->info.args.get("TopicArn"), s->err.message);
+ if (!arn) {
return -EINVAL;
}
-
- topic_name = topic_arn->resource;
+ topic_arn = std::move(*arn);
+ topic_name = topic_arn.resource;
return 0;
}
@@ -775,9 +907,48 @@ class RGWPSDeleteTopicOp : public RGWOp {
explicit RGWPSDeleteTopicOp(bufferlist bl_post_body)
: bl_post_body(std::move(bl_post_body)) {}
- int verify_permission(optional_yield) override {
+ int init_processing(optional_yield y) override {
+ int ret = get_params();
+ if (ret < 0) {
+ return ret;
+ }
+
+ const RGWPubSub ps(driver, get_account_or_tenant(s->owner.id), *s->penv.site);
+ rgw_pubsub_topic result;
+ ret = ps.get_topic(this, topic_name, result, y, nullptr);
+ if (ret == -ENOENT) {
+ // leave topic empty
+ } else if (ret < 0) {
+ ldpp_dout(this, 4) << "failed to get topic '" << topic_name
+ << "', ret=" << ret << dendl;
+ return ret;
+ } else {
+ topic = std::move(result);
+ }
+
+ return RGWOp::init_processing(y);
+ }
+
+ int verify_permission(optional_yield y) override {
+ if (topic) {
+ // consult topic policy for delete permission
+ if (!verify_topic_permission(this, s, *topic, topic_arn,
+ rgw::IAM::snsDeleteTopic)) {
+ return -ERR_AUTHORIZATION;
+ }
+ } else {
+ // if no topic policy exists, just check identity policies
+ // account users require an Allow, non-account users just check for Deny
+ const bool mandatory_policy = !!s->auth.identity->get_account();
+ if (!verify_user_permission(this, s, topic_arn,
+ rgw::IAM::snsDeleteTopic,
+ mandatory_policy)) {
+ return -ERR_AUTHORIZATION;
+ }
+ }
return 0;
}
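// [Editor's note, not part of this change] Summarizing the intended
// semantics when the topic does not exist: account users need an explicit
// identity-policy Allow (mandatory_policy=true), while non-account users
// are only rejected on an explicit Deny:
//
//   identity policy    account user    non-account user
//   Allow              allowed         allowed
//   (no statement)     denied          allowed
//   Deny               denied          denied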
+
void pre_exec() override {
rgw_bucket_object_pre_exec(s);
}
@@ -799,7 +970,7 @@ class RGWPSDeleteTopicOp : public RGWOp {
}
const auto f = s->formatter;
- f->open_object_section_in_ns("DeleteTopicResponse", AWS_SNS_NS);
+ f->open_object_section_in_ns("DeleteTopicResponse", RGW_REST_SNS_XMLNS);
f->open_object_section("ResponseMetadata");
encode_xml("RequestId", s->req_id, f);
f->close_section(); // ResponseMetadata
@@ -809,13 +980,9 @@ class RGWPSDeleteTopicOp : public RGWOp {
};
void RGWPSDeleteTopicOp::execute(optional_yield y) {
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
if (!driver->is_meta_master()) {
op_ret = rgw_forward_request_to_master(
- this, *s->penv.site, s->user->get_id(), &bl_post_body, nullptr, s->info, y);
+ this, *s->penv.site, s->owner.id, &bl_post_body, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 1)
<< "DeleteTopic forward_request_to_master returned ret = " << op_ret
@@ -823,43 +990,23 @@ void RGWPSDeleteTopicOp::execute(optional_yield y) {
return;
}
}
- const RGWPubSub ps(driver, s->owner.id.tenant, *s->penv.site);
- rgw_pubsub_topic result;
- op_ret = ps.get_topic(this, topic_name, result, y, nullptr);
- if (op_ret == 0) {
- op_ret = verify_topic_owner_or_policy(
- s, result, driver->get_zone()->get_zonegroup().get_name(),
- rgw::IAM::snsDeleteTopic);
- if (op_ret < 0) {
- ldpp_dout(this, 1) << "no permission to remove topic '" << topic_name
- << "'" << dendl;
- return;
- }
- op_ret = ps.remove_topic(this, topic_name, y);
- if (op_ret < 0 && op_ret != -ENOENT) {
- ldpp_dout(this, 1) << "failed to remove topic '" << topic_name << ", ret=" << op_ret << dendl;
- return;
- }
- ldpp_dout(this, 1) << "successfully removed topic '" << topic_name << "'" << dendl;
- } else if (op_ret != -ENOENT) {
- ldpp_dout(this, 1) << "failed to fetch topic '" << topic_name
- << "' with error: " << op_ret << dendl;
+ if (!topic) {
+ return;
+ }
+
+ const RGWPubSub ps(driver, get_account_or_tenant(s->owner.id), *s->penv.site);
+ op_ret = ps.remove_topic(this, topic_name, y);
+ if (op_ret < 0 && op_ret != -ENOENT) {
+ ldpp_dout(this, 4) << "failed to remove topic '" << topic_name << ", ret=" << op_ret << dendl;
return;
}
+ ldpp_dout(this, 4) << "successfully removed topic '" << topic_name << "'" << dendl;
+
if (op_ret == -ENOENT) {
// it's not an error if no topics exist, just a no-op
op_ret = 0;
}
- // upon deletion it is not known if topic is persistent or not
- // will try to delete the persistent topic anyway
- // doing this regardless of the topic being previously deleted
- // to allow for cleanup if only the queue deletion failed
- if (const auto ret = rgw::notify::remove_persistent_topic(topic_name, s->yield); ret < 0 && ret != -ENOENT) {
- ldpp_dout(this, 1) << "DeleteTopic Action failed to remove queue for "
- "persistent topics. error:"
- << ret << dendl;
- }
}
using op_generator = RGWOp*(*)(bufferlist);
@@ -944,19 +1091,22 @@ int delete_all_notifications(const DoutPrefixProvider *dpp, const rgw_pubsub_buc
// actual configuration is XML encoded in the body of the message
class RGWPSCreateNotifOp : public RGWDefaultResponseOp {
bufferlist data;
+ rgw_pubsub_s3_notifications configurations;
+ std::map<rgw::ARN, rgw_pubsub_topic> topics;
+
int verify_params() override {
bool exists;
const auto no_value = s->info.args.get("notification", &exists);
if (!exists) {
- ldpp_dout(this, 1) << "missing required param 'notification'" << dendl;
+ s->err.message = "Missing required parameter 'notification'";
return -EINVAL;
}
if (no_value.length() > 0) {
- ldpp_dout(this, 1) << "param 'notification' should not have any value" << dendl;
+ s->err.message = "Parameter 'notification' should not have any value";
return -EINVAL;
}
if (s->bucket_name.empty()) {
- ldpp_dout(this, 1) << "request must be on a bucket" << dendl;
+ s->err.message = "Missing required bucket name";
return -EINVAL;
}
return 0;
@@ -968,35 +1118,36 @@ class RGWPSCreateNotifOp : public RGWDefaultResponseOp {
std::tie(r, data) = read_all_input(s, max_size, false);
if (r < 0) {
- ldpp_dout(this, 1) << "failed to read XML payload" << dendl;
+ ldpp_dout(this, 4) << "failed to read XML payload" << dendl;
return r;
}
if (data.length() == 0) {
- ldpp_dout(this, 1) << "XML payload missing" << dendl;
+ ldpp_dout(this, 4) << "XML payload missing" << dendl;
return -EINVAL;
}
RGWXMLDecoder::XMLParser parser;
if (!parser.init()){
- ldpp_dout(this, 1) << "failed to initialize XML parser" << dendl;
+ ldpp_dout(this, 4) << "failed to initialize XML parser" << dendl;
return -EINVAL;
}
if (!parser.parse(data.c_str(), data.length(), 1)) {
- ldpp_dout(this, 1) << "failed to parse XML payload" << dendl;
+ ldpp_dout(this, 4) << "failed to parse XML payload" << dendl;
return -ERR_MALFORMED_XML;
}
try {
// NotificationConfigurations is mandatory
// It can be empty which means we delete all the notifications
RGWXMLDecoder::decode_xml("NotificationConfiguration", configurations, &parser, true);
- } catch (RGWXMLDecoder::err& err) {
- ldpp_dout(this, 1) << "failed to parse XML payload. error: " << err << dendl;
+ } catch (const RGWXMLDecoder::err& err) {
+ s->err.message = err.what();
return -ERR_MALFORMED_XML;
}
return 0;
}
public:
+ int init_processing(optional_yield y) override;
int verify_permission(optional_yield y) override;
void pre_exec() override {
@@ -1007,55 +1158,104 @@ public:
RGWOpType get_type() override { return RGW_OP_PUBSUB_NOTIF_CREATE; }
uint32_t op_mask() override { return RGW_OP_TYPE_WRITE; }
-
void execute(optional_yield) override;
void execute_v2(optional_yield);
};
-void RGWPSCreateNotifOp::execute(optional_yield y) {
- if (rgw::all_zonegroups_support(*s->penv.site, rgw::zone_features::notification_v2)) {
- return execute_v2(y);
+int RGWPSCreateNotifOp::init_processing(optional_yield y)
+{
+ int ret = verify_params();
+ if (ret < 0) {
+ return ret;
}
- op_ret = verify_params();
- if (op_ret < 0) {
- return;
+
+ ret = get_params_from_body(configurations);
+ if (ret < 0) {
+ return ret;
}
- rgw_pubsub_s3_notifications configurations;
- op_ret = get_params_from_body(configurations);
- if (op_ret < 0) {
- return;
+
+ for (const auto& c : configurations.list) {
+ const auto& notif_name = c.id;
+ if (notif_name.empty()) {
+ s->err.message = "Missing required element Id";
+ return -EINVAL;
+ }
+ if (c.topic_arn.empty()) {
+ s->err.message = "Missing required element Topic";
+ return -EINVAL;
+ }
+
+ const auto arn = rgw::ARN::parse(c.topic_arn);
+ if (!arn || arn->resource.empty()) {
+ s->err.message = "Invalid Topic ARN";
+ return -EINVAL;
+ }
+ const auto& topic_name = arn->resource;
+
+ if (std::find(c.events.begin(), c.events.end(), rgw::notify::UnknownEvent) != c.events.end()) {
+ s->err.message = "Unknown Event type: " + notif_name;
+ return -EINVAL;
+ }
+
+ // load topic metadata if we haven't already
+ auto insert = topics.emplace(std::piecewise_construct,
+ std::forward_as_tuple(*arn),
+ std::forward_as_tuple());
+ if (insert.second) {
+ rgw_pubsub_topic& topic_info = insert.first->second;
+ const RGWPubSub ps(driver, arn->account, *s->penv.site);
+ ret = ps.get_topic(this, topic_name, topic_info, y, nullptr);
+ if (ret < 0) {
+ ldpp_dout(this, 4) << "failed to get topic '" << topic_name << "', ret=" << ret << dendl;
+ return ret;
+ }
+ }
+ }
+
+ return RGWOp::init_processing(y);
+}
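+// [Editor's sketch, not part of this change] The emplace() with
+// std::piecewise_construct above default-constructs the mapped topic only
+// when the ARN key is new; insert.second says whether this call created the
+// entry, so each topic is fetched exactly once even when several
+// notifications reference it. The same shape with C++17 try_emplace()
+// (Topic and fetch() are placeholders):
+//
+//   std::map<std::string, Topic> cache;
+//   int load_once(const std::string& key) {
+//     auto [it, inserted] = cache.try_emplace(key);
+//     if (!inserted) return 0;        // already loaded for an earlier entry
+//     return fetch(key, it->second);  // populate the new entry once
+//   }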
+
+int RGWPSCreateNotifOp::verify_permission(optional_yield y) {
+ // require s3:PutBucketNotification permission for the bucket
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketNotification)) {
+ return -EACCES;
+ }
+
+ // require sns:Publish permission for each topic
+ for (const auto& [arn, topic] : topics) {
+ if (!verify_topic_permission(this, s, topic, arn, rgw::IAM::snsPublish)) {
+ return -EACCES;
+ }
}
+ return 0;
+}
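+// [Editor's note, not part of this change] Creating a notification now
+// requires both the bucket-level permission and publish rights on every
+// referenced topic. An illustrative identity policy granting both (bucket
+// and topic names are placeholders):
+//
+//   { "Version": "2012-10-17",
+//     "Statement": [
+//       { "Effect": "Allow", "Action": "s3:PutBucketNotification",
+//         "Resource": "arn:aws:s3:::mybucket" },
+//       { "Effect": "Allow", "Action": "sns:Publish",
+//         "Resource": "arn:aws:sns:*:*:mytopic" } ] }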
+
+void RGWPSCreateNotifOp::execute(optional_yield y) {
if (!driver->is_meta_master()) {
op_ret = rgw_forward_request_to_master(
- this, *s->penv.site, s->user->get_id(), &data, nullptr, s->info, y);
+ this, *s->penv.site, s->owner.id, &data, nullptr, s->info, y);
if (op_ret < 0) {
- ldpp_dout(this, 1) << "CreateBucketNotification "
+ ldpp_dout(this, 4) << "CreateBucketNotification "
"forward_request_to_master returned ret = "
<< op_ret << dendl;
return;
}
}
- std::unique_ptr<rgw::sal::Bucket> bucket;
- op_ret = driver->load_bucket(this, rgw_bucket(s->bucket_tenant, s->bucket_name),
- &bucket, y);
- if (op_ret < 0) {
- ldpp_dout(this, 1) << "failed to get bucket '" <<
- (s->bucket_tenant.empty() ? s->bucket_name : s->bucket_tenant + ":" + s->bucket_name) <<
- "' info, ret = " << op_ret << dendl;
- return;
+ if (rgw::all_zonegroups_support(*s->penv.site, rgw::zone_features::notification_v2)) {
+ return execute_v2(y);
}
- const RGWPubSub ps(driver, s->owner.id.tenant);
- const RGWPubSub::Bucket b(ps, bucket.get());
+ const RGWPubSub ps(driver, get_account_or_tenant(s->owner.id), *s->penv.site);
+ const RGWPubSub::Bucket b(ps, s->bucket.get());
if(configurations.list.empty()) {
// get all topics on a bucket
rgw_pubsub_bucket_topics bucket_topics;
op_ret = b.get_topics(this, bucket_topics, y);
if (op_ret < 0) {
- ldpp_dout(this, 1) << "failed to get list of topics from bucket '" << s->bucket_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 4) << "failed to get list of topics from bucket '" << s->bucket_name << "', ret=" << op_ret << dendl;
return;
}
@@ -1065,47 +1265,19 @@ void RGWPSCreateNotifOp::execute(optional_yield y) {
for (const auto& c : configurations.list) {
const auto& notif_name = c.id;
- if (notif_name.empty()) {
- ldpp_dout(this, 1) << "missing notification id" << dendl;
- op_ret = -EINVAL;
- return;
- }
- if (c.topic_arn.empty()) {
- ldpp_dout(this, 1) << "missing topic ARN in notification: '" << notif_name << "'" << dendl;
- op_ret = -EINVAL;
- return;
- }
const auto arn = rgw::ARN::parse(c.topic_arn);
- if (!arn || arn->resource.empty()) {
- ldpp_dout(this, 1) << "topic ARN has invalid format: '" << c.topic_arn << "' in notification: '" << notif_name << "'" << dendl;
- op_ret = -EINVAL;
- return;
+ if (!arn) { // already validated above
+ continue;
}
+ const auto& topic_name = arn->resource;
- if (std::find(c.events.begin(), c.events.end(), rgw::notify::UnknownEvent) != c.events.end()) {
- ldpp_dout(this, 1) << "unknown event type in notification: '" << notif_name << "'" << dendl;
- op_ret = -EINVAL;
- return;
+ auto t = topics.find(*arn);
+ if (t == topics.end()) {
+ continue;
}
+ auto& topic_info = t->second;
- const auto topic_name = arn->resource;
-
- // get topic information. destination information is stored in the topic
- rgw_pubsub_topic topic_info;
- op_ret = ps.get_topic(this, topic_name, topic_info, y, nullptr);
- if (op_ret < 0) {
- ldpp_dout(this, 1) << "failed to get topic '" << topic_name << "', ret=" << op_ret << dendl;
- return;
- }
- op_ret = verify_topic_owner_or_policy(
- s, topic_info, driver->get_zone()->get_zonegroup().get_name(),
- rgw::IAM::snsPublish);
- if (op_ret != 0) {
- ldpp_dout(this, 1) << "no permission to create notification for topic '"
- << topic_name << "'" << dendl;
- return;
- }
// make sure that the full topic configuration matches
// TODO: use ARN match function
@@ -1139,36 +1311,7 @@ void RGWPSCreateNotifOp::execute(optional_yield y) {
}
}
-int RGWPSCreateNotifOp::verify_permission(optional_yield y) {
- if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketNotification)) {
- return -EACCES;
- }
-
- return 0;
-}
-
void RGWPSCreateNotifOp::execute_v2(optional_yield y) {
- op_ret = verify_params();
- if (op_ret < 0) {
- return;
- }
-
- rgw_pubsub_s3_notifications configurations;
- op_ret = get_params_from_body(configurations);
- if (op_ret < 0) {
- return;
- }
- if (!driver->is_meta_master()) {
- op_ret = rgw_forward_request_to_master(
- this, *s->penv.site, s->user->get_id(), &data, nullptr, s->info, y);
- if (op_ret < 0) {
- ldpp_dout(this, 1) << "CreateBucketNotification "
- "forward_request_to_master returned ret = "
- << op_ret << dendl;
- return;
- }
- }
-
if (const auto ret = driver->stat_topics_v1(s->bucket_tenant, y, this); ret != -ENOENT) {
ldpp_dout(this, 1) << "WARNING: " << (ret == 0 ? "topic migration in process" : "cannot determine topic migration status. ret = " + std::to_string(ret))
<< ". please try again later" << dendl;
@@ -1176,88 +1319,37 @@ void RGWPSCreateNotifOp::execute_v2(optional_yield y) {
return;
}
- std::unique_ptr<rgw::sal::Bucket> bucket;
- op_ret = driver->load_bucket(this, rgw_bucket(s->bucket_tenant, s->bucket_name),
- &bucket, y);
- if (op_ret < 0) {
- ldpp_dout(this, 1) << "failed to get bucket '"
- << (s->bucket_tenant.empty()
- ? s->bucket_name
- : s->bucket_tenant + ":" + s->bucket_name)
- << "' info, ret = " << op_ret << dendl;
- return;
- }
if (configurations.list.empty()) {
- op_ret = remove_notification_v2(this, driver, bucket.get(),
+ op_ret = remove_notification_v2(this, driver, s->bucket.get(),
/*delete all notif=true*/ "", y);
return;
}
rgw_pubsub_bucket_topics bucket_topics;
- op_ret = get_bucket_notifications(this, bucket.get(), bucket_topics);
+ op_ret = get_bucket_notifications(this, s->bucket.get(), bucket_topics);
if (op_ret < 0) {
ldpp_dout(this, 1)
<< "failed to load existing bucket notification on bucket: "
- << (s->bucket_tenant.empty() ? s->bucket_name
- : s->bucket_tenant + ":" + s->bucket_name)
- << "' , ret = " << op_ret << dendl;
+ << s->bucket << ", ret = " << op_ret << dendl;
return;
}
- const RGWPubSub ps(driver, s->owner.id.tenant, *s->penv.site);
- std::unordered_map<std::string, rgw_pubsub_topic> topics;
for (const auto& c : configurations.list) {
const auto& notif_name = c.id;
- if (notif_name.empty()) {
- ldpp_dout(this, 1) << "missing notification id" << dendl;
- op_ret = -EINVAL;
- return;
- }
- if (c.topic_arn.empty()) {
- ldpp_dout(this, 1) << "missing topic ARN in notification: '" << notif_name
- << "'" << dendl;
- op_ret = -EINVAL;
- return;
- }
const auto arn = rgw::ARN::parse(c.topic_arn);
- if (!arn || arn->resource.empty()) {
- ldpp_dout(this, 1) << "topic ARN has invalid format: '" << c.topic_arn
- << "' in notification: '" << notif_name << "'"
- << dendl;
- op_ret = -EINVAL;
- return;
- }
-
- if (std::find(c.events.begin(), c.events.end(),
- rgw::notify::UnknownEvent) != c.events.end()) {
- ldpp_dout(this, 1) << "unknown event type in notification: '"
- << notif_name << "'" << dendl;
- op_ret = -EINVAL;
- return;
+ if (!arn) { // already validated above
+ continue;
}
const auto& topic_name = arn->resource;
- if (!topics.contains(topic_name)) {
- // get topic information. destination information is stored in the topic
- rgw_pubsub_topic topic_info;
- op_ret = ps.get_topic(this, topic_name, topic_info, y,nullptr);
- if (op_ret < 0) {
- ldpp_dout(this, 1) << "failed to get topic '" << topic_name
- << "', ret=" << op_ret << dendl;
- return;
- }
- op_ret = verify_topic_owner_or_policy(
- s, topic_info, driver->get_zone()->get_zonegroup().get_name(),
- rgw::IAM::snsPublish);
- if (op_ret != 0) {
- ldpp_dout(this, 1) << "failed to create notification for topic '"
- << topic_name << "' topic owned by other user"
- << dendl;
- return;
- }
- topics[topic_name] = std::move(topic_info);
+
+ auto t = topics.find(*arn);
+ if (t == topics.end()) {
+ continue;
}
+ auto& topic_info = t->second;
+
auto& topic_filter =
bucket_topics.topics[topic_to_unique(topic_name, notif_name)];
- topic_filter.topic = topics[topic_name];
+ topic_filter.topic = topic_info;
topic_filter.events = c.events;
topic_filter.s3_id = notif_name;
topic_filter.s3_filter = c.filter;
@@ -1265,42 +1357,43 @@ void RGWPSCreateNotifOp::execute_v2(optional_yield y) {
// finally store all the bucket notifications as attr.
bufferlist bl;
bucket_topics.encode(bl);
- rgw::sal::Attrs& attrs = bucket->get_attrs();
+ rgw::sal::Attrs& attrs = s->bucket->get_attrs();
attrs[RGW_ATTR_BUCKET_NOTIFICATION] = std::move(bl);
- op_ret = bucket->merge_and_store_attrs(this, attrs, y);
+ op_ret = s->bucket->merge_and_store_attrs(this, attrs, y);
if (op_ret < 0) {
- ldpp_dout(this, 1)
+ ldpp_dout(this, 4)
<< "Failed to store RGW_ATTR_BUCKET_NOTIFICATION on bucket="
- << bucket->get_name() << " returned err= " << op_ret << dendl;
+ << s->bucket->get_name() << " returned err= " << op_ret << dendl;
return;
}
for (const auto& [_, topic] : topics) {
const auto ret = driver->update_bucket_topic_mapping(
topic,
- rgw_make_bucket_entry_name(bucket->get_tenant(), bucket->get_name()),
+ rgw_make_bucket_entry_name(s->bucket->get_tenant(), s->bucket->get_name()),
/*add_mapping=*/true, y, this);
if (ret < 0) {
- ldpp_dout(this, 1) << "Failed to remove topic mapping on bucket="
- << bucket->get_name() << " ret= " << ret << dendl;
+ ldpp_dout(this, 4) << "Failed to remove topic mapping on bucket="
+ << s->bucket->get_name() << " ret= " << ret << dendl;
// TODO: decide whether this error should be propagated via op_ret
// op_ret = ret;
}
}
ldpp_dout(this, 20) << "successfully created bucket notification for bucket: "
- << bucket->get_name() << dendl;
+ << s->bucket->get_name() << dendl;
}
// command (extension to S3): DELETE /bucket?notification[=<notification-id>]
class RGWPSDeleteNotifOp : public RGWDefaultResponseOp {
- int get_params(std::string& notif_name) const {
+ std::string notif_name;
+ int get_params() {
bool exists;
notif_name = s->info.args.get("notification", &exists);
if (!exists) {
- ldpp_dout(this, 1) << "missing required param 'notification'" << dendl;
+ s->err.message = "Missing required parameter 'notification'";
return -EINVAL;
}
if (s->bucket_name.empty()) {
- ldpp_dout(this, 1) << "request must be on a bucket" << dendl;
+ s->err.message = "Missing required bucket name";
return -EINVAL;
}
return 0;
@@ -1308,6 +1401,7 @@ class RGWPSDeleteNotifOp : public RGWDefaultResponseOp {
void execute_v2(optional_yield y);
public:
+ int init_processing(optional_yield y) override;
int verify_permission(optional_yield y) override;
void pre_exec() override {
@@ -1321,45 +1415,48 @@ class RGWPSDeleteNotifOp : public RGWDefaultResponseOp {
void execute(optional_yield y) override;
};
-void RGWPSDeleteNotifOp::execute(optional_yield y) {
- if (rgw::all_zonegroups_support(*s->penv.site, rgw::zone_features::notification_v2)) {
- return execute_v2(y);
+int RGWPSDeleteNotifOp::init_processing(optional_yield y)
+{
+ int ret = get_params();
+ if (ret < 0) {
+ return ret;
}
- std::string notif_name;
- op_ret = get_params(notif_name);
- if (op_ret < 0) {
- return;
+ return RGWOp::init_processing(y);
+}
+
+int RGWPSDeleteNotifOp::verify_permission(optional_yield y) {
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketNotification)) {
+ return -EACCES;
}
+
+ return 0;
+}
+
+void RGWPSDeleteNotifOp::execute(optional_yield y) {
if (!driver->is_meta_master()) {
bufferlist indata;
op_ret = rgw_forward_request_to_master(
- this, *s->penv.site, s->user->get_id(), &indata, nullptr, s->info, y);
+ this, *s->penv.site, s->owner.id, &indata, nullptr, s->info, y);
if (op_ret < 0) {
- ldpp_dout(this, 1) << "DeleteBucketNotification "
+ ldpp_dout(this, 4) << "DeleteBucketNotification "
"forward_request_to_master returned error ret= "
<< op_ret << dendl;
return;
}
}
- std::unique_ptr<rgw::sal::Bucket> bucket;
- op_ret = driver->load_bucket(this, rgw_bucket(s->bucket_tenant, s->bucket_name),
- &bucket, y);
- if (op_ret < 0) {
- ldpp_dout(this, 1) << "failed to get bucket '" <<
- (s->bucket_tenant.empty() ? s->bucket_name : s->bucket_tenant + ":" + s->bucket_name) <<
- "' info, ret = " << op_ret << dendl;
- return;
+ if (rgw::all_zonegroups_support(*s->penv.site, rgw::zone_features::notification_v2)) {
+ return execute_v2(y);
}
- const RGWPubSub ps(driver, s->owner.id.tenant);
- const RGWPubSub::Bucket b(ps, bucket.get());
+ const RGWPubSub ps(driver, get_account_or_tenant(s->owner.id), *s->penv.site);
+ const RGWPubSub::Bucket b(ps, s->bucket.get());
// get all topics on a bucket
rgw_pubsub_bucket_topics bucket_topics;
op_ret = b.get_topics(this, bucket_topics, y);
if (op_ret < 0) {
- ldpp_dout(this, 1) << "failed to get list of topics from bucket '" << s->bucket_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 4) << "failed to get list of topics from bucket '" << s->bucket_name << "', ret=" << op_ret << dendl;
return;
}
@@ -1379,51 +1476,15 @@ void RGWPSDeleteNotifOp::execute(optional_yield y) {
op_ret = delete_all_notifications(this, bucket_topics, b, y, ps);
}
-int RGWPSDeleteNotifOp::verify_permission(optional_yield y) {
- if (!verify_bucket_permission(this, s, rgw::IAM::s3PutBucketNotification)) {
- return -EACCES;
- }
-
- return 0;
-}
-
void RGWPSDeleteNotifOp::execute_v2(optional_yield y) {
- std::string notif_name;
- op_ret = get_params(notif_name);
- if (op_ret < 0) {
- return;
- }
- if (!driver->is_meta_master()) {
- bufferlist indata;
- op_ret = rgw_forward_request_to_master(
- this, *s->penv.site, s->user->get_id(), &indata, nullptr, s->info, y);
- if (op_ret < 0) {
- ldpp_dout(this, 1) << "DeleteBucketNotification "
- "forward_request_to_master returned error ret= "
- << op_ret << dendl;
- return;
- }
- }
-
if (const auto ret = driver->stat_topics_v1(s->bucket_tenant, y, this); ret != -ENOENT) {
- ldpp_dout(this, 1) << "WARNING: " << (ret == 0 ? "topic migration in process" : "cannot determine topic migration status. ret = " + std::to_string(ret))
+ ldpp_dout(this, 4) << "WARNING: " << (ret == 0 ? "topic migration in process" : "cannot determine topic migration status. ret = " + std::to_string(ret))
<< ". please try again later" << dendl;
op_ret = -ERR_SERVICE_UNAVAILABLE;
return;
}
- std::unique_ptr<rgw::sal::Bucket> bucket;
- op_ret = driver->load_bucket(this, rgw_bucket(s->bucket_tenant, s->bucket_name),
- &bucket, y);
- if (op_ret < 0) {
- ldpp_dout(this, 1) << "failed to get bucket '"
- << (s->bucket_tenant.empty()
- ? s->bucket_name
- : s->bucket_tenant + ":" + s->bucket_name)
- << "' info, ret = " << op_ret << dendl;
- return;
- }
- op_ret = remove_notification_v2(this, driver, bucket.get(), notif_name, y);
+ op_ret = remove_notification_v2(this, driver, s->bucket.get(), notif_name, y);
}
// command (S3 compliant): GET /bucket?notification[=<notification-id>]
@@ -1434,11 +1495,11 @@ class RGWPSListNotifsOp : public RGWOp {
bool exists;
notif_name = s->info.args.get("notification", &exists);
if (!exists) {
- ldpp_dout(this, 1) << "missing required param 'notification'" << dendl;
+ s->err.message = "Missing required parameter 'notification'";
return -EINVAL;
}
if (s->bucket_name.empty()) {
- ldpp_dout(this, 1) << "request must be on a bucket" << dendl;
+ s->err.message = "Missing required bucket name";
return -EINVAL;
}
return 0;
@@ -1482,7 +1543,7 @@ void RGWPSListNotifsOp::execute(optional_yield y) {
op_ret = driver->load_bucket(this, rgw_bucket(s->bucket_tenant, s->bucket_name),
&bucket, y);
if (op_ret < 0) {
- ldpp_dout(this, 1) << "failed to get bucket '" <<
+ ldpp_dout(this, 4) << "failed to get bucket '" <<
(s->bucket_tenant.empty() ? s->bucket_name : s->bucket_tenant + ":" + s->bucket_name) <<
"' info, ret = " << op_ret << dendl;
return;
@@ -1494,12 +1555,12 @@ void RGWPSListNotifsOp::execute(optional_yield y) {
driver->stat_topics_v1(s->bucket_tenant, y, this) == -ENOENT) {
op_ret = get_bucket_notifications(this, bucket.get(), bucket_topics);
} else {
- const RGWPubSub ps(driver, s->owner.id.tenant);
+ const RGWPubSub ps(driver, get_account_or_tenant(s->owner.id), *s->penv.site);
const RGWPubSub::Bucket b(ps, bucket.get());
op_ret = b.get_topics(this, bucket_topics, y);
}
if (op_ret < 0) {
- ldpp_dout(this, 1) << "failed to get list of topics from bucket '"
+ ldpp_dout(this, 4) << "failed to get list of topics from bucket '"
<< s->bucket_name << "', ret=" << op_ret << dendl;
return;
}
@@ -1511,7 +1572,7 @@ void RGWPSListNotifsOp::execute(optional_yield y) {
return;
}
op_ret = -ENOENT;
- ldpp_dout(this, 1) << "failed to get notification info for '" << notif_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 4) << "failed to get notification info for '" << notif_name << "', ret=" << op_ret << dendl;
return;
}
// loop through all topics of the bucket
diff --git a/src/rgw/rgw_rest_role.cc b/src/rgw/rgw_rest_role.cc
index 6132b111178..2d6a96cfe62 100644
--- a/src/rgw/rgw_rest_role.cc
+++ b/src/rgw/rgw_rest_role.cc
@@ -1,7 +1,9 @@
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
+#include <algorithm>
#include <errno.h>
+#include <iterator>
#include <regex>
#include "common/errno.h"
@@ -15,7 +17,7 @@
#include "rgw_op.h"
#include "rgw_process_env.h"
#include "rgw_rest.h"
-#include "rgw_rest_conn.h"
+#include "rgw_rest_iam.h"
#include "rgw_rest_role.h"
#include "rgw_role.h"
#include "rgw_sal.h"
@@ -24,104 +26,41 @@
using namespace std;
-int forward_iam_request_to_master(const DoutPrefixProvider* dpp,
- const rgw::SiteConfig& site,
- const RGWUserInfo& user,
- bufferlist& indata,
- RGWXMLDecoder::XMLParser& parser,
- req_info& req, optional_yield y)
+int RGWRestRole::verify_permission(optional_yield y)
{
- const auto& period = site.get_period();
- if (!period) {
- return 0; // not multisite
- }
- if (site.is_meta_master()) {
- return 0; // don't need to forward metadata requests
- }
- const auto& pmap = period->period_map;
- auto zg = pmap.zonegroups.find(pmap.master_zonegroup);
- if (zg == pmap.zonegroups.end()) {
- return -EINVAL;
- }
- auto z = zg->second.zones.find(zg->second.master_zone);
- if (z == zg->second.zones.end()) {
- return -EINVAL;
- }
-
- RGWAccessKey creds;
- if (auto i = user.access_keys.begin(); i != user.access_keys.end()) {
- creds.id = i->first;
- creds.key = i->second.key;
- }
-
- // use the master zone's endpoints
- auto conn = RGWRESTConn{dpp->get_cct(), z->second.id, z->second.endpoints,
- std::move(creds), zg->second.id, zg->second.api_name};
- bufferlist outdata;
- constexpr size_t max_response_size = 128 * 1024; // we expect a very small response
- int ret = conn.forward_iam_request(dpp, req, nullptr, max_response_size,
- &indata, &outdata, y);
- if (ret < 0) {
- return ret;
+ if (verify_user_permission(this, s, resource, action)) {
+ return 0;
}
- std::string r = rgw_bl_str(outdata);
- boost::replace_all(r, "&quot;", "\"");
-
- if (!parser.parse(r.c_str(), r.length(), 1)) {
- ldpp_dout(dpp, 0) << "ERROR: failed to parse response from master zonegroup" << dendl;
- return -EIO;
- }
- return 0;
+ return RGWRESTOp::verify_permission(y);
}
-int RGWRestRole::verify_permission(optional_yield y)
+int RGWRestRole::check_caps(const RGWUserCaps& caps)
{
- if (s->auth.identity->is_anonymous()) {
- return -EACCES;
- }
-
- string role_name = s->info.args.get("RoleName");
- if (int ret = check_caps(s->user->get_caps()); ret == 0) {
- return ret;
- }
-
- string resource_name = _role->get_path() + role_name;
- uint64_t op = get_op();
- if (!verify_user_permission(this,
- s,
- rgw::ARN(resource_name,
- "role",
- s->user->get_tenant(), true),
- op)) {
- return -EACCES;
- }
-
- return 0;
+ return caps.check_cap("roles", perm);
}
-int RGWRestRole::init_processing(optional_yield y)
+static void dump_iam_role(const rgw::sal::RGWRoleInfo& role, Formatter *f)
{
- string role_name = s->info.args.get("RoleName");
- std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name,
- s->user->get_tenant());
- if (int ret = role->get(s, y); ret < 0) {
- if (ret == -ENOENT) {
- return -ERR_NO_ROLE_FOUND;
- }
- return ret;
- }
- _role = std::move(role);
- return 0;
+ encode_json("RoleId", role.id, f);
+ encode_json("RoleName", role.name, f);
+ encode_json("Path", role.path, f);
+ encode_json("Arn", role.arn, f);
+ encode_json("CreateDate", role.creation_date, f);
+ encode_json("Description", role.description, f);
+ encode_json("MaxSessionDuration", role.max_session_duration, f);
+ encode_json("AssumeRolePolicyDocument", role.trust_policy, f);
}
-int RGWRestRole::parse_tags()
+static int parse_tags(const DoutPrefixProvider* dpp,
+ const std::map<std::string, std::string>& params,
+ std::multimap<std::string, std::string>& tags,
+ std::string& message)
{
vector<string> keys, vals;
- auto val_map = s->info.args.get_params();
const regex pattern_key("Tags.member.([0-9]+).Key");
const regex pattern_value("Tags.member.([0-9]+).Value");
- for (auto& v : val_map) {
+ for (const auto& v : params) {
string key_index="", value_index="";
for(sregex_iterator it = sregex_iterator(
v.first.begin(), v.first.end(), pattern_key);
@@ -129,7 +68,7 @@ int RGWRestRole::parse_tags()
smatch match;
match = *it;
key_index = match.str(1);
- ldout(s->cct, 20) << "Key index: " << match.str(1) << dendl;
+ ldpp_dout(dpp, 20) << "Key index: " << match.str(1) << dendl;
if (!key_index.empty()) {
int index = stoi(key_index);
auto pos = keys.begin() + (index-1);
@@ -142,7 +81,7 @@ int RGWRestRole::parse_tags()
smatch match;
match = *it;
value_index = match.str(1);
- ldout(s->cct, 20) << "Value index: " << match.str(1) << dendl;
+ ldpp_dout(dpp, 20) << "Value index: " << match.str(1) << dendl;
if (!value_index.empty()) {
int index = stoi(value_index);
auto pos = vals.begin() + (index-1);
@@ -151,114 +90,150 @@ int RGWRestRole::parse_tags()
}
}
if (keys.size() != vals.size()) {
- ldout(s->cct, 0) << "No. of keys doesn't match with no. of values in tags" << dendl;
+ message = "Tags array found mismatched Keys/Values";
return -EINVAL;
}
for (size_t i = 0; i < keys.size(); i++) {
tags.emplace(keys[i], vals[i]);
- ldout(s->cct, 0) << "Tag Key: " << keys[i] << " Tag Value is: " << vals[i] << dendl;
+ ldpp_dout(dpp, 4) << "Tag Key: " << keys[i] << " Tag Value is: " << vals[i] << dendl;
}
return 0;
}
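// [Editor's note, not part of this change] The regexes above accept the
// AWS query encoding for tag lists, for example:
//
//   Tags.member.1.Key=env    Tags.member.1.Value=prod
//   Tags.member.2.Key=team   Tags.member.2.Value=storage
//
// which yields tags = {{"env","prod"}, {"team","storage"}}. Indices are
// 1-based, and any Key without a matching Value (or vice versa) trips the
// keys.size() != vals.size() check.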
-void RGWRestRole::send_response()
-{
- if (op_ret) {
- set_req_state_err(s, op_ret);
- }
- dump_errno(s);
- end_header(s, this);
-}
-
-int RGWRoleRead::check_caps(const RGWUserCaps& caps)
+static rgw::ARN make_role_arn(const std::string& path,
+ const std::string& name,
+ const std::string& account)
{
- return caps.check_cap("roles", RGW_CAP_READ);
+ return {string_cat_reserve(path, name), "role", account, true};
}
-int RGWRoleWrite::check_caps(const RGWUserCaps& caps)
+static int load_role(const DoutPrefixProvider* dpp, optional_yield y,
+ rgw::sal::Driver* driver, const rgw_account_id& account_id,
+ const std::string& tenant, const std::string& name,
+ std::unique_ptr<rgw::sal::RGWRole>& role,
+ rgw::ARN& resource, std::string& message)
{
- return caps.check_cap("roles", RGW_CAP_WRITE);
+ role = driver->get_role(name, tenant, account_id);
+ const int r = role->get(dpp, y);
+ if (r == -ENOENT) {
+ message = "No such RoleName in the tenant";
+ return -ERR_NO_ROLE_FOUND;
+ }
+ if (r >= 0) {
+ // construct the ARN once we know the path
+ const auto& arn_account = !account_id.empty() ? account_id : tenant;
+ resource = make_role_arn(role->get_path(),
+ role->get_name(),
+ arn_account);
+ }
+ return r;
}
-int RGWCreateRole::verify_permission(optional_yield y)
+// check the current role count against account limit
+int check_role_limit(const DoutPrefixProvider* dpp, optional_yield y,
+ rgw::sal::Driver* driver, std::string_view account_id,
+ std::string& err)
{
- if (s->auth.identity->is_anonymous()) {
- return -EACCES;
+ RGWAccountInfo account;
+ rgw::sal::Attrs attrs; // unused
+ RGWObjVersionTracker objv; // unused
+ int r = driver->load_account_by_id(dpp, y, account_id, account, attrs, objv);
+ if (r < 0) {
+ ldpp_dout(dpp, 4) << "failed to load iam account "
+ << account_id << ": " << cpp_strerror(r) << dendl;
+ return r;
}
- if (int ret = check_caps(s->user->get_caps()); ret == 0) {
- return ret;
+ if (account.max_roles < 0) { // max_roles < 0 means unlimited
+ return 0;
}
- string role_name = s->info.args.get("RoleName");
- string role_path = s->info.args.get("Path");
-
- string resource_name = role_path + role_name;
- if (!verify_user_permission(this,
- s,
- rgw::ARN(resource_name,
- "role",
- s->user->get_tenant(), true),
- get_op())) {
- return -EACCES;
+ uint32_t count = 0;
+ r = driver->count_account_roles(dpp, y, account_id, count);
+ if (r < 0) {
+ ldpp_dout(dpp, 4) << "failed to count roles for iam account "
+ << account_id << ": " << cpp_strerror(r) << dendl;
+ return r;
+ }
+ if (std::cmp_greater_equal(count, account.max_roles)) {
+ err = fmt::format("Role limit {} exceeded", account.max_roles);
+ return -ERR_LIMIT_EXCEEDED;
}
return 0;
}
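// [Editor's note, not part of this change] std::cmp_greater_equal (C++20,
// <utility>) compares the unsigned count with the signed max_roles without
// the usual sign-conversion trap:
//
//   uint32_t count = 5;
//   int limit = -1;
//   bool naive = (count >= static_cast<uint32_t>(limit));  // false: -1 wraps
//   bool safe  = std::cmp_greater_equal(count, limit);     // true: 5 >= -1
//
// Here max_roles < 0 already returned above, so the comparison only sees
// non-negative limits, but cmp_greater_equal keeps it correct (and
// warning-free) for all values.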
-int RGWCreateRole::init_processing(optional_yield y)
-{
- return 0; // avoid calling RGWRestRole::init_processing()
-}
-int RGWCreateRole::get_params()
+int RGWCreateRole::init_processing(optional_yield y)
{
role_name = s->info.args.get("RoleName");
+ if (!validate_iam_role_name(role_name, s->err.message)) {
+ return -EINVAL;
+ }
+
role_path = s->info.args.get("Path");
+ if (role_path.empty()) {
+ role_path = "/";
+ } else if (!validate_iam_path(role_path, s->err.message)) {
+ return -EINVAL;
+ }
+
trust_policy = s->info.args.get("AssumeRolePolicyDocument");
+ description = s->info.args.get("Description");
max_session_duration = s->info.args.get("MaxSessionDuration");
- if (role_name.empty() || trust_policy.empty()) {
- ldpp_dout(this, 20) << "ERROR: one of role name or assume role policy document is empty"
- << dendl;
+ if (trust_policy.empty()) {
+ s->err.message = "Missing required element AssumeRolePolicyDocument";
return -EINVAL;
}
-
- bufferlist bl = bufferlist::static_from_string(trust_policy);
try {
const rgw::IAM::Policy p(
- s->cct, s->user->get_tenant(), bl,
+ s->cct, nullptr, trust_policy,
s->cct->_conf.get_val<bool>("rgw_policy_reject_invalid_principals"));
}
catch (rgw::IAM::PolicyParseException& e) {
- ldpp_dout(this, 5) << "failed to parse policy: " << e.what() << dendl;
+ ldpp_dout(this, 5) << "failed to parse policy '" << trust_policy << "' with: " << e.what() << dendl;
s->err.message = e.what();
return -ERR_MALFORMED_DOC;
}
+ if (description.size() > 1000) {
+ s->err.message = "Description exceeds maximum length of 1000 characters.";
+ return -EINVAL;
+ }
- int ret = parse_tags();
+ int ret = parse_tags(this, s->info.args.get_params(), tags, s->err.message);
if (ret < 0) {
return ret;
}
if (tags.size() > 50) {
- ldout(s->cct, 0) << "No. tags is greater than 50" << dendl;
- return -EINVAL;
+ s->err.message = "Tags count cannot exceed 50";
+ return -ERR_LIMIT_EXCEEDED;
}
+
+ if (const auto* id = std::get_if<rgw_account_id>(&s->owner.id); id) {
+ account_id = *id;
+ resource = make_role_arn(role_path, role_name, *id);
+
+ ret = check_role_limit(this, y, driver, account_id, s->err.message);
+ if (ret < 0) {
+ return ret;
+ }
+ } else {
+ resource = make_role_arn(role_path, role_name, s->user->get_tenant());
+ }
return 0;
}
void RGWCreateRole::execute(optional_yield y)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
std::string user_tenant = s->user->get_tenant();
std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name,
user_tenant,
+ account_id,
role_path,
trust_policy,
+ description,
max_session_duration,
tags);
if (!user_tenant.empty() && role->get_tenant() != user_tenant) {
@@ -327,13 +302,14 @@ void RGWCreateRole::execute(optional_yield y)
try {
if (role_obj) {
RGWXMLDecoder::decode_xml("RoleId", role_id, role_obj, true);
+ RGWXMLDecoder::decode_xml("CreateDate", role->get_info().creation_date, role_obj);
}
} catch (RGWXMLDecoder::err& err) {
ldpp_dout(this, 5) << "ERROR: unexpected xml: RoleId" << dendl;
op_ret = -EINVAL;
return;
}
- ldpp_dout(this, 0) << "role_id decoded from master zonegroup response is" << role_id << dendl;
+ ldpp_dout(this, 0) << "role_id decoded from master zonegroup response is " << role_id << dendl;
}
op_ret = role->create(s, true, role_id, y);
@@ -346,7 +322,7 @@ void RGWCreateRole::execute(optional_yield y)
s->formatter->open_object_section("CreateRoleResponse");
s->formatter->open_object_section("CreateRoleResult");
s->formatter->open_object_section("Role");
- role->dump(s->formatter);
+ dump_iam_role(role->get_info(), s->formatter);
s->formatter->close_section();
s->formatter->close_section();
s->formatter->open_object_section("ResponseMetadata");
@@ -356,29 +332,24 @@ void RGWCreateRole::execute(optional_yield y)
}
}
-int RGWDeleteRole::get_params()
+int RGWDeleteRole::init_processing(optional_yield y)
{
role_name = s->info.args.get("RoleName");
-
- if (role_name.empty()) {
- ldpp_dout(this, 20) << "ERROR: Role name is empty"<< dendl;
+ if (!validate_iam_role_name(role_name, s->err.message)) {
return -EINVAL;
}
- return 0;
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ }
+ return load_role(this, y, driver, account_id, s->user->get_tenant(),
+ role_name, role, resource, s->err.message);
}
void RGWDeleteRole::execute(optional_yield y)
{
- bool is_master = true;
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
-
const rgw::SiteConfig& site = *s->penv.site;
if (!site.is_meta_master()) {
- is_master = false;
RGWXMLDecoder::XMLParser parser;
if (!parser.init()) {
ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
@@ -398,16 +369,28 @@ void RGWDeleteRole::execute(optional_yield y)
}
}
- op_ret = _role->delete_obj(s, y);
+ op_ret = retry_raced_role_write(this, y, role.get(),
+ [this, y, &site] {
+ if (site.is_meta_master()) {
+ // only check on the master zone. if a forwarded DeleteRole request
+ // succeeds on the master zone, it needs to succeed here too
+ const auto& info = role->get_info();
+ if (!info.perm_policy_map.empty() ||
+ !info.managed_policies.arns.empty()) {
+ s->err.message = "The role cannot be deleted until all role policies are removed";
+ return -ERR_DELETE_CONFLICT;
+ }
+ }
+ return role->delete_obj(s, y);
+ });
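+ // [Editor's sketch, not part of this change] retry_raced_role_write
+ // appears to follow RGW's optimistic-concurrency pattern: run the
+ // mutation and, on a version conflict, refresh the object and retry the
+ // lambda. A generic sketch under that assumption (the error code and
+ // retry bound are illustrative):
+ //
+ //   template <typename F>
+ //   int retry_raced_write(F&& mutate, int max_tries = 10) {
+ //     int r = 0;
+ //     for (int i = 0; i < max_tries; ++i) {
+ //       r = mutate();
+ //       if (r != -ECANCELED) break;  // assumed "raced" indicator
+ //       // caller refreshes its snapshot before the next attempt
+ //     }
+ //     return r;
+ //   }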
if (op_ret == -ENOENT) {
// role has already been deleted, since metadata from the master has synced up
- if (!is_master) {
+ if (!site.is_meta_master()) {
op_ret = 0;
} else {
op_ret = -ERR_NO_ROLE_FOUND;
}
- return;
}
if (!op_ret) {
s->formatter->open_object_section("DeleteRoleResponse");
@@ -418,106 +401,62 @@ void RGWDeleteRole::execute(optional_yield y)
}
}
-int RGWGetRole::verify_permission(optional_yield y)
-{
- return 0;
-}
-
-int RGWGetRole::_verify_permission(const rgw::sal::RGWRole* role)
-{
- if (s->auth.identity->is_anonymous()) {
- return -EACCES;
- }
-
- if (int ret = check_caps(s->user->get_caps()); ret == 0) {
- return ret;
- }
-
- string resource_name = role->get_path() + role->get_name();
- if (!verify_user_permission(this,
- s,
- rgw::ARN(resource_name,
- "role",
- s->user->get_tenant(), true),
- get_op())) {
- return -EACCES;
- }
- return 0;
-}
-
int RGWGetRole::init_processing(optional_yield y)
{
- return 0; // avoid calling RGWRestRole::init_processing()
-}
-
-int RGWGetRole::get_params()
-{
role_name = s->info.args.get("RoleName");
-
- if (role_name.empty()) {
- ldpp_dout(this, 20) << "ERROR: Role name is empty"<< dendl;
+ if (!validate_iam_role_name(role_name, s->err.message)) {
return -EINVAL;
}
- return 0;
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ }
+ return load_role(this, y, driver, account_id, s->user->get_tenant(),
+ role_name, role, resource, s->err.message);
}
void RGWGetRole::execute(optional_yield y)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
- std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name,
- s->user->get_tenant());
- op_ret = role->get(s, y);
-
- if (op_ret == -ENOENT) {
- op_ret = -ERR_NO_ROLE_FOUND;
- return;
- }
-
- op_ret = _verify_permission(role.get());
-
- if (op_ret == 0) {
- s->formatter->open_object_section("GetRoleResponse");
- s->formatter->open_object_section("ResponseMetadata");
- s->formatter->dump_string("RequestId", s->trans_id);
- s->formatter->close_section();
- s->formatter->open_object_section("GetRoleResult");
- s->formatter->open_object_section("Role");
- role->dump(s->formatter);
- s->formatter->close_section();
- s->formatter->close_section();
- s->formatter->close_section();
- }
+ s->formatter->open_object_section("GetRoleResponse");
+ s->formatter->open_object_section("ResponseMetadata");
+ s->formatter->dump_string("RequestId", s->trans_id);
+ s->formatter->close_section();
+ s->formatter->open_object_section("GetRoleResult");
+ s->formatter->open_object_section("Role");
+ dump_iam_role(role->get_info(), s->formatter);
+ s->formatter->close_section();
+ s->formatter->close_section();
+ s->formatter->close_section();
}
-int RGWModifyRoleTrustPolicy::get_params()
+int RGWModifyRoleTrustPolicy::init_processing(optional_yield y)
{
role_name = s->info.args.get("RoleName");
- trust_policy = s->info.args.get("PolicyDocument");
+ if (!validate_iam_role_name(role_name, s->err.message)) {
+ return -EINVAL;
+ }
- if (role_name.empty() || trust_policy.empty()) {
- ldpp_dout(this, 20) << "ERROR: One of role name or trust policy is empty"<< dendl;
+ trust_policy = s->info.args.get("PolicyDocument");
+ if (trust_policy.empty()) {
+ s->err.message = "Missing required element PolicyDocument";
return -EINVAL;
}
+
JSONParser p;
if (!p.parse(trust_policy.c_str(), trust_policy.length())) {
ldpp_dout(this, 20) << "ERROR: failed to parse assume role policy doc" << dendl;
return -ERR_MALFORMED_DOC;
}
- return 0;
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ }
+ return load_role(this, y, driver, account_id, s->user->get_tenant(),
+ role_name, role, resource, s->err.message);
}
void RGWModifyRoleTrustPolicy::execute(optional_yield y)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
-
const rgw::SiteConfig& site = *s->penv.site;
if (!site.is_meta_master()) {
RGWXMLDecoder::XMLParser parser;
@@ -541,8 +480,11 @@ void RGWModifyRoleTrustPolicy::execute(optional_yield y)
}
}
- _role->update_trust_policy(trust_policy);
- op_ret = _role->update(this, y);
+ op_ret = retry_raced_role_write(this, y, role.get(),
+ [this, y] {
+ role->update_trust_policy(trust_policy);
+ return role->update(this, y);
+ });
s->formatter->open_object_section("UpdateAssumeRolePolicyResponse");
s->formatter->open_object_section("ResponseMetadata");
@@ -551,79 +493,93 @@ void RGWModifyRoleTrustPolicy::execute(optional_yield y)
s->formatter->close_section();
}
-int RGWListRoles::verify_permission(optional_yield y)
+int RGWListRoles::init_processing(optional_yield y)
{
- if (s->auth.identity->is_anonymous()) {
- return -EACCES;
- }
+ path_prefix = s->info.args.get("PathPrefix");
+ marker = s->info.args.get("Marker");
- if (int ret = check_caps(s->user->get_caps()); ret == 0) {
- return ret;
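+ // AWS documents MaxItems in the range [1, 1000]; values above 1000, or
+ // ones that fail to parse, are rejected up front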
+ int r = s->info.args.get_int("MaxItems", &max_items, max_items);
+ if (r < 0 || max_items > 1000) {
+ s->err.message = "Invalid value for MaxItems";
+ return -EINVAL;
}
- if (!verify_user_permission(this,
- s,
- rgw::ARN(),
- get_op())) {
- return -EACCES;
+ if (const auto* id = std::get_if<rgw_account_id>(&s->owner.id); id) {
+ account_id = *id;
}
-
- return 0;
-}
-
-int RGWListRoles::init_processing(optional_yield y)
-{
- return 0; // avoid calling RGWRestRole::init_processing()
-}
-
-int RGWListRoles::get_params()
-{
- path_prefix = s->info.args.get("PathPrefix");
-
return 0;
}
void RGWListRoles::execute(optional_yield y)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
+ rgw::sal::RoleList listing;
+ if (!account_id.empty()) {
+ // list roles from the account
+ op_ret = driver->list_account_roles(this, y, account_id, path_prefix,
+ marker, max_items, listing);
+ } else {
+ // list roles from the tenant
+ op_ret = driver->list_roles(this, y, s->auth.identity->get_tenant(),
+ path_prefix, marker, max_items, listing);
}
- vector<std::unique_ptr<rgw::sal::RGWRole>> result;
- op_ret = driver->get_roles(s, y, path_prefix, s->user->get_tenant(), result);
if (op_ret == 0) {
- s->formatter->open_array_section("ListRolesResponse");
- s->formatter->open_array_section("ListRolesResult");
- s->formatter->open_object_section("Roles");
- for (const auto& it : result) {
- s->formatter->open_object_section("member");
- it->dump(s->formatter);
- s->formatter->close_section();
+ s->formatter->open_object_section("ListRolesResponse");
+ s->formatter->open_object_section("ListRolesResult");
+ s->formatter->open_array_section("Roles");
+ for (const auto& info : listing.roles) {
+ encode_json("member", info, s->formatter);
}
- s->formatter->close_section();
- s->formatter->close_section();
+ s->formatter->close_section(); // Roles
+
+ const bool truncated = !listing.next_marker.empty();
+ encode_json("IsTruncated", truncated, s->formatter);
+ if (truncated) {
+ encode_json("Marker", listing.next_marker, s->formatter);
+ }
+
+ s->formatter->close_section(); // ListRolesResult
s->formatter->open_object_section("ResponseMetadata");
s->formatter->dump_string("RequestId", s->trans_id);
- s->formatter->close_section();
- s->formatter->close_section();
+ s->formatter->close_section(); // ResponseMetadata
+ s->formatter->close_section(); // ListRolesResponse
}
}
-int RGWPutRolePolicy::get_params()
+int RGWPutRolePolicy::init_processing(optional_yield y)
{
role_name = s->info.args.get("RoleName");
+ if (!validate_iam_role_name(role_name, s->err.message)) {
+ return -EINVAL;
+ }
+
policy_name = s->info.args.get("PolicyName");
perm_policy = s->info.args.get("PolicyDocument");
- if (role_name.empty() || policy_name.empty() || perm_policy.empty()) {
- ldpp_dout(this, 20) << "ERROR: One of role name, policy name or perm policy is empty"<< dendl;
+ if (policy_name.empty()) {
+ s->err.message = "Missing required element PolicyName";
return -EINVAL;
}
- bufferlist bl = bufferlist::static_from_string(perm_policy);
+ if (perm_policy.empty()) {
+ s->err.message = "Missing required element PolicyDocument";
+ return -EINVAL;
+ }
+
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ }
+ int r = load_role(this, y, driver, account_id, s->user->get_tenant(),
+ role_name, role, resource, s->err.message);
+ if (r < 0) {
+ return r;
+ }
+
try {
+ // non-account identity policy is restricted to the current tenant
+ const rgw::sal::RGWRoleInfo& info = role->get_info();
+ const std::string* policy_tenant = account_id.empty() ? &info.tenant : nullptr;
const rgw::IAM::Policy p(
- s->cct, s->user->get_tenant(), bl,
+ s->cct, policy_tenant, perm_policy,
s->cct->_conf.get_val<bool>("rgw_policy_reject_invalid_principals"));
}
catch (rgw::IAM::PolicyParseException& e) {
@@ -636,11 +592,6 @@ int RGWPutRolePolicy::get_params()
void RGWPutRolePolicy::execute(optional_yield y)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
-
const rgw::SiteConfig& site = *s->penv.site;
if (!site.is_meta_master()) {
RGWXMLDecoder::XMLParser parser;
@@ -665,8 +616,11 @@ void RGWPutRolePolicy::execute(optional_yield y)
}
}
- _role->set_perm_policy(policy_name, perm_policy);
- op_ret = _role->update(this, y);
+ op_ret = retry_raced_role_write(this, y, role.get(),
+ [this, y] {
+ role->set_perm_policy(policy_name, perm_policy);
+ return role->update(this, y);
+ });
if (op_ret == 0) {
s->formatter->open_object_section("PutRolePolicyResponse");
@@ -677,27 +631,30 @@ void RGWPutRolePolicy::execute(optional_yield y)
}
}
-int RGWGetRolePolicy::get_params()
+int RGWGetRolePolicy::init_processing(optional_yield y)
{
role_name = s->info.args.get("RoleName");
- policy_name = s->info.args.get("PolicyName");
+ if (!validate_iam_role_name(role_name, s->err.message)) {
+ return -EINVAL;
+ }
- if (role_name.empty() || policy_name.empty()) {
- ldpp_dout(this, 20) << "ERROR: One of role name or policy name is empty"<< dendl;
+ policy_name = s->info.args.get("PolicyName");
+ if (policy_name.empty()) {
+ s->err.message = "Missing required element PolicyName";
return -EINVAL;
}
- return 0;
+
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ }
+ return load_role(this, y, driver, account_id, s->user->get_tenant(),
+ role_name, role, resource, s->err.message);
}
void RGWGetRolePolicy::execute(optional_yield y)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
-
string perm_policy;
- op_ret = _role->get_role_policy(this, policy_name, perm_policy);
+ op_ret = role->get_role_policy(this, policy_name, perm_policy);
if (op_ret == -ENOENT) {
op_ret = -ERR_NO_SUCH_ENTITY;
}
@@ -716,25 +673,23 @@ void RGWGetRolePolicy::execute(optional_yield y)
}
}
-int RGWListRolePolicies::get_params()
+int RGWListRolePolicies::init_processing(optional_yield y)
{
role_name = s->info.args.get("RoleName");
-
- if (role_name.empty()) {
- ldpp_dout(this, 20) << "ERROR: Role name is empty"<< dendl;
+ if (!validate_iam_role_name(role_name, s->err.message)) {
return -EINVAL;
}
- return 0;
+
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ }
+ return load_role(this, y, driver, account_id, s->user->get_tenant(),
+ role_name, role, resource, s->err.message);
}
void RGWListRolePolicies::execute(optional_yield y)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
-
- std::vector<string> policy_names = _role->get_role_policy_names();
+ std::vector<string> policy_names = role->get_role_policy_names();
s->formatter->open_object_section("ListRolePoliciesResponse");
s->formatter->open_object_section("ResponseMetadata");
s->formatter->dump_string("RequestId", s->trans_id);
@@ -749,25 +704,28 @@ void RGWListRolePolicies::execute(optional_yield y)
s->formatter->close_section();
}
-int RGWDeleteRolePolicy::get_params()
+int RGWDeleteRolePolicy::init_processing(optional_yield y)
{
role_name = s->info.args.get("RoleName");
- policy_name = s->info.args.get("PolicyName");
+ if (!validate_iam_role_name(role_name, s->err.message)) {
+ return -EINVAL;
+ }
- if (role_name.empty() || policy_name.empty()) {
- ldpp_dout(this, 20) << "ERROR: One of role name or policy name is empty"<< dendl;
+ policy_name = s->info.args.get("PolicyName");
+ if (policy_name.empty()) {
+ s->err.message = "Missing required element PolicyName";
return -EINVAL;
}
- return 0;
+
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ }
+ return load_role(this, y, driver, account_id, s->user->get_tenant(),
+ role_name, role, resource, s->err.message);
}
void RGWDeleteRolePolicy::execute(optional_yield y)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
-
const rgw::SiteConfig& site = *s->penv.site;
if (!site.is_meta_master()) {
RGWXMLDecoder::XMLParser parser;
@@ -791,46 +749,52 @@ void RGWDeleteRolePolicy::execute(optional_yield y)
}
}
- op_ret = _role->delete_policy(this, policy_name);
- if (op_ret == -ENOENT) {
- op_ret = -ERR_NO_ROLE_FOUND;
- return;
- }
+ op_ret = retry_raced_role_write(this, y, role.get(),
+ [this, y, &site] {
+ int r = role->delete_policy(this, policy_name);
+ if (r == -ENOENT) {
+ if (!site.is_meta_master()) {
+ return 0; // delete succeeded on the master
+ }
+ s->err.message = "The requested PolicyName was not found";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ if (r == 0) {
+ r = role->update(this, y);
+ }
+ return r;
+ });
if (op_ret == 0) {
- op_ret = _role->update(this, y);
+ s->formatter->open_object_section("DeleteRolePolicyResponse");
+ s->formatter->open_object_section("ResponseMetadata");
+ s->formatter->dump_string("RequestId", s->trans_id);
+ s->formatter->close_section();
+ s->formatter->close_section();
}
-
- s->formatter->open_object_section("DeleteRolePoliciesResponse");
- s->formatter->open_object_section("ResponseMetadata");
- s->formatter->dump_string("RequestId", s->trans_id);
- s->formatter->close_section();
- s->formatter->close_section();
}
-int RGWTagRole::get_params()
+int RGWTagRole::init_processing(optional_yield y)
{
role_name = s->info.args.get("RoleName");
-
- if (role_name.empty()) {
- ldout(s->cct, 0) << "ERROR: Role name is empty" << dendl;
+ if (!validate_iam_role_name(role_name, s->err.message)) {
return -EINVAL;
}
- int ret = parse_tags();
- if (ret < 0) {
- return ret;
+
+ int r = parse_tags(this, s->info.args.get_params(), tags, s->err.message);
+ if (r < 0) {
+ return r;
}
- return 0;
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ }
+ return load_role(this, y, driver, account_id, s->user->get_tenant(),
+ role_name, role, resource, s->err.message);
}
void RGWTagRole::execute(optional_yield y)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
-
const rgw::SiteConfig& site = *s->penv.site;
if (!site.is_meta_master()) {
RGWXMLDecoder::XMLParser parser;
@@ -859,10 +823,14 @@ void RGWTagRole::execute(optional_yield y)
}
}
- op_ret = _role->set_tags(this, tags);
- if (op_ret == 0) {
- op_ret = _role->update(this, y);
- }
+ op_ret = retry_raced_role_write(this, y, role.get(),
+ [this, y] {
+ int r = role->set_tags(this, tags);
+ if (r == 0) {
+ r = role->update(this, y);
+ }
+ return r;
+ });
if (op_ret == 0) {
s->formatter->open_object_section("TagRoleResponse");
@@ -873,26 +841,23 @@ void RGWTagRole::execute(optional_yield y)
}
}
-int RGWListRoleTags::get_params()
+int RGWListRoleTags::init_processing(optional_yield y)
{
role_name = s->info.args.get("RoleName");
-
- if (role_name.empty()) {
- ldout(s->cct, 0) << "ERROR: Role name is empty" << dendl;
+ if (!validate_iam_role_name(role_name, s->err.message)) {
return -EINVAL;
}
- return 0;
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ }
+ return load_role(this, y, driver, account_id, s->user->get_tenant(),
+ role_name, role, resource, s->err.message);
}
void RGWListRoleTags::execute(optional_yield y)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
-
- boost::optional<multimap<string,string>> tag_map = _role->get_tags();
+ boost::optional<multimap<string,string>> tag_map = role->get_tags();
s->formatter->open_object_section("ListRoleTagsResponse");
s->formatter->open_object_section("ListRoleTagsResult");
if (tag_map) {
@@ -914,31 +879,32 @@ void RGWListRoleTags::execute(optional_yield y)
s->formatter->close_section();
}
-int RGWUntagRole::get_params()
+int RGWUntagRole::init_processing(optional_yield y)
{
role_name = s->info.args.get("RoleName");
-
- if (role_name.empty()) {
- ldout(s->cct, 0) << "ERROR: Role name is empty" << dendl;
+ if (!validate_iam_role_name(role_name, s->err.message)) {
return -EINVAL;
}
- auto val_map = s->info.args.get_params();
- for (auto& it : val_map) {
- if (it.first.find("TagKeys.member.") != string::npos) {
- tagKeys.emplace_back(it.second);
- }
+ const auto& params = s->info.args.get_params();
+ const std::string prefix = "TagKeys.member.";
+ if (auto l = params.lower_bound(prefix); l != params.end()) {
+ // copy matching values into the untag vector. the end of the prefix range
+ // is the first key that no longer starts with the prefix; note that
+ // upper_bound(prefix) alone would return the first "TagKeys.member.N" key
+ // and yield an empty range
+ auto u = std::find_if(l, params.end(),
+ [&prefix] (const std::pair<const std::string, std::string>& p) {
+ return p.first.compare(0, prefix.size(), prefix) != 0;
+ });
+ std::transform(l, u, std::back_inserter(untag),
+ [] (const std::pair<const std::string, std::string>& p) {
+ return p.second;
+ });
+ }
- return 0;
+
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ }
+ return load_role(this, y, driver, account_id, s->user->get_tenant(),
+ role_name, role, resource, s->err.message);
}
void RGWUntagRole::execute(optional_yield y)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
-
const rgw::SiteConfig& site = *s->penv.site;
if (!site.is_meta_master()) {
RGWXMLDecoder::XMLParser parser;
@@ -952,17 +918,11 @@ void RGWUntagRole::execute(optional_yield y)
s->info.args.remove("RoleName");
s->info.args.remove("Action");
s->info.args.remove("Version");
- auto& val_map = s->info.args.get_params();
- std::vector<std::multimap<std::string, std::string>::iterator> iters;
- for (auto it = val_map.begin(); it!= val_map.end(); it++) {
- if (it->first.find("Tags.member.") == 0) {
- iters.emplace_back(it);
- }
+ auto& params = s->info.args.get_params();
+ const std::string prefix = "TagKeys.member.";
+ // erase the whole prefix range (same range logic as in init_processing())
+ auto l = params.lower_bound(prefix);
+ params.erase(l, std::find_if(l, params.end(), [&prefix] (const auto& p) {
+ return p.first.compare(0, prefix.size(), prefix) != 0;
+ }));
- for (auto& it : iters) {
- val_map.erase(it);
- }
op_ret = forward_iam_request_to_master(this, site, s->user->get_info(),
bl_post_body, parser, s->info, y);
if (op_ret < 0) {
@@ -971,8 +931,11 @@ void RGWUntagRole::execute(optional_yield y)
}
}
- _role->erase_tags(tagKeys);
- op_ret = _role->update(this, y);
+ op_ret = retry_raced_role_write(this, y, role.get(),
+ [this, y] {
+ role->erase_tags(untag);
+ return role->update(this, y);
+ });
if (op_ret == 0) {
s->formatter->open_object_section("UntagRoleResponse");
@@ -983,26 +946,30 @@ void RGWUntagRole::execute(optional_yield y)
}
}
-int RGWUpdateRole::get_params()
+int RGWUpdateRole::init_processing(optional_yield y)
{
role_name = s->info.args.get("RoleName");
- max_session_duration = s->info.args.get("MaxSessionDuration");
+ if (!validate_iam_role_name(role_name, s->err.message)) {
+ return -EINVAL;
+ }
- if (role_name.empty()) {
- ldpp_dout(this, 20) << "ERROR: Role name is empty"<< dendl;
+ description = s->info.args.get_optional("Description");
+ if (description && description->size() > 1000) {
+ s->err.message = "Description exceeds maximum length of 1000 characters.";
return -EINVAL;
}
- return 0;
+ max_session_duration = s->info.args.get("MaxSessionDuration");
+
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ }
+ return load_role(this, y, driver, account_id, s->user->get_tenant(),
+ role_name, role, resource, s->err.message);
}
void RGWUpdateRole::execute(optional_yield y)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
-
const rgw::SiteConfig& site = *s->penv.site;
if (!site.is_meta_master()) {
RGWXMLDecoder::XMLParser parser;
@@ -1026,13 +993,18 @@ void RGWUpdateRole::execute(optional_yield y)
}
}
- _role->update_max_session_duration(max_session_duration);
- if (!_role->validate_max_session_duration(this)) {
- op_ret = -EINVAL;
- return;
- }
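+ // Description is optional: when the parameter is absent, the stored
+ // description is left untouched; MaxSessionDuration is validated inside
+ // the write retry before the update is committed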
+ op_ret = retry_raced_role_write(this, y, role.get(),
+ [this, y] {
+ if (description) {
+ role->get_info().description = std::move(*description);
+ }
+ role->update_max_session_duration(max_session_duration);
+ if (!role->validate_max_session_duration(this)) {
+ return -EINVAL;
+ }
- op_ret = _role->update(this, y);
+ return role->update(this, y);
+ });
s->formatter->open_object_section("UpdateRoleResponse");
s->formatter->open_object_section("UpdateRoleResult");
@@ -1041,3 +1013,280 @@ void RGWUpdateRole::execute(optional_yield y)
s->formatter->close_section();
s->formatter->close_section();
}
+
+static bool validate_policy_arn(const std::string& arn, std::string& err)
+{
+ if (arn.empty()) {
+ err = "Missing required element PolicyArn";
+ return false;
+ }
+
+ if (arn.size() > 2048) {
+ err = "PolicyArn must be at most 2048 characters long";
+ return false;
+ }
+
+ if (arn.size() < 20) {
+ err = "PolicyArn must be at least 20 characters long";
+ return false;
+ }
+
+ return true;
+}
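+
+// e.g. the AWS-managed "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
+// falls well inside the 20..2048 window enforced above, which mirrors
+// AWS's documented length limits for the PolicyArn parameter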
+
+class RGWAttachRolePolicy_IAM : public RGWRestRole {
+ bufferlist bl_post_body;
+ std::string role_name;
+ std::string policy_arn;
+ std::unique_ptr<rgw::sal::RGWRole> role;
+public:
+ explicit RGWAttachRolePolicy_IAM(const bufferlist& bl_post_body)
+ : RGWRestRole(rgw::IAM::iamAttachRolePolicy, RGW_CAP_WRITE),
+ bl_post_body(bl_post_body) {}
+ int init_processing(optional_yield y) override;
+ void execute(optional_yield y) override;
+ const char* name() const override { return "attach_role_policy"; }
+ RGWOpType get_type() override { return RGW_OP_ATTACH_ROLE_POLICY; }
+};
+
+int RGWAttachRolePolicy_IAM::init_processing(optional_yield y)
+{
+ // managed policy is only supported for account users. adding them to
+ // non-account users would give blanket permissions to all buckets
+ if (!s->auth.identity->get_account()) {
+ s->err.message = "Managed policies are only supported for account users";
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ role_name = s->info.args.get("RoleName");
+ if (!validate_iam_role_name(role_name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ policy_arn = s->info.args.get("PolicyArn");
+ if (!validate_policy_arn(policy_arn, s->err.message)) {
+ return -EINVAL;
+ }
+
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ }
+ return load_role(this, y, driver, account_id, s->user->get_tenant(),
+ role_name, role, resource, s->err.message);
+}
+
+void RGWAttachRolePolicy_IAM::execute(optional_yield y)
+{
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+
+ bufferlist data;
+ s->info.args.remove("RoleName");
+ s->info.args.remove("PolicyArn");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ op_ret = forward_iam_request_to_master(this, site, s->user->get_info(),
+ bl_post_body, parser, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl;
+ return;
+ }
+ }
+
+ try {
+ // make sure the policy exists
+ if (!rgw::IAM::get_managed_policy(s->cct, policy_arn)) {
+ op_ret = -ERR_NO_SUCH_ENTITY;
+ s->err.message = "The requested PolicyArn is not recognized";
+ return;
+ }
+ } catch (rgw::IAM::PolicyParseException& e) {
+ ldpp_dout(this, 5) << "failed to parse policy: " << e.what() << dendl;
+ s->err.message = e.what();
+ op_ret = -ERR_MALFORMED_DOC;
+ return;
+ }
+
+ op_ret = retry_raced_role_write(this, y, role.get(),
+ [this, y] {
+ // insert the policy arn. if it's already there, just return success
+ auto &policies = role->get_info().managed_policies;
+ if (!policies.arns.insert(policy_arn).second) {
+ return 0;
+ }
+ return role->update(this, y);
+ });
+
+ if (op_ret == 0) {
+ s->formatter->open_object_section_in_ns("AttachRolePolicyResponse", RGW_REST_IAM_XMLNS);
+ s->formatter->open_object_section("ResponseMetadata");
+ s->formatter->dump_string("RequestId", s->trans_id);
+ s->formatter->close_section();
+ s->formatter->close_section();
+ }
+}
+
+class RGWDetachRolePolicy_IAM : public RGWRestRole {
+ bufferlist bl_post_body;
+ std::string role_name;
+ std::string policy_arn;
+ std::unique_ptr<rgw::sal::RGWRole> role;
+public:
+ explicit RGWDetachRolePolicy_IAM(const bufferlist& bl_post_body)
+ : RGWRestRole(rgw::IAM::iamDetachRolePolicy, RGW_CAP_WRITE),
+ bl_post_body(bl_post_body) {}
+ int init_processing(optional_yield y) override;
+ void execute(optional_yield y) override;
+ const char* name() const override { return "detach_role_policy"; }
+ RGWOpType get_type() override { return RGW_OP_DETACH_ROLE_POLICY; }
+};
+
+int RGWDetachRolePolicy_IAM::init_processing(optional_yield y)
+{
+ // managed policy is only supported for account users. adding them to
+ // non-account users would give blanket permissions to all buckets
+ if (!s->auth.identity->get_account()) {
+ s->err.message = "Managed policies are only supported for account users";
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ role_name = s->info.args.get("RoleName");
+ if (!validate_iam_role_name(role_name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ policy_arn = s->info.args.get("PolicyArn");
+ if (!validate_policy_arn(policy_arn, s->err.message)) {
+ return -EINVAL;
+ }
+
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ }
+ return load_role(this, y, driver, account_id, s->user->get_tenant(),
+ role_name, role, resource, s->err.message);
+}
+
+void RGWDetachRolePolicy_IAM::execute(optional_yield y)
+{
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+
+ bufferlist data;
+ s->info.args.remove("RoleName");
+ s->info.args.remove("PolicyArn");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ op_ret = forward_iam_request_to_master(this, site, s->user->get_info(),
+ bl_post_body, parser, s->info, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << op_ret << dendl;
+ return;
+ }
+ }
+
+ op_ret = retry_raced_role_write(this, y, role.get(),
+ [this, y, &site] {
+ auto &policies = role->get_info().managed_policies;
+ auto p = policies.arns.find(policy_arn);
+ if (p == policies.arns.end()) {
+ if (!site.is_meta_master()) {
+ return 0; // delete succeeded on the master
+ }
+ s->err.message = "The requested PolicyArn is not attached to the role";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ policies.arns.erase(p);
+ return role->update(this, y);
+ });
+
+ if (op_ret == 0) {
+ s->formatter->open_object_section_in_ns("DetachRolePolicyResponse", RGW_REST_IAM_XMLNS);
+ s->formatter->open_object_section("ResponseMetadata");
+ s->formatter->dump_string("RequestId", s->trans_id);
+ s->formatter->close_section();
+ s->formatter->close_section();
+ }
+}
+
+class RGWListAttachedRolePolicies_IAM : public RGWRestRole {
+ std::string role_name;
+ std::unique_ptr<rgw::sal::RGWRole> role;
+public:
+ RGWListAttachedRolePolicies_IAM()
+ : RGWRestRole(rgw::IAM::iamListAttachedRolePolicies, RGW_CAP_READ)
+ {}
+ int init_processing(optional_yield y) override;
+ void execute(optional_yield y) override;
+ const char* name() const override { return "list_attached_role_policies"; }
+ RGWOpType get_type() override { return RGW_OP_LIST_ATTACHED_ROLE_POLICIES; }
+};
+
+int RGWListAttachedRolePolicies_IAM::init_processing(optional_yield y)
+{
+ // managed policy is only supported for account roles. adding them to
+ // non-account roles would give blanket permissions to all buckets
+ if (!s->auth.identity->get_account()) {
+ s->err.message = "Managed policies are only supported for account roles";
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ role_name = s->info.args.get("RoleName");
+ if (!validate_iam_role_name(role_name, s->err.message)) {
+ return -EINVAL;
+ }
+
+ if (const auto& account = s->auth.identity->get_account(); account) {
+ account_id = account->id;
+ }
+ return load_role(this, y, driver, account_id, s->user->get_tenant(),
+ role_name, role, resource, s->err.message);
+}
+
+void RGWListAttachedRolePolicies_IAM::execute(optional_yield y)
+{
+ s->formatter->open_object_section_in_ns("ListAttachedRolePoliciesResponse", RGW_REST_IAM_XMLNS);
+ s->formatter->open_object_section("ResponseMetadata");
+ s->formatter->dump_string("RequestId", s->trans_id);
+ s->formatter->close_section(); // ResponseMetadata
+ s->formatter->open_object_section("ListAttachedRolePoliciesResult");
+ s->formatter->open_array_section("AttachedPolicies");
+ for (const auto& policy : role->get_info().managed_policies.arns) {
+ s->formatter->open_object_section("member");
+ std::string_view arn = policy;
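+ // report the text after the first '/' as PolicyName, e.g. an arn ending
+ // in "policy/AmazonS3ReadOnlyAccess" yields "AmazonS3ReadOnlyAccess"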
+ if (auto p = arn.find('/'); p != arn.npos) {
+ s->formatter->dump_string("PolicyName", arn.substr(p + 1));
+ }
+ s->formatter->dump_string("PolicyArn", arn);
+ s->formatter->close_section(); // member
+ }
+ s->formatter->close_section(); // AttachedPolicies
+ s->formatter->close_section(); // ListAttachedRolePoliciesResult
+ s->formatter->close_section(); // ListAttachedRolePoliciesResponse
+}
+
+RGWOp* make_iam_attach_role_policy_op(const ceph::bufferlist& post_body) {
+ return new RGWAttachRolePolicy_IAM(post_body);
+}
+
+RGWOp* make_iam_detach_role_policy_op(const ceph::bufferlist& post_body) {
+ return new RGWDetachRolePolicy_IAM(post_body);
+}
+
+RGWOp* make_iam_list_attached_role_policies_op(const ceph::bufferlist& unused) {
+ return new RGWListAttachedRolePolicies_IAM();
+}
diff --git a/src/rgw/rgw_rest_role.h b/src/rgw/rgw_rest_role.h
index a93c418001f..b7c662c02c6 100644
--- a/src/rgw/rgw_rest_role.h
+++ b/src/rgw/rgw_rest_role.h
@@ -3,183 +3,210 @@
#pragma once
+#include <boost/optional.hpp>
#include "common/async/yield_context.h"
+#include "rgw_arn.h"
#include "rgw_role.h"
#include "rgw_rest.h"
class RGWRestRole : public RGWRESTOp {
-protected:
- std::string role_name;
- std::string role_path;
- std::string trust_policy;
- std::string policy_name;
- std::string perm_policy;
- std::string path_prefix;
- std::string max_session_duration;
- std::multimap<std::string,std::string> tags;
- std::vector<std::string> tagKeys;
- std::unique_ptr<rgw::sal::RGWRole> _role;
- int verify_permission(optional_yield y) override;
- int init_processing(optional_yield y) override;
- void send_response() override;
- virtual uint64_t get_op() = 0;
- int parse_tags();
-};
-
-class RGWRoleRead : public RGWRestRole {
-public:
- RGWRoleRead() = default;
+ const uint64_t action;
+ const uint32_t perm;
+ protected:
+ rgw_account_id account_id;
+ rgw::ARN resource; // must be initialized before verify_permission()
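+
+ // each op now supplies its IAM action (for policy evaluation) and the
+ // user-caps permission bit (for check_caps) at construction, replacing
+ // the old RGWRoleRead/RGWRoleWrite split and per-op get_op() overrides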
int check_caps(const RGWUserCaps& caps) override;
-};
-class RGWRoleWrite : public RGWRestRole {
-public:
- RGWRoleWrite() = default;
- int check_caps(const RGWUserCaps& caps) override;
+ RGWRestRole(uint64_t action, uint32_t perm) : action(action), perm(perm) {}
+ public:
+ int verify_permission(optional_yield y) override;
};
-class RGWCreateRole : public RGWRoleWrite {
+class RGWCreateRole : public RGWRestRole {
bufferlist bl_post_body;
+ std::string role_name;
+ std::string role_path;
+ std::string trust_policy;
+ std::string description;
+ std::string max_session_duration;
+ std::multimap<std::string, std::string> tags;
public:
- RGWCreateRole(const bufferlist& bl_post_body) : bl_post_body(bl_post_body) {};
- int verify_permission(optional_yield y) override;
+ explicit RGWCreateRole(const bufferlist& bl_post_body)
+ : RGWRestRole(rgw::IAM::iamCreateRole, RGW_CAP_WRITE),
+ bl_post_body(bl_post_body) {}
int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "create_role"; }
RGWOpType get_type() override { return RGW_OP_CREATE_ROLE; }
- uint64_t get_op() override { return rgw::IAM::iamCreateRole; }
};
-class RGWDeleteRole : public RGWRoleWrite {
+class RGWDeleteRole : public RGWRestRole {
bufferlist bl_post_body;
+ std::string role_name;
+ std::unique_ptr<rgw::sal::RGWRole> role;
public:
- RGWDeleteRole(const bufferlist& bl_post_body) : bl_post_body(bl_post_body) {};
+ explicit RGWDeleteRole(const bufferlist& bl_post_body)
+ : RGWRestRole(rgw::IAM::iamDeleteRole, RGW_CAP_WRITE),
+ bl_post_body(bl_post_body) {}
+ int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "delete_role"; }
RGWOpType get_type() override { return RGW_OP_DELETE_ROLE; }
- uint64_t get_op() override { return rgw::IAM::iamDeleteRole; }
};
-class RGWGetRole : public RGWRoleRead {
- int _verify_permission(const rgw::sal::RGWRole* role);
+class RGWGetRole : public RGWRestRole {
+ std::string role_name;
+ std::unique_ptr<rgw::sal::RGWRole> role;
public:
- RGWGetRole() = default;
- int verify_permission(optional_yield y) override;
- int init_processing(optional_yield y) override;
+ RGWGetRole() : RGWRestRole(rgw::IAM::iamGetRole, RGW_CAP_READ) {}
+ int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "get_role"; }
RGWOpType get_type() override { return RGW_OP_GET_ROLE; }
- uint64_t get_op() override { return rgw::IAM::iamGetRole; }
};
-class RGWModifyRoleTrustPolicy : public RGWRoleWrite {
+class RGWModifyRoleTrustPolicy : public RGWRestRole {
bufferlist bl_post_body;
+ std::string role_name;
+ std::string trust_policy;
+ std::unique_ptr<rgw::sal::RGWRole> role;
public:
- RGWModifyRoleTrustPolicy(const bufferlist& bl_post_body) : bl_post_body(bl_post_body) {};
+ explicit RGWModifyRoleTrustPolicy(const bufferlist& bl_post_body)
+ : RGWRestRole(rgw::IAM::iamModifyRoleTrustPolicy, RGW_CAP_WRITE),
+ bl_post_body(bl_post_body) {}
+ int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "modify_role_trust_policy"; }
RGWOpType get_type() override { return RGW_OP_MODIFY_ROLE_TRUST_POLICY; }
- uint64_t get_op() override { return rgw::IAM::iamModifyRoleTrustPolicy; }
};
-class RGWListRoles : public RGWRoleRead {
+class RGWListRoles : public RGWRestRole {
+ std::string path_prefix;
+ std::string marker;
+ int max_items = 100;
+ std::string next_marker;
public:
- RGWListRoles() = default;
- int verify_permission(optional_yield y) override;
+ RGWListRoles() : RGWRestRole(rgw::IAM::iamListRoles, RGW_CAP_READ) {}
int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "list_roles"; }
RGWOpType get_type() override { return RGW_OP_LIST_ROLES; }
- uint64_t get_op() override { return rgw::IAM::iamListRoles; }
};
-class RGWPutRolePolicy : public RGWRoleWrite {
+class RGWPutRolePolicy : public RGWRestRole {
bufferlist bl_post_body;
+ std::string role_name;
+ std::string policy_name;
+ std::string perm_policy;
+ std::unique_ptr<rgw::sal::RGWRole> role;
public:
- RGWPutRolePolicy(const bufferlist& bl_post_body) : bl_post_body(bl_post_body) {};
+ explicit RGWPutRolePolicy(const bufferlist& bl_post_body)
+ : RGWRestRole(rgw::IAM::iamPutRolePolicy, RGW_CAP_WRITE),
+ bl_post_body(bl_post_body) {}
+ int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "put_role_policy"; }
RGWOpType get_type() override { return RGW_OP_PUT_ROLE_POLICY; }
- uint64_t get_op() override { return rgw::IAM::iamPutRolePolicy; }
};
-class RGWGetRolePolicy : public RGWRoleRead {
+class RGWGetRolePolicy : public RGWRestRole {
+ std::string role_name;
+ std::string policy_name;
+ std::string perm_policy;
+ std::unique_ptr<rgw::sal::RGWRole> role;
public:
- RGWGetRolePolicy() = default;
+ RGWGetRolePolicy() : RGWRestRole(rgw::IAM::iamGetRolePolicy, RGW_CAP_READ) {}
+ int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "get_role_policy"; }
RGWOpType get_type() override { return RGW_OP_GET_ROLE_POLICY; }
- uint64_t get_op() override { return rgw::IAM::iamGetRolePolicy; }
};
-class RGWListRolePolicies : public RGWRoleRead {
+class RGWListRolePolicies : public RGWRestRole {
+ std::string role_name;
+ std::unique_ptr<rgw::sal::RGWRole> role;
public:
- RGWListRolePolicies() = default;
+ RGWListRolePolicies() : RGWRestRole(rgw::IAM::iamListRolePolicies, RGW_CAP_READ) {}
+ int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "list_role_policies"; }
RGWOpType get_type() override { return RGW_OP_LIST_ROLE_POLICIES; }
- uint64_t get_op() override { return rgw::IAM::iamListRolePolicies; }
};
-class RGWDeleteRolePolicy : public RGWRoleWrite {
+class RGWDeleteRolePolicy : public RGWRestRole {
bufferlist bl_post_body;
+ std::string role_name;
+ std::string policy_name;
+ std::unique_ptr<rgw::sal::RGWRole> role;
public:
- RGWDeleteRolePolicy(const bufferlist& bl_post_body) : bl_post_body(bl_post_body) {};
+ explicit RGWDeleteRolePolicy(const bufferlist& bl_post_body)
+ : RGWRestRole(rgw::IAM::iamDeleteRolePolicy, RGW_CAP_WRITE),
+ bl_post_body(bl_post_body) {}
+ int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "delete_role_policy"; }
RGWOpType get_type() override { return RGW_OP_DELETE_ROLE_POLICY; }
- uint64_t get_op() override { return rgw::IAM::iamDeleteRolePolicy; }
};
-class RGWTagRole : public RGWRoleWrite {
+class RGWTagRole : public RGWRestRole {
bufferlist bl_post_body;
+ std::string role_name;
+ std::multimap<std::string, std::string> tags;
+ std::unique_ptr<rgw::sal::RGWRole> role;
public:
- RGWTagRole(const bufferlist& bl_post_body) : bl_post_body(bl_post_body) {};
+ explicit RGWTagRole(const bufferlist& bl_post_body)
+ : RGWRestRole(rgw::IAM::iamTagRole, RGW_CAP_WRITE),
+ bl_post_body(bl_post_body) {}
+ int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "tag_role"; }
RGWOpType get_type() override { return RGW_OP_TAG_ROLE; }
- uint64_t get_op() override { return rgw::IAM::iamTagRole; }
};
-class RGWListRoleTags : public RGWRoleRead {
+class RGWListRoleTags : public RGWRestRole {
+ std::string role_name;
+ std::multimap<std::string, std::string> tags;
+ std::unique_ptr<rgw::sal::RGWRole> role;
public:
- RGWListRoleTags() = default;
+ RGWListRoleTags() : RGWRestRole(rgw::IAM::iamListRoleTags, RGW_CAP_READ) {}
+ int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "list_role_tags"; }
RGWOpType get_type() override { return RGW_OP_LIST_ROLE_TAGS; }
- uint64_t get_op() override { return rgw::IAM::iamListRoleTags; }
};
-class RGWUntagRole : public RGWRoleWrite {
+class RGWUntagRole : public RGWRestRole {
bufferlist bl_post_body;
+ std::string role_name;
+ std::vector<std::string> untag;
+ std::unique_ptr<rgw::sal::RGWRole> role;
public:
- RGWUntagRole(const bufferlist& bl_post_body) : bl_post_body(bl_post_body) {};
+ explicit RGWUntagRole(const bufferlist& bl_post_body)
+ : RGWRestRole(rgw::IAM::iamUntagRole, RGW_CAP_WRITE),
+ bl_post_body(bl_post_body) {}
+ int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "untag_role"; }
RGWOpType get_type() override { return RGW_OP_UNTAG_ROLE; }
- uint64_t get_op() override { return rgw::IAM::iamUntagRole; }
};
-class RGWUpdateRole : public RGWRoleWrite {
+class RGWUpdateRole : public RGWRestRole {
bufferlist bl_post_body;
+ std::string role_name;
+ boost::optional<std::string> description;
+ std::string max_session_duration;
+ std::unique_ptr<rgw::sal::RGWRole> role;
public:
- RGWUpdateRole(const bufferlist& bl_post_body) : bl_post_body(bl_post_body) {};
+ explicit RGWUpdateRole(const bufferlist& bl_post_body)
+ : RGWRestRole(rgw::IAM::iamUpdateRole, RGW_CAP_WRITE),
+ bl_post_body(bl_post_body) {}
+ int init_processing(optional_yield y) override;
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "update_role"; }
RGWOpType get_type() override { return RGW_OP_UPDATE_ROLE; }
- uint64_t get_op() override { return rgw::IAM::iamUpdateRole; }
-};
\ No newline at end of file
+};
+
+RGWOp* make_iam_attach_role_policy_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_detach_role_policy_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_list_attached_role_policies_op(const ceph::bufferlist& unused);
diff --git a/src/rgw/rgw_rest_s3.cc b/src/rgw/rgw_rest_s3.cc
index 3389f59d42d..a9a6c3699d0 100644
--- a/src/rgw/rgw_rest_s3.cc
+++ b/src/rgw/rgw_rest_s3.cc
@@ -1820,7 +1820,7 @@ void RGWListBucket_ObjStore_S3::send_versioned_response()
auto& storage_class = rgw_placement_rule::get_canonical_storage_class(iter->meta.storage_class);
s->formatter->dump_string("StorageClass", storage_class.c_str());
}
- dump_owner(s, rgw_user(iter->meta.owner), iter->meta.owner_display_name);
+ dump_owner(s, iter->meta.owner, iter->meta.owner_display_name);
if (iter->meta.appendable) {
s->formatter->dump_string("Type", "Appendable");
} else {
@@ -1911,7 +1911,7 @@ void RGWListBucket_ObjStore_S3::send_response()
s->formatter->dump_int("Size", iter->meta.accounted_size);
auto& storage_class = rgw_placement_rule::get_canonical_storage_class(iter->meta.storage_class);
s->formatter->dump_string("StorageClass", storage_class.c_str());
- dump_owner(s, rgw_user(iter->meta.owner), iter->meta.owner_display_name);
+ dump_owner(s, iter->meta.owner, iter->meta.owner_display_name);
if (s->system_request) {
s->formatter->dump_string("RgwxTag", iter->tag);
}
@@ -1988,7 +1988,7 @@ void RGWListBucket_ObjStore_S3v2::send_versioned_response()
s->formatter->dump_string("StorageClass", storage_class.c_str());
}
if (fetchOwner == true) {
- dump_owner(s, rgw_user(iter->meta.owner), iter->meta.owner_display_name);
+ dump_owner(s, iter->meta.owner, iter->meta.owner_display_name);
}
s->formatter->close_section();
}
@@ -2056,7 +2056,7 @@ void RGWListBucket_ObjStore_S3v2::send_response()
auto& storage_class = rgw_placement_rule::get_canonical_storage_class(iter->meta.storage_class);
s->formatter->dump_string("StorageClass", storage_class.c_str());
if (fetchOwner == true) {
- dump_owner(s, rgw_user(iter->meta.owner), iter->meta.owner_display_name);
+ dump_owner(s, iter->meta.owner, iter->meta.owner_display_name);
}
if (s->system_request) {
s->formatter->dump_string("RgwxTag", iter->tag);
@@ -2349,9 +2349,9 @@ static void dump_bucket_metadata(req_state *s, rgw::sal::Bucket* bucket,
dump_header(s, "X-RGW-Bytes-Used", static_cast<long long>(stats.size));
// only bucket's owner is allowed to get the quota settings of the account
- if (bucket->get_owner() == s->user->get_id()) {
- auto user_info = s->user->get_info();
- auto bucket_quota = s->bucket->get_info().quota; // bucket quota
+ if (s->auth.identity->is_owner_of(bucket->get_owner())) {
+ const auto& user_info = s->user->get_info();
+ const auto& bucket_quota = s->bucket->get_info().quota; // bucket quota
dump_header(s, "X-RGW-Quota-User-Size", static_cast<long long>(user_info.quota.user_quota.max_size));
dump_header(s, "X-RGW-Quota-User-Objects", static_cast<long long>(user_info.quota.user_quota.max_objects));
dump_header(s, "X-RGW-Quota-Max-Buckets", static_cast<long long>(user_info.max_buckets));
@@ -2381,7 +2381,7 @@ static int create_s3_policy(req_state *s, rgw::sal::Driver* driver,
if (!s->canned_acl.empty())
return -ERR_INVALID_REQUEST;
- return rgw::s3::create_policy_from_headers(s, driver, owner,
+ return rgw::s3::create_policy_from_headers(s, s->yield, driver, owner,
*s->info.env, policy);
}
@@ -3148,9 +3148,6 @@ int RGWPostObj_ObjStore_S3::get_policy(optional_yield y)
if (ret != 0) {
return -EACCES;
} else {
- /* Populate the owner info. */
- s->owner.id = s->user->get_id();
- s->owner.display_name = s->user->get_display_name();
ldpp_dout(this, 20) << "Successful Signature Verification!" << dendl;
}
@@ -4918,14 +4915,13 @@ int RGWHandler_REST_S3::postauth_init(optional_yield y)
{
struct req_init_state *t = &s->init_state;
- int ret = rgw_parse_url_bucket(t->url_bucket, s->user->get_tenant(),
+ const std::string& auth_tenant = s->auth.identity->get_tenant();
+
+ int ret = rgw_parse_url_bucket(t->url_bucket, auth_tenant,
s->bucket_tenant, s->bucket_name);
if (ret) {
return ret;
}
- if (s->auth.identity->get_identity_type() == TYPE_ROLE) {
- s->bucket_tenant = s->auth.identity->get_role_tenant();
- }
ldpp_dout(s, 10) << "s->object=" << s->object
<< " s->bucket=" << rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name) << dendl;
@@ -4940,12 +4936,6 @@ int RGWHandler_REST_S3::postauth_init(optional_yield y)
}
if (!t->src_bucket.empty()) {
- string auth_tenant;
- if (s->auth.identity->get_identity_type() == TYPE_ROLE) {
- auth_tenant = s->auth.identity->get_role_tenant();
- } else {
- auth_tenant = s->user->get_tenant();
- }
ret = rgw_parse_url_bucket(t->src_bucket, auth_tenant,
s->src_tenant_name, s->src_bucket_name);
if (ret) {
@@ -5087,13 +5077,7 @@ int RGW_Auth_S3::authorize(const DoutPrefixProvider *dpp,
return -EPERM;
}
- const auto ret = rgw::auth::Strategy::apply(dpp, auth_registry.get_s3_main(), s, y);
- if (ret == 0) {
- /* Populate the owner info. */
- s->owner.id = s->user->get_id();
- s->owner.display_name = s->user->get_display_name();
- }
- return ret;
+ return rgw::auth::Strategy::apply(dpp, auth_registry.get_s3_main(), s, y);
}
int RGWHandler_Auth_S3::init(rgw::sal::Driver* driver, req_state *state,
@@ -6307,6 +6291,14 @@ rgw::auth::s3::LocalEngine::authenticate(
}
}*/
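+ // local users may belong to an account and carry attached managed
+ // policies; load_account_and_policies() is assumed to gather both for
+ // the policy applier before any key/signature checks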
+ std::optional<RGWAccountInfo> account;
+ std::vector<IAM::Policy> policies;
+ int ret = load_account_and_policies(dpp, y, driver, user->get_info(),
+ user->get_attrs(), account, policies);
+ if (ret < 0) {
+ return result_t::deny(-EPERM);
+ }
+
const auto iter = user->get_info().access_keys.find(access_key_id);
if (iter == std::end(user->get_info().access_keys)) {
ldpp_dout(dpp, 0) << "ERROR: access key not encoded in user info" << dendl;
@@ -6316,8 +6308,9 @@ rgw::auth::s3::LocalEngine::authenticate(
/* Ignore signature for HTTP OPTIONS */
if (s->op_type == RGW_OP_OPTIONS_CORS) {
- auto apl = apl_factory->create_apl_local(cct, s, user->get_info(),
- k.subuser, std::nullopt, access_key_id);
+ auto apl = apl_factory->create_apl_local(
+ cct, s, user->get_info(), std::move(account), std::move(policies),
+ k.subuser, std::nullopt, access_key_id);
return result_t::grant(std::move(apl), completer_factory(k.key));
}
@@ -6336,8 +6329,9 @@ rgw::auth::s3::LocalEngine::authenticate(
return result_t::reject(-ERR_SIGNATURE_NO_MATCH);
}
- auto apl = apl_factory->create_apl_local(cct, s, user->get_info(),
- k.subuser, std::nullopt, access_key_id);
+ auto apl = apl_factory->create_apl_local(
+ cct, s, user->get_info(), std::move(account), std::move(policies),
+ k.subuser, std::nullopt, access_key_id);
return result_t::grant(std::move(apl), completer_factory(k.key));
}
@@ -6471,7 +6465,6 @@ rgw::auth::s3::STSEngine::authenticate(
}
// Get all the authorization info
- std::unique_ptr<rgw::sal::User> user;
rgw_user user_id;
string role_id;
rgw::auth::RoleApplier::Role r;
@@ -6483,24 +6476,29 @@ rgw::auth::s3::STSEngine::authenticate(
}
r.id = token.roleId;
r.name = role->get_name();
+ r.path = role->get_path();
r.tenant = role->get_tenant();
- vector<string> role_policy_names = role->get_role_policy_names();
- for (auto& policy_name : role_policy_names) {
- string perm_policy;
- if (int ret = role->get_role_policy(dpp, policy_name, perm_policy); ret == 0) {
- r.role_policies.push_back(std::move(perm_policy));
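+ // a role owned by an account records that account's id in its info;
+ // load the account, presumably so the applier can evaluate permissions
+ // in account scope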
+ const auto& account_id = role->get_account_id();
+ if (!account_id.empty()) {
+ r.account.emplace();
+ rgw::sal::Attrs attrs; // ignored
+ RGWObjVersionTracker objv; // ignored
+ int ret = driver->load_account_by_id(dpp, y, account_id,
+ *r.account, attrs, objv);
+ if (ret < 0) {
+ ldpp_dout(dpp, 1) << "ERROR: failed to load account "
+ << account_id << " for role " << r.name
+ << ": " << cpp_strerror(ret) << dendl;
+ return result_t::deny(-EPERM);
}
}
- }
- user = driver->get_user(token.user);
- if (! token.user.empty() && token.acct_type != TYPE_ROLE) {
- // get user info
- int ret = user->load_user(dpp, y);
- if (ret < 0) {
- ldpp_dout(dpp, 5) << "ERROR: failed reading user info: uid=" << token.user << dendl;
- return result_t::reject(-EPERM);
+ for (auto& [name, policy] : role->get_info().perm_policy_map) {
+ r.inline_policies.push_back(std::move(policy));
+ }
+ for (auto& arn : role->get_info().managed_policies.arns) {
+ r.managed_policies.push_back(std::move(arn));
}
}
@@ -6515,11 +6513,34 @@ rgw::auth::s3::STSEngine::authenticate(
t_attrs.token_claims = std::move(token.token_claims);
t_attrs.token_issued_at = std::move(token.issued_at);
t_attrs.principal_tags = std::move(token.principal_tags);
- auto apl = role_apl_factory->create_apl_role(cct, s, r, t_attrs);
+ auto apl = role_apl_factory->create_apl_role(cct, s, std::move(r),
+ std::move(t_attrs));
return result_t::grant(std::move(apl), completer_factory(token.secret_access_key));
- } else { // This is for all local users of type TYPE_RGW or TYPE_NONE
+ } else { // This is for all local users of type TYPE_RGW|ROOT|NONE
+ if (token.user.empty()) {
+ ldpp_dout(dpp, 5) << "ERROR: got session token with empty user id" << dendl;
+ return result_t::reject(-EPERM);
+ }
+ // load user info
+ auto user = driver->get_user(token.user);
+ int ret = user->load_user(dpp, y);
+ if (ret < 0) {
+ ldpp_dout(dpp, 5) << "ERROR: failed reading user info: uid=" << token.user << dendl;
+ return result_t::reject(-EPERM);
+ }
+
+ std::optional<RGWAccountInfo> account;
+ std::vector<IAM::Policy> policies;
+ ret = load_account_and_policies(dpp, y, driver, user->get_info(),
+ user->get_attrs(), account, policies);
+ if (ret < 0) {
+ return result_t::deny(-EPERM);
+ }
+
string subuser;
- auto apl = local_apl_factory->create_apl_local(cct, s, user->get_info(), subuser, token.perm_mask, std::string(_access_key_id));
+ auto apl = local_apl_factory->create_apl_local(
+ cct, s, user->get_info(), std::move(account), std::move(policies),
+ subuser, token.perm_mask, std::string(_access_key_id));
return result_t::grant(std::move(apl), completer_factory(token.secret_access_key));
}
}
diff --git a/src/rgw/rgw_rest_sts.cc b/src/rgw/rgw_rest_sts.cc
index d94181f4e63..b9c23aa159c 100644
--- a/src/rgw/rgw_rest_sts.cc
+++ b/src/rgw/rgw_rest_sts.cc
@@ -21,6 +21,7 @@
#include "common/ceph_json.h"
#include "rgw_rest.h"
+#include "rgw_account.h"
#include "rgw_auth.h"
#include "rgw_auth_registry.h"
#include "jwt-cpp/jwt.h"
@@ -79,8 +80,9 @@ WebTokenEngine::get_role_name(const string& role_arn) const
return role_name;
}
-std::unique_ptr<rgw::sal::RGWOIDCProvider>
-WebTokenEngine::get_provider(const DoutPrefixProvider *dpp, const string& role_arn, const string& iss, optional_yield y) const
+int WebTokenEngine::load_provider(const DoutPrefixProvider* dpp, optional_yield y,
+ const string& role_arn, const string& iss,
+ RGWOIDCProviderInfo& info) const
{
string tenant = get_role_tenant(role_arn);
@@ -99,16 +101,8 @@ WebTokenEngine::get_provider(const DoutPrefixProvider *dpp, const string& role_a
} else {
idp_url.erase(pos, 7);
}
- auto provider_arn = rgw::ARN(idp_url, "oidc-provider", tenant);
- string p_arn = provider_arn.to_string();
- std::unique_ptr<rgw::sal::RGWOIDCProvider> provider = driver->get_oidc_provider();
- provider->set_arn(p_arn);
- provider->set_tenant(tenant);
- auto ret = provider->get(dpp, y);
- if (ret < 0) {
- return nullptr;
- }
- return provider;
+
+ return driver->load_oidc_provider(dpp, y, tenant, idp_url, info);
}
bool
@@ -248,8 +242,9 @@ WebTokenEngine::get_from_jwt(const DoutPrefixProvider* dpp, const std::string& t
}
string role_arn = s->info.args.get("RoleArn");
- auto provider = get_provider(dpp, role_arn, iss, y);
- if (! provider) {
+ RGWOIDCProviderInfo provider;
+ int r = load_provider(dpp, y, role_arn, iss, provider);
+ if (r < 0) {
ldpp_dout(dpp, 0) << "Couldn't get oidc provider info using input iss" << iss << dendl;
throw -EACCES;
}
@@ -265,17 +260,15 @@ WebTokenEngine::get_from_jwt(const DoutPrefixProvider* dpp, const std::string& t
throw -EINVAL;
}
}
- vector<string> client_ids = provider->get_client_ids();
- vector<string> thumbprints = provider->get_thumbprints();
- if (! client_ids.empty()) {
+ if (! provider.client_ids.empty()) {
bool found = false;
for (auto& it : aud) {
- if (is_client_id_valid(client_ids, it)) {
+ if (is_client_id_valid(provider.client_ids, it)) {
found = true;
break;
}
}
- if (! found && ! is_client_id_valid(client_ids, client_id) && ! is_client_id_valid(client_ids, azp)) {
+ if (! found && ! is_client_id_valid(provider.client_ids, client_id) && ! is_client_id_valid(provider.client_ids, azp)) {
ldpp_dout(dpp, 0) << "Client id in token doesn't match with that registered with oidc provider" << dendl;
throw -EACCES;
}
@@ -284,7 +277,7 @@ WebTokenEngine::get_from_jwt(const DoutPrefixProvider* dpp, const std::string& t
if (decoded.has_algorithm()) {
auto& algorithm = decoded.get_algorithm();
try {
- validate_signature(dpp, decoded, algorithm, iss, thumbprints, y);
+ validate_signature(dpp, decoded, algorithm, iss, provider.thumbprints, y);
} catch (...) {
throw -EACCES;
}
@@ -496,14 +489,37 @@ WebTokenEngine::authenticate( const DoutPrefixProvider* dpp,
string role_arn = s->info.args.get("RoleArn");
string role_tenant = get_role_tenant(role_arn);
string role_name = get_role_name(role_arn);
- std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, role_tenant);
+
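+ // the tenant position in a role ARN may hold an account id instead of a
+ // tenant name; detect that so the role is looked up by account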
+ rgw_account_id role_account;
+ if (rgw::account::validate_id(role_tenant)) {
+ role_account = std::move(role_tenant);
+ role_tenant.clear();
+ }
+
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(role_name, role_tenant, role_account);
int ret = role->get(dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "Role not found: name:" << role_name << " tenant: " << role_tenant << dendl;
return result_t::deny(-EACCES);
}
+
+ std::optional<RGWAccountInfo> account;
+ if (!role_account.empty()) {
+ account.emplace();
+ rgw::sal::Attrs attrs; // ignored
+ RGWObjVersionTracker objv; // ignored
+ ret = driver->load_account_by_id(dpp, y, role_account,
+ *account, attrs, objv);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "Role account " << role_account << " not found" << dendl;
+ return result_t::deny(-EACCES);
+ }
+ }
+
boost::optional<multimap<string,string>> role_tags = role->get_tags();
- auto apl = apl_factory->create_apl_web_identity(cct, s, role_session, role_tenant, *t, role_tags, princ_tags);
+ auto apl = apl_factory->create_apl_web_identity(
+ cct, s, role->get_id(), role_session, role_tenant,
+ *t, role_tags, princ_tags, std::move(account));
return result_t::grant(std::move(apl));
}
return result_t::deny(-EACCES);
@@ -527,12 +543,14 @@ int RGWREST_STS::verify_permission(optional_yield y)
return ret;
}
string policy = role->get_assume_role_policy();
- buffer::list bl = buffer::list::static_from_string(policy);
//Parse the policy
//TODO - This step should be part of Role Creation
try {
- const rgw::IAM::Policy p(s->cct, s->user->get_tenant(), bl, false);
+ // resource policy is not restricted to the current tenant
+ const std::string* policy_tenant = nullptr;
+
+ const rgw::IAM::Policy p(s->cct, policy_tenant, policy, false);
if (!s->principal_tags.empty()) {
auto res = p.eval(s->env, *s->auth.identity, rgw::IAM::stsTagSession, boost::none);
if (res != rgw::IAM::Effect::Allow) {
@@ -621,7 +639,7 @@ void RGWSTSGetSessionToken::execute(optional_yield y)
op_ret = std::move(ret);
//Dump the output
if (op_ret == 0) {
- s->formatter->open_object_section("GetSessionTokenResponse");
+ s->formatter->open_object_section_in_ns("GetSessionTokenResponse", RGW_REST_STS_XMLNS);
s->formatter->open_object_section("GetSessionTokenResult");
s->formatter->open_object_section("Credentials");
creds.dump(s->formatter);
@@ -648,10 +666,9 @@ int RGWSTSAssumeRoleWithWebIdentity::get_params()
}
if (! policy.empty()) {
- bufferlist bl = bufferlist::static_from_string(policy);
try {
const rgw::IAM::Policy p(
- s->cct, s->user->get_tenant(), bl,
+ s->cct, nullptr, policy,
s->cct->_conf.get_val<bool>("rgw_policy_reject_invalid_principals"));
}
catch (rgw::IAM::PolicyParseException& e) {
@@ -677,7 +694,7 @@ void RGWSTSAssumeRoleWithWebIdentity::execute(optional_yield y)
//Dump the output
if (op_ret == 0) {
- s->formatter->open_object_section("AssumeRoleWithWebIdentityResponse");
+ s->formatter->open_object_section_in_ns("AssumeRoleWithWebIdentityResponse", RGW_REST_STS_XMLNS);
s->formatter->open_object_section("AssumeRoleWithWebIdentityResult");
encode_json("SubjectFromWebIdentityToken", response.sub , s->formatter);
encode_json("Audience", response.aud , s->formatter);
@@ -710,10 +727,9 @@ int RGWSTSAssumeRole::get_params()
}
if (! policy.empty()) {
- bufferlist bl = bufferlist::static_from_string(policy);
try {
const rgw::IAM::Policy p(
- s->cct, s->user->get_tenant(), bl,
+ s->cct, nullptr, policy,
s->cct->_conf.get_val<bool>("rgw_policy_reject_invalid_principals"));
}
catch (rgw::IAM::PolicyParseException& e) {
@@ -738,7 +754,7 @@ void RGWSTSAssumeRole::execute(optional_yield y)
op_ret = std::move(response.retCode);
//Dump the output
if (op_ret == 0) {
- s->formatter->open_object_section("AssumeRoleResponse");
+ s->formatter->open_object_section_in_ns("AssumeRoleResponse", RGW_REST_STS_XMLNS);
s->formatter->open_object_section("AssumeRoleResult");
s->formatter->open_object_section("Credentials");
response.creds.dump(s->formatter);
diff --git a/src/rgw/rgw_rest_sts.h b/src/rgw/rgw_rest_sts.h
index 91b9e98d303..43224400920 100644
--- a/src/rgw/rgw_rest_sts.h
+++ b/src/rgw/rgw_rest_sts.h
@@ -40,7 +40,9 @@ class WebTokenEngine : public rgw::auth::Engine {
bool is_cert_valid(const std::vector<std::string>& thumbprints, const std::string& cert) const;
- std::unique_ptr<rgw::sal::RGWOIDCProvider> get_provider(const DoutPrefixProvider *dpp, const std::string& role_arn, const std::string& iss, optional_yield y) const;
+ int load_provider(const DoutPrefixProvider *dpp, optional_yield y,
+ const std::string& role_arn, const std::string& iss,
+ RGWOIDCProviderInfo& info) const;
std::string get_role_tenant(const std::string& role_arn) const;
@@ -99,13 +101,17 @@ class DefaultStrategy : public rgw::auth::Strategy,
aplptr_t create_apl_web_identity( CephContext* cct,
const req_state* s,
+ const std::string& role_id,
const std::string& role_session,
const std::string& role_tenant,
const std::unordered_multimap<std::string, std::string>& token,
boost::optional<std::multimap<std::string, std::string>> role_tags,
- boost::optional<std::set<std::pair<std::string, std::string>>> principal_tags) const override {
+ boost::optional<std::set<std::pair<std::string, std::string>>> principal_tags,
+ std::optional<RGWAccountInfo> account) const override {
auto apl = rgw::auth::add_sysreq(cct, driver, s,
- rgw::auth::WebIdentityApplier(cct, driver, role_session, role_tenant, token, role_tags, principal_tags));
+ rgw::auth::WebIdentityApplier(cct, driver, role_id, role_session,
+ role_tenant, token, role_tags,
+ principal_tags, std::move(account)));
return aplptr_t(new decltype(apl)(std::move(apl)));
}
diff --git a/src/rgw/rgw_rest_swift.cc b/src/rgw/rgw_rest_swift.cc
index dd8d5a47321..bde5925dfd1 100644
--- a/src/rgw/rgw_rest_swift.cc
+++ b/src/rgw/rgw_rest_swift.cc
@@ -709,8 +709,7 @@ static int get_swift_container_settings(req_state * const s,
if (read_list || write_list) {
int r = rgw::swift::create_container_policy(s, driver,
- s->user->get_id(),
- s->user->get_display_name(),
+ s->owner,
read_list,
write_list,
*rw_mask,
@@ -823,7 +822,7 @@ int RGWCreateBucket_ObjStore_SWIFT::get_params(optional_yield y)
}
if (!has_policy) {
- policy.create_default(s->user->get_id(), s->user->get_display_name());
+ policy.create_default(s->owner.id, s->owner.display_name);
}
location_constraint = driver->get_zone()->get_zonegroup().get_api_name();
@@ -1048,7 +1047,7 @@ int RGWPutObj_ObjStore_SWIFT::get_params(optional_yield y)
}
}
- policy.create_default(s->user->get_id(), s->user->get_display_name());
+ policy.create_default(s->owner.id, s->owner.display_name);
int r = get_delete_at_param(s, delete_at);
if (r < 0) {
@@ -1167,9 +1166,7 @@ static int get_swift_account_settings(req_state * const s,
const char * const acl_attr = s->info.env->get("HTTP_X_ACCOUNT_ACCESS_CONTROL");
if (acl_attr) {
- int r = rgw::swift::create_account_policy(s, driver,
- s->user->get_id(),
- s->user->get_display_name(),
+ int r = rgw::swift::create_account_policy(s, driver, s->owner,
acl_attr, policy);
if (r < 0) {
return r;
@@ -1477,7 +1474,7 @@ static void dump_object_metadata(const DoutPrefixProvider* dpp, req_state * cons
int RGWCopyObj_ObjStore_SWIFT::init_dest_policy()
{
- dest_policy.create_default(s->user->get_id(), s->user->get_display_name());
+ dest_policy.create_default(s->owner.id, s->owner.display_name);
return 0;
}
@@ -2140,8 +2137,13 @@ bool RGWFormPost::is_integral()
bool r = false;
try {
- get_owner_info(s, s->user->get_info());
- s->auth.identity = rgw::auth::transform_old_authinfo(s);
+ s->user = get_owner_info(s);
+ auto result = rgw::auth::transform_old_authinfo(
+ this, s->yield, driver, s->user.get());
+ if (!result) {
+ return false;
+ }
+ s->auth.identity = std::move(result).value();
} catch (...) {
ldpp_dout(this, 5) << "cannot get user_info of account's owner" << dendl;
return false;
@@ -2182,8 +2184,8 @@ bool RGWFormPost::is_integral()
return r;
}
-void RGWFormPost::get_owner_info(const req_state* const s,
- RGWUserInfo& owner_info) const
+auto RGWFormPost::get_owner_info(const req_state* const s) const
+ -> std::unique_ptr<rgw::sal::User>
{
/* We cannot use req_state::bucket_name because it isn't available
* now. It will be initialized in RGWHandler_REST_SWIFT::postauth_init(). */
@@ -2230,15 +2232,22 @@ void RGWFormPost::get_owner_info(const req_state* const s,
throw ret;
}
- ldpp_dout(this, 20) << "temp url user (bucket owner): " << bucket->get_info().owner
- << dendl;
+ const rgw_owner& owner = bucket->get_owner();
+ const rgw_user* uid = std::get_if<rgw_user>(&owner);
+ if (!uid) {
+ ldpp_dout(this, 20) << "bucket " << *bucket << " is not owned by a user "
+ "so has no temp url keys" << dendl;
+ throw -EPERM;
+ }
+
+ ldpp_dout(this, 20) << "temp url user (bucket owner): " << *uid << dendl;
- user = driver->get_user(bucket->get_info().owner);
+ user = driver->get_user(*uid);
if (user->load_user(s, s->yield) < 0) {
throw -EPERM;
}
- owner_info = user->get_info();
+ return user;
}
int RGWFormPost::get_params(optional_yield y)
@@ -2249,7 +2258,7 @@ int RGWFormPost::get_params(optional_yield y)
return ret;
}
- policy.create_default(s->user->get_id(), s->user->get_display_name());
+ policy.create_default(s->owner.id, s->owner.display_name);
/* Let's start parsing the HTTP body by parsing each form part step-
* by-step till encountering the first part with file data. */
@@ -2398,6 +2407,16 @@ int RGWFormPost::get_data(ceph::bufferlist& bl, bool& again)
return bl.length();
}
+// override error_handler() to map error messages from abort_early(), which
+// doesn't end up calling our send_response()
+int RGWFormPost::error_handler(int err_no, std::string *error_content, optional_yield y)
+{
+ if (!err_msg.empty()) {
+ *error_content = err_msg;
+ }
+ return err_no;
+}
+
void RGWFormPost::send_response()
{
std::string redirect = get_part_str(ctrl_parts, "redirect");
@@ -2931,7 +2950,7 @@ int RGWHandler_REST_SWIFT::postauth_init(optional_yield y)
&& s->user->get_id().id == RGW_USER_ANON_ID) {
s->bucket_tenant = s->account_name;
} else {
- s->bucket_tenant = s->user->get_tenant();
+ s->bucket_tenant = s->auth.identity->get_tenant();
}
s->bucket_name = t->url_bucket;
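
RGWFormPost::get_owner_info() above now dispatches on the bucket owner with std::get_if, since the owner can be either a user or an account. A self-contained sketch of that variant-dispatch pattern; the types here are simplified stand-ins for rgw_user, rgw_account_id, and rgw_owner:

  #include <cerrno>
  #include <iostream>
  #include <string>
  #include <variant>

  struct user_id    { std::string id; };   // stand-in for rgw_user
  struct account_id { std::string id; };   // stand-in for rgw_account_id
  using owner_t = std::variant<user_id, account_id>;

  // Only user-owned buckets can carry temp url keys; account-owned
  // buckets take the -EPERM branch, mirroring get_owner_info() above.
  int check_owner(const owner_t& owner) {
    if (const user_id* uid = std::get_if<user_id>(&owner)) {
      std::cout << "temp url user (bucket owner): " << uid->id << "\n";
      return 0;
    }
    return -EPERM;
  }
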
diff --git a/src/rgw/rgw_rest_swift.h b/src/rgw/rgw_rest_swift.h
index 154a069d73f..eb1c4422e34 100644
--- a/src/rgw/rgw_rest_swift.h
+++ b/src/rgw/rgw_rest_swift.h
@@ -254,8 +254,7 @@ class RGWFormPost : public RGWPostObj_ObjStore {
bool is_next_file_to_upload() override;
bool is_integral();
bool is_non_expired();
- void get_owner_info(const req_state* s,
- RGWUserInfo& owner_info) const;
+ std::unique_ptr<rgw::sal::User> get_owner_info(const req_state* s) const;
parts_collection_t ctrl_parts;
boost::optional<post_form_part> current_data_part;
@@ -275,6 +274,7 @@ public:
int get_params(optional_yield y) override;
int get_data(ceph::bufferlist& bl, bool& again) override;
+ int error_handler(int err_no, std::string *error_content, optional_yield y) override;
void send_response() override;
static bool is_formpost_req(req_state* const s);
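
The error_handler() override declared above lets RGWFormPost substitute its own message when abort_early() fails the request before send_response() runs. A hedged sketch of the hook's contract, with a trivial stand-in for the op class:

  #include <string>

  // Stand-in for the RGWOp hook: return the (possibly remapped) error
  // code and optionally replace the generic response body.
  struct FormPostOp {
    std::string err_msg;        // filled in while parsing the form parts

    int error_handler(int err_no, std::string* error_content) {
      if (!err_msg.empty()) {
        *error_content = err_msg;   // surface the specific parse failure
      }
      return err_no;                // keep the original error code
    }
  };
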
diff --git a/src/rgw/rgw_rest_user_policy.cc b/src/rgw/rgw_rest_user_policy.cc
index 4103e4aff77..3832b9a4993 100644
--- a/src/rgw/rgw_rest_user_policy.cc
+++ b/src/rgw/rgw_rest_user_policy.cc
@@ -12,21 +12,20 @@
#include "rgw_string.h"
#include "rgw_common.h"
+#include "rgw_iam_managed_policy.h"
#include "rgw_op.h"
#include "rgw_process_env.h"
#include "rgw_rest.h"
+#include "rgw_rest_iam.h"
#include "rgw_rest_user_policy.h"
#include "rgw_sal.h"
#include "services/svc_zone.h"
#define dout_subsys ceph_subsys_rgw
-
-void RGWRestUserPolicy::dump(Formatter *f) const
+RGWRestUserPolicy::RGWRestUserPolicy(uint64_t action, uint32_t perm)
+ : action(action), perm(perm)
{
- encode_json("PolicyName", policy_name , f);
- encode_json("UserName", user_name , f);
- encode_json("PolicyDocument", policy, f);
}
void RGWRestUserPolicy::send_response()
@@ -38,149 +37,186 @@ void RGWRestUserPolicy::send_response()
end_header(s);
}
-int RGWRestUserPolicy::verify_permission(optional_yield y)
+int RGWRestUserPolicy::get_params()
{
- if (s->auth.identity->is_anonymous()) {
- return -EACCES;
- }
-
- if(int ret = check_caps(s->user->get_caps()); ret == 0) {
- return ret;
- }
-
- uint64_t op = get_op();
- std::string user_name = s->info.args.get("UserName");
- rgw_user user_id(user_name);
- if (! verify_user_permission(this, s, rgw::ARN(rgw::ARN(user_id.id,
- "user",
- user_id.tenant)), op)) {
- return -EACCES;
+ user_name = s->info.args.get("UserName");
+ if (!validate_iam_user_name(user_name, s->err.message)) {
+ return -EINVAL;
}
return 0;
}
-bool RGWRestUserPolicy::validate_input()
+int RGWRestUserPolicy::init_processing(optional_yield y)
{
- if (policy_name.length() > MAX_POLICY_NAME_LEN) {
- ldpp_dout(this, 0) << "ERROR: Invalid policy name length " << dendl;
- return false;
+ int r = get_params();
+ if (r < 0) {
+ return r;
}
- std::regex regex_policy_name("[A-Za-z0-9:=,.@-]+");
- if (! std::regex_match(policy_name, regex_policy_name)) {
- ldpp_dout(this, 0) << "ERROR: Invalid chars in policy name " << dendl;
- return false;
+ if (const auto* id = std::get_if<rgw_account_id>(&s->owner.id); id) {
+ account_id = *id;
+
+ // look up account user by UserName
+ const std::string& tenant = s->auth.identity->get_tenant();
+ r = driver->load_account_user_by_name(this, y, account_id,
+ tenant, user_name, &user);
+
+ if (r == -ENOENT) {
+ s->err.message = "No such UserName in the account";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ if (r >= 0) {
+ // user ARN includes account id, path, and display name
+ const RGWUserInfo& info = user->get_info();
+ const std::string resource = string_cat_reserve(info.path, info.display_name);
+ user_arn = rgw::ARN{resource, "user", account_id, true};
+ }
+ } else {
+ // interpret UserName as a uid with optional tenant
+ const auto uid = rgw_user{user_name};
+ // user ARN includes tenant and user id
+ user_arn = rgw::ARN{uid.id, "user", uid.tenant};
+
+ user = driver->get_user(uid);
+ r = user->load_user(this, y);
+ if (r == -ENOENT) {
+ s->err.message = "No such UserName in the tenant";
+ return -ERR_NO_SUCH_ENTITY;
+ }
}
- return true;
+ return r;
}
-int RGWUserPolicyRead::check_caps(const RGWUserCaps& caps)
+int RGWRestUserPolicy::check_caps(const RGWUserCaps& caps)
{
- return caps.check_cap("user-policy", RGW_CAP_READ);
+ return caps.check_cap("user-policy", perm);
}
-int RGWUserPolicyWrite::check_caps(const RGWUserCaps& caps)
+int RGWRestUserPolicy::verify_permission(optional_yield y)
{
- return caps.check_cap("user-policy", RGW_CAP_WRITE);
+ if (s->auth.identity->is_anonymous()) {
+ return -EACCES;
+ }
+
+ // admin caps are required for non-account users
+ if (check_caps(s->user->get_caps()) == 0) {
+ return 0;
+ }
+
+ if (! verify_user_permission(this, s, user_arn, action)) {
+ return -EACCES;
+ }
+ return 0;
}
-uint64_t RGWPutUserPolicy::get_op()
+
+RGWPutUserPolicy::RGWPutUserPolicy(const ceph::bufferlist& post_body)
+ : RGWRestUserPolicy(rgw::IAM::iamPutUserPolicy, RGW_CAP_WRITE),
+ post_body(post_body)
{
- return rgw::IAM::iamPutUserPolicy;
}
int RGWPutUserPolicy::get_params()
{
policy_name = s->info.args.get("PolicyName");
- user_name = s->info.args.get("UserName");
- policy = s->info.args.get("PolicyDocument");
-
- if (policy_name.empty() || user_name.empty() || policy.empty()) {
- ldpp_dout(this, 20) << "ERROR: one of policy name, user name or policy document is empty"
- << dendl;
+ if (!validate_iam_policy_name(policy_name, s->err.message)) {
return -EINVAL;
}
- if (! validate_input()) {
+ policy = s->info.args.get("PolicyDocument");
+ if (policy.empty()) {
+ s->err.message = "Missing required element PolicyDocument";
return -EINVAL;
}
- return 0;
+ return RGWRestUserPolicy::get_params();
}
-void RGWPutUserPolicy::execute(optional_yield y)
+int RGWPutUserPolicy::forward_to_master(optional_yield y, const rgw::SiteConfig& site)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
- }
-
- bufferlist bl = bufferlist::static_from_string(policy);
-
- std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(user_name));
-
- op_ret = user->load_user(s, s->yield);
- if (op_ret < 0) {
- op_ret = -ERR_NO_SUCH_ENTITY;
- return;
- }
-
- op_ret = user->read_attrs(s, s->yield);
- if (op_ret == -ENOENT) {
- op_ret = -ERR_NO_SUCH_ENTITY;
- return;
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
}
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
- nullptr, nullptr, s->info, y);
- if (op_ret < 0) {
- ldpp_dout(this, 0) << "ERROR: forward_request_to_master returned ret=" << op_ret << dendl;
- return;
+ s->info.args.remove("UserName");
+ s->info.args.remove("PolicyName");
+ s->info.args.remove("PolicyDocument");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
}
+ return 0;
+}
+void RGWPutUserPolicy::execute(optional_yield y)
+{
+ // validate the policy document
try {
+ // non-account identity policy is restricted to the current tenant
+ const std::string* policy_tenant = account_id.empty() ?
+ &s->user->get_tenant() : nullptr;
+
const rgw::IAM::Policy p(
- s->cct, s->user->get_tenant(), bl,
+ s->cct, policy_tenant, policy,
s->cct->_conf.get_val<bool>("rgw_policy_reject_invalid_principals"));
- std::map<std::string, std::string> policies;
- if (auto it = user->get_attrs().find(RGW_ATTR_USER_POLICY); it != user->get_attrs().end()) {
- bufferlist out_bl = it->second;
- decode(policies, out_bl);
- }
- bufferlist in_bl;
- policies[policy_name] = policy;
- constexpr unsigned int USER_POLICIES_MAX_NUM = 100;
- const unsigned int max_num = s->cct->_conf->rgw_user_policies_max_num < 0 ?
- USER_POLICIES_MAX_NUM : s->cct->_conf->rgw_user_policies_max_num;
- if (policies.size() > max_num) {
- ldpp_dout(this, 4) << "IAM user policies has reached the num config: "
- << max_num << ", cant add another" << dendl;
- op_ret = -ERR_INVALID_REQUEST;
- s->err.message =
- "The number of IAM user policies should not exceed allowed limit "
- "of " +
- std::to_string(max_num) + " policies.";
- return;
- }
- encode(policies, in_bl);
- user->get_attrs()[RGW_ATTR_USER_POLICY] = in_bl;
-
- op_ret = user->store_user(s, s->yield, false);
- if (op_ret < 0) {
- op_ret = -ERR_INTERNAL_ERROR;
- }
- } catch (buffer::error& err) {
- ldpp_dout(this, 0) << "ERROR: failed to decode user policies" << dendl;
- op_ret = -EIO;
- } catch (rgw::IAM::PolicyParseException& e) {
+ } catch (const rgw::IAM::PolicyParseException& e) {
ldpp_dout(this, 5) << "failed to parse policy: " << e.what() << dendl;
s->err.message = e.what();
op_ret = -ERR_MALFORMED_DOC;
+ return;
+ }
+
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ if (op_ret) {
+ return;
+ }
}
+ op_ret = retry_raced_user_write(this, y, user.get(),
+ [this, y] {
+ rgw::sal::Attrs& attrs = user->get_attrs();
+ std::map<std::string, std::string> policies;
+ if (auto it = attrs.find(RGW_ATTR_USER_POLICY); it != attrs.end()) try {
+ decode(policies, it->second);
+ } catch (const buffer::error& err) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode user policies" << dendl;
+ return -EIO;
+ }
+
+ policies[policy_name] = policy;
+
+ constexpr unsigned int USER_POLICIES_MAX_NUM = 100;
+ const unsigned int max_num = s->cct->_conf->rgw_user_policies_max_num < 0 ?
+ USER_POLICIES_MAX_NUM : s->cct->_conf->rgw_user_policies_max_num;
+ if (policies.size() > max_num) {
+ ldpp_dout(this, 4) << "IAM user policies has reached the num config: "
+ << max_num << ", cant add another" << dendl;
+ s->err.message =
+ "The number of IAM user policies should not exceed allowed limit "
+ "of " +
+ std::to_string(max_num) + " policies.";
+ return -ERR_LIMIT_EXCEEDED;
+ }
+
+ bufferlist bl;
+ encode(policies, bl);
+ attrs[RGW_ATTR_USER_POLICY] = std::move(bl);
+
+ return user->store_user(s, y, false);
+ });
+
if (op_ret == 0) {
- s->formatter->open_object_section("PutUserPolicyResponse");
+ s->formatter->open_object_section_in_ns("PutUserPolicyResponse", RGW_REST_IAM_XMLNS);
s->formatter->open_object_section("ResponseMetadata");
s->formatter->dump_string("RequestId", s->trans_id);
s->formatter->close_section();
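
The rewritten execute() above funnels the attribute update through retry_raced_user_write(), a read-modify-write helper that retries on version races. A simplified sketch of that idiom; User here is a stand-in, and the real helper also takes a DoutPrefixProvider and optional_yield:

  #include <cerrno>
  #include <functional>

  struct User {                 // stand-in for rgw::sal::User
    void reload() { /* re-read info, attrs and objv from the store */ }
  };

  // Re-apply the mutation while the backend reports a version race
  // (-ECANCELED), bounded so a persistent conflict cannot spin forever.
  int retry_raced_write(User& user, const std::function<int()>& mutate) {
    int r = mutate();
    for (int tries = 0; r == -ECANCELED && tries < 10; ++tries) {
      user.reload();
      r = mutate();
    }
    return r;
  }
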
@@ -188,227 +224,486 @@ void RGWPutUserPolicy::execute(optional_yield y)
}
}
-uint64_t RGWGetUserPolicy::get_op()
+
+RGWGetUserPolicy::RGWGetUserPolicy()
+ : RGWRestUserPolicy(rgw::IAM::iamGetUserPolicy, RGW_CAP_READ)
{
- return rgw::IAM::iamGetUserPolicy;
}
int RGWGetUserPolicy::get_params()
{
policy_name = s->info.args.get("PolicyName");
- user_name = s->info.args.get("UserName");
-
- if (policy_name.empty() || user_name.empty()) {
- ldpp_dout(this, 20) << "ERROR: one of policy name or user name is empty"
- << dendl;
+ if (!validate_iam_policy_name(policy_name, s->err.message)) {
return -EINVAL;
}
- return 0;
+ return RGWRestUserPolicy::get_params();
}
void RGWGetUserPolicy::execute(optional_yield y)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
+ std::map<std::string, std::string> policies;
+ if (auto it = user->get_attrs().find(RGW_ATTR_USER_POLICY); it != user->get_attrs().end()) {
+ try {
+ decode(policies, it->second);
+ } catch (buffer::error& err) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode user policies" << dendl;
+ op_ret = -EIO;
+ return;
+ }
}
- std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(user_name));
- op_ret = user->read_attrs(s, s->yield);
- if (op_ret == -ENOENT) {
- ldpp_dout(this, 0) << "ERROR: attrs not found for user" << user_name << dendl;
+ auto policy = policies.find(policy_name);
+ if (policy == policies.end()) {
+ s->err.message = "No such PolicyName on the user";
op_ret = -ERR_NO_SUCH_ENTITY;
return;
}
- if (op_ret == 0) {
- s->formatter->open_object_section("GetUserPolicyResponse");
- s->formatter->open_object_section("ResponseMetadata");
- s->formatter->dump_string("RequestId", s->trans_id);
- s->formatter->close_section();
- s->formatter->open_object_section("GetUserPolicyResult");
- std::map<std::string, std::string> policies;
- if (auto it = user->get_attrs().find(RGW_ATTR_USER_POLICY); it != user->get_attrs().end()) {
- bufferlist bl = it->second;
- try {
- decode(policies, bl);
- } catch (buffer::error& err) {
- ldpp_dout(this, 0) << "ERROR: failed to decode user policies" << dendl;
- op_ret = -EIO;
- return;
- }
- if (auto it = policies.find(policy_name); it != policies.end()) {
- policy = policies[policy_name];
- dump(s->formatter);
- } else {
- ldpp_dout(this, 0) << "ERROR: policy not found" << policy << dendl;
- op_ret = -ERR_NO_SUCH_ENTITY;
- return;
- }
- } else {
- ldpp_dout(this, 0) << "ERROR: RGW_ATTR_USER_POLICY not found" << dendl;
- op_ret = -ERR_NO_SUCH_ENTITY;
+ s->formatter->open_object_section_in_ns("GetUserPolicyResponse", RGW_REST_IAM_XMLNS);
+ s->formatter->open_object_section("ResponseMetadata");
+ s->formatter->dump_string("RequestId", s->trans_id);
+ s->formatter->close_section();
+ s->formatter->open_object_section("GetUserPolicyResult");
+ encode_json("PolicyName", policy_name , s->formatter);
+ encode_json("UserName", user_name, s->formatter);
+ encode_json("PolicyDocument", policy->second, s->formatter);
+ s->formatter->close_section();
+ s->formatter->close_section();
+}
+
+
+RGWListUserPolicies::RGWListUserPolicies()
+ : RGWRestUserPolicy(rgw::IAM::iamListUserPolicies, RGW_CAP_READ)
+{
+}
+
+int RGWListUserPolicies::get_params()
+{
+ marker = s->info.args.get("Marker");
+
+ int r = s->info.args.get_int("MaxItems", &max_items, max_items);
+ if (r < 0 || max_items > 1000) {
+ s->err.message = "Invalid value for MaxItems";
+ return -EINVAL;
+ }
+
+ return RGWRestUserPolicy::get_params();
+}
+
+void RGWListUserPolicies::execute(optional_yield y)
+{
+ std::map<std::string, std::string> policies;
+ if (auto it = user->get_attrs().find(RGW_ATTR_USER_POLICY); it != user->get_attrs().end()) {
+ try {
+ decode(policies, it->second);
+ } catch (buffer::error& err) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode user policies" << dendl;
+ op_ret = -EIO;
return;
}
- s->formatter->close_section();
- s->formatter->close_section();
}
- if (op_ret < 0) {
- op_ret = -ERR_INTERNAL_ERROR;
+
+ s->formatter->open_object_section_in_ns("ListUserPoliciesResponse", RGW_REST_IAM_XMLNS);
+ s->formatter->open_object_section("ResponseMetadata");
+ s->formatter->dump_string("RequestId", s->trans_id);
+ s->formatter->close_section();
+ s->formatter->open_object_section("ListUserPoliciesResult");
+ s->formatter->open_array_section("PolicyNames");
+ auto policy = policies.lower_bound(marker);
+ for (; policy != policies.end() && max_items > 0; ++policy, --max_items) {
+ s->formatter->dump_string("member", policy->first);
+ }
+ s->formatter->close_section(); // PolicyNames
+ const bool is_truncated = (policy != policies.end());
+ encode_json("IsTruncated", is_truncated, s->formatter);
+ if (is_truncated) {
+ encode_json("Marker", policy->first, s->formatter);
}
+ s->formatter->close_section(); // ListUserPoliciesResult
+ s->formatter->close_section(); // ListUserPoliciesResponse
}
-uint64_t RGWListUserPolicies::get_op()
+
+RGWDeleteUserPolicy::RGWDeleteUserPolicy(const ceph::bufferlist& post_body)
+ : RGWRestUserPolicy(rgw::IAM::iamDeleteUserPolicy, RGW_CAP_WRITE),
+ post_body(post_body)
{
- return rgw::IAM::iamListUserPolicies;
}
-int RGWListUserPolicies::get_params()
+int RGWDeleteUserPolicy::get_params()
{
- user_name = s->info.args.get("UserName");
+ policy_name = s->info.args.get("PolicyName");
+ if (!validate_iam_policy_name(policy_name, s->err.message)) {
+ return -EINVAL;
+ }
- if (user_name.empty()) {
- ldpp_dout(this, 20) << "ERROR: user name is empty" << dendl;
+ return RGWRestUserPolicy::get_params();
+}
+
+int RGWDeleteUserPolicy::forward_to_master(optional_yield y, const rgw::SiteConfig& site)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
return -EINVAL;
}
+ s->info.args.remove("UserName");
+ s->info.args.remove("PolicyName");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
return 0;
}
-void RGWListUserPolicies::execute(optional_yield y)
+void RGWDeleteUserPolicy::execute(optional_yield y)
{
- op_ret = get_params();
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ if (op_ret) {
+ return;
+ }
+ }
+
+ op_ret = retry_raced_user_write(this, y, user.get(),
+ [this, y, &site] {
+ rgw::sal::Attrs& attrs = user->get_attrs();
+ std::map<std::string, std::string> policies;
+ if (auto it = attrs.find(RGW_ATTR_USER_POLICY); it != attrs.end()) try {
+ decode(policies, it->second);
+ } catch (const buffer::error& err) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode user policies" << dendl;
+ return -EIO;
+ }
+
+ auto policy = policies.find(policy_name);
+ if (policy == policies.end()) {
+ if (!site.is_meta_master()) {
+ return 0; // delete succeeded on the master
+ }
+ s->err.message = "No such PolicyName on the user";
+ return -ERR_NO_SUCH_ENTITY;
+ }
+ policies.erase(policy);
+
+ bufferlist bl;
+ encode(policies, bl);
+ attrs[RGW_ATTR_USER_POLICY] = std::move(bl);
+
+ return user->store_user(s, y, false);
+ });
+
if (op_ret < 0) {
return;
}
- std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(user_name));
- op_ret = user->read_attrs(s, s->yield);
- if (op_ret == -ENOENT) {
- ldpp_dout(this, 0) << "ERROR: attrs not found for user" << user_name << dendl;
- op_ret = -ERR_NO_SUCH_ENTITY;
+ s->formatter->open_object_section_in_ns("DeleteUserPoliciesResponse", RGW_REST_IAM_XMLNS);
+ s->formatter->open_object_section("ResponseMetadata");
+ s->formatter->dump_string("RequestId", s->trans_id);
+ s->formatter->close_section();
+ s->formatter->close_section();
+}
+
+
+class RGWAttachUserPolicy_IAM : public RGWRestUserPolicy {
+ bufferlist post_body;
+ std::string policy_arn;
+
+ int get_params() override;
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
+ public:
+ explicit RGWAttachUserPolicy_IAM(const ceph::bufferlist& post_body)
+ : RGWRestUserPolicy(rgw::IAM::iamAttachUserPolicy, RGW_CAP_WRITE),
+ post_body(post_body) {}
+
+ void execute(optional_yield y) override;
+ const char* name() const override { return "attach_user_policy"; }
+ RGWOpType get_type() override { return RGW_OP_ATTACH_USER_POLICY; }
+};
+
+int RGWAttachUserPolicy_IAM::get_params()
+{
+ policy_arn = s->info.args.get("PolicyArn");
+ if (!validate_iam_policy_arn(policy_arn, s->err.message)) {
+ return -EINVAL;
+ }
+
+ return RGWRestUserPolicy::get_params();
+}
+
+int RGWAttachUserPolicy_IAM::forward_to_master(optional_yield y, const rgw::SiteConfig& site)
+{
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
+ }
+
+ s->info.args.remove("UserName");
+ s->info.args.remove("PolicyArn");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
+
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
+ }
+ return 0;
+}
+
+void RGWAttachUserPolicy_IAM::execute(optional_yield y)
+{
+ // validate the policy arn
+ try {
+ const auto p = rgw::IAM::get_managed_policy(s->cct, policy_arn);
+ if (!p) {
+      op_ret = -ERR_NO_SUCH_ENTITY;
+ s->err.message = "The requested PolicyArn is not recognized";
+ return;
+ }
+ } catch (const rgw::IAM::PolicyParseException& e) {
+ ldpp_dout(this, 5) << "failed to parse policy: " << e.what() << dendl;
+ s->err.message = e.what();
+ op_ret = -ERR_MALFORMED_DOC;
return;
}
- if (op_ret == 0) {
- std::map<std::string, std::string> policies;
- if (auto it = user->get_attrs().find(RGW_ATTR_USER_POLICY); it != user->get_attrs().end()) {
- s->formatter->open_object_section("ListUserPoliciesResponse");
- s->formatter->open_object_section("ResponseMetadata");
- s->formatter->dump_string("RequestId", s->trans_id);
- s->formatter->close_section();
- s->formatter->open_object_section("ListUserPoliciesResult");
- bufferlist bl = it->second;
- try {
- decode(policies, bl);
- } catch (buffer::error& err) {
- ldpp_dout(this, 0) << "ERROR: failed to decode user policies" << dendl;
- op_ret = -EIO;
- return;
- }
- s->formatter->open_object_section("PolicyNames");
- for (const auto& p : policies) {
- s->formatter->dump_string("member", p.first);
- }
- s->formatter->close_section();
- s->formatter->close_section();
- s->formatter->close_section();
- } else {
- ldpp_dout(this, 0) << "ERROR: RGW_ATTR_USER_POLICY not found" << dendl;
- op_ret = -ERR_NO_SUCH_ENTITY;
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ if (op_ret) {
return;
}
}
- if (op_ret < 0) {
- op_ret = -ERR_INTERNAL_ERROR;
+
+ op_ret = retry_raced_user_write(this, y, user.get(),
+ [this, y] {
+ rgw::sal::Attrs& attrs = user->get_attrs();
+ rgw::IAM::ManagedPolicies policies;
+ if (auto it = attrs.find(RGW_ATTR_MANAGED_POLICY); it != attrs.end()) try {
+ decode(policies, it->second);
+ } catch (buffer::error& err) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode user policies" << dendl;
+ return -EIO;
+ }
+ policies.arns.insert(policy_arn);
+
+ bufferlist bl;
+ encode(policies, bl);
+ attrs[RGW_ATTR_MANAGED_POLICY] = std::move(bl);
+
+ return user->store_user(this, y, false);
+ });
+
+ if (op_ret == 0) {
+ s->formatter->open_object_section_in_ns("AttachUserPolicyResponse", RGW_REST_IAM_XMLNS);
+ s->formatter->open_object_section("ResponseMetadata");
+ s->formatter->dump_string("RequestId", s->trans_id);
+ s->formatter->close_section();
+ s->formatter->close_section();
}
}
-uint64_t RGWDeleteUserPolicy::get_op()
+
+class RGWRestAttachedUserPolicy : public RGWRestUserPolicy {
+ public:
+ using RGWRestUserPolicy::RGWRestUserPolicy;
+ int init_processing(optional_yield y) override;
+};
+
+int RGWRestAttachedUserPolicy::init_processing(optional_yield y)
{
- return rgw::IAM::iamDeleteUserPolicy;
+ // managed policy is only supported for account users. adding them to
+  // non-account users would give blanket permissions to all buckets
+ if (!s->auth.identity->get_account()) {
+ s->err.message = "Managed policies are only supported for account users";
+ return -ERR_METHOD_NOT_ALLOWED;
+ }
+
+ return RGWRestUserPolicy::init_processing(y);
}
-int RGWDeleteUserPolicy::get_params()
-{
- policy_name = s->info.args.get("PolicyName");
- user_name = s->info.args.get("UserName");
+class RGWDetachUserPolicy_IAM : public RGWRestAttachedUserPolicy {
+ bufferlist post_body;
+ std::string policy_arn;
+
+ int get_params() override;
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
+ public:
+ explicit RGWDetachUserPolicy_IAM(const bufferlist& post_body)
+ : RGWRestAttachedUserPolicy(rgw::IAM::iamDetachUserPolicy, RGW_CAP_WRITE),
+ post_body(post_body) {}
+
+ void execute(optional_yield y) override;
+ const char* name() const override { return "detach_user_policy"; }
+ RGWOpType get_type() override { return RGW_OP_DETACH_USER_POLICY; }
+};
- if (policy_name.empty() || user_name.empty()) {
- ldpp_dout(this, 20) << "ERROR: One of policy name or user name is empty"<< dendl;
+int RGWDetachUserPolicy_IAM::get_params()
+{
+ policy_arn = s->info.args.get("PolicyArn");
+ if (!validate_iam_policy_arn(policy_arn, s->err.message)) {
return -EINVAL;
}
- return 0;
+ return RGWRestAttachedUserPolicy::get_params();
}
-void RGWDeleteUserPolicy::execute(optional_yield y)
+int RGWDetachUserPolicy_IAM::forward_to_master(optional_yield y, const rgw::SiteConfig& site)
{
- op_ret = get_params();
- if (op_ret < 0) {
- return;
+ RGWXMLDecoder::XMLParser parser;
+ if (!parser.init()) {
+ ldpp_dout(this, 0) << "ERROR: failed to initialize xml parser" << dendl;
+ return -EINVAL;
}
- std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(user_name));
- op_ret = user->load_user(s, s->yield);
- if (op_ret < 0) {
- op_ret = -ERR_NO_SUCH_ENTITY;
- return;
- }
+ s->info.args.remove("UserName");
+ s->info.args.remove("PolicyArn");
+ s->info.args.remove("Action");
+ s->info.args.remove("Version");
- op_ret = user->read_attrs(this, s->yield);
- if (op_ret == -ENOENT) {
- op_ret = -ERR_NO_SUCH_ENTITY;
- return;
+ int r = forward_iam_request_to_master(this, site, s->user->get_info(),
+ post_body, parser, s->info, y);
+ if (r < 0) {
+ ldpp_dout(this, 20) << "ERROR: forward_iam_request_to_master failed with error code: " << r << dendl;
+ return r;
}
+ return 0;
+}
- op_ret = rgw_forward_request_to_master(this, *s->penv.site, s->user->get_id(),
- nullptr, nullptr, s->info, y);
- if (op_ret < 0) {
- // a policy might've been uploaded to this site when there was no sync
- // req. in earlier releases, proceed deletion
- if (op_ret != -ENOENT) {
- ldpp_dout(this, 5) << "forward_request_to_master returned ret=" << op_ret << dendl;
+void RGWDetachUserPolicy_IAM::execute(optional_yield y)
+{
+ const rgw::SiteConfig& site = *s->penv.site;
+ if (!site.is_meta_master()) {
+ op_ret = forward_to_master(y, site);
+ if (op_ret) {
return;
}
- ldpp_dout(this, 0) << "ERROR: forward_request_to_master returned ret=" << op_ret << dendl;
}
- std::map<std::string, std::string> policies;
- if (auto it = user->get_attrs().find(RGW_ATTR_USER_POLICY); it != user->get_attrs().end()) {
- bufferlist out_bl = it->second;
+ op_ret = retry_raced_user_write(this, y, user.get(),
+ [this, y, &site] {
+ rgw::sal::Attrs& attrs = user->get_attrs();
+ rgw::IAM::ManagedPolicies policies;
+ if (auto it = attrs.find(RGW_ATTR_MANAGED_POLICY); it != attrs.end()) try {
+ decode(policies, it->second);
+ } catch (const buffer::error& err) {
+ ldpp_dout(this, 0) << "ERROR: failed to decode user policies" << dendl;
+ return -EIO;
+ }
+
+ auto i = policies.arns.find(policy_arn);
+ if (i == policies.arns.end()) {
+ if (!site.is_meta_master()) {
+ return 0; // delete succeeded on the master
+ }
+ s->err.message = "No such PolicyArn on the user";
+        return -ERR_NO_SUCH_ENTITY;
+ }
+ policies.arns.erase(i);
+
+ bufferlist bl;
+ encode(policies, bl);
+ attrs[RGW_ATTR_MANAGED_POLICY] = std::move(bl);
+
+ return user->store_user(this, y, false);
+ });
+
+ if (op_ret == 0) {
+ s->formatter->open_object_section_in_ns("DetachUserPolicyResponse", RGW_REST_IAM_XMLNS);
+ s->formatter->open_object_section("ResponseMetadata");
+ s->formatter->dump_string("RequestId", s->trans_id);
+ s->formatter->close_section();
+ s->formatter->close_section();
+ }
+}
+
+
+class RGWListAttachedUserPolicies_IAM : public RGWRestAttachedUserPolicy {
+ std::string marker;
+ int max_items = 100;
+ int get_params() override;
+ public:
+ RGWListAttachedUserPolicies_IAM()
+ : RGWRestAttachedUserPolicy(rgw::IAM::iamListAttachedUserPolicies, RGW_CAP_READ)
+ {}
+ void execute(optional_yield y) override;
+ const char* name() const override { return "list_attached_user_policies"; }
+ RGWOpType get_type() override { return RGW_OP_LIST_ATTACHED_USER_POLICIES; }
+};
+
+int RGWListAttachedUserPolicies_IAM::get_params()
+{
+ marker = s->info.args.get("Marker");
+
+ int r = s->info.args.get_int("MaxItems", &max_items, max_items);
+ if (r < 0 || max_items > 1000) {
+ s->err.message = "Invalid value for MaxItems";
+ return -EINVAL;
+ }
+
+ return RGWRestAttachedUserPolicy::get_params();
+}
+
+void RGWListAttachedUserPolicies_IAM::execute(optional_yield y)
+{
+ rgw::IAM::ManagedPolicies policies;
+ const auto& attrs = user->get_attrs();
+ if (auto it = attrs.find(RGW_ATTR_MANAGED_POLICY); it != attrs.end()) {
try {
- decode(policies, out_bl);
+ decode(policies, it->second);
} catch (buffer::error& err) {
ldpp_dout(this, 0) << "ERROR: failed to decode user policies" << dendl;
op_ret = -EIO;
return;
}
+ }
- if (auto p = policies.find(policy_name); p != policies.end()) {
- bufferlist in_bl;
- policies.erase(p);
- encode(policies, in_bl);
- user->get_attrs()[RGW_ATTR_USER_POLICY] = in_bl;
-
- op_ret = user->store_user(s, s->yield, false);
- if (op_ret < 0) {
- op_ret = -ERR_INTERNAL_ERROR;
- }
- if (op_ret == 0) {
- s->formatter->open_object_section("DeleteUserPoliciesResponse");
- s->formatter->open_object_section("ResponseMetadata");
- s->formatter->dump_string("RequestId", s->trans_id);
- s->formatter->close_section();
- s->formatter->close_section();
- }
- } else {
- op_ret = -ERR_NO_SUCH_ENTITY;
- return;
+ s->formatter->open_object_section_in_ns("ListAttachedUserPoliciesResponse", RGW_REST_IAM_XMLNS);
+ s->formatter->open_object_section("ResponseMetadata");
+ s->formatter->dump_string("RequestId", s->trans_id);
+ s->formatter->close_section();
+ s->formatter->open_object_section("ListAttachedUserPoliciesResult");
+ s->formatter->open_array_section("AttachedPolicies");
+ auto policy = policies.arns.lower_bound(marker);
+ for (; policy != policies.arns.end() && max_items > 0; ++policy, --max_items) {
+ s->formatter->open_object_section("member");
+ std::string_view arn = *policy;
+ if (auto p = arn.find('/'); p != arn.npos) {
+ s->formatter->dump_string("PolicyName", arn.substr(p + 1));
}
- } else {
- op_ret = -ERR_NO_SUCH_ENTITY;
- return;
+ s->formatter->dump_string("PolicyArn", arn);
+ s->formatter->close_section(); // member
+ }
+ s->formatter->close_section(); // AttachedPolicies
+ const bool is_truncated = (policy != policies.arns.end());
+ encode_json("IsTruncated", is_truncated, s->formatter);
+ if (is_truncated) {
+ encode_json("Marker", *policy, s->formatter);
}
+ s->formatter->close_section(); // ListAttachedUserPoliciesResult
+ s->formatter->close_section(); // ListAttachedUserPoliciesResponse
+}
+
+
+RGWOp* make_iam_attach_user_policy_op(const ceph::bufferlist& post_body) {
+ return new RGWAttachUserPolicy_IAM(post_body);
+}
+
+RGWOp* make_iam_detach_user_policy_op(const ceph::bufferlist& post_body) {
+ return new RGWDetachUserPolicy_IAM(post_body);
+}
+
+RGWOp* make_iam_list_attached_user_policies_op(const ceph::bufferlist& unused) {
+ return new RGWListAttachedUserPolicies_IAM();
}
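
The List* ops above all share the same Marker/MaxItems pagination: resume at lower_bound(marker), emit up to max_items entries, then report IsTruncated along with the next Marker. A runnable sketch of that scheme over a std::map:

  #include <iostream>
  #include <map>
  #include <string>

  int main() {
    const std::map<std::string, std::string> policies = {
      {"alpha", "{}"}, {"beta", "{}"}, {"gamma", "{}"}};
    std::string marker;                // empty on the first request
    int max_items = 2;

    auto it = policies.lower_bound(marker);
    for (; it != policies.end() && max_items > 0; ++it, --max_items) {
      std::cout << "member: " << it->first << "\n";
    }
    if (it != policies.end()) {        // IsTruncated
      std::cout << "Marker: " << it->first << "\n";  // resume point
    }
  }
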
diff --git a/src/rgw/rgw_rest_user_policy.h b/src/rgw/rgw_rest_user_policy.h
index 4a123456ecf..5e78eda61e9 100644
--- a/src/rgw/rgw_rest_user_policy.h
+++ b/src/rgw/rgw_rest_user_policy.h
@@ -2,72 +2,77 @@
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
+
+#include "rgw_arn.h"
#include "rgw_rest.h"
+#include "rgw_user_types.h"
+#include "rgw_sal_fwd.h"
class RGWRestUserPolicy : public RGWRESTOp {
protected:
- static constexpr int MAX_POLICY_NAME_LEN = 128;
+ RGWRestUserPolicy(uint64_t action, uint32_t perm);
+
+ uint64_t action;
+ uint32_t perm;
+ rgw_account_id account_id;
+ std::unique_ptr<rgw::sal::User> user;
+ rgw::ARN user_arn;
std::string policy_name;
std::string user_name;
std::string policy;
+ virtual int get_params();
bool validate_input();
public:
+ int init_processing(optional_yield y) override;
+ int check_caps(const RGWUserCaps& caps) override;
int verify_permission(optional_yield y) override;
- virtual uint64_t get_op() = 0;
void send_response() override;
- void dump(Formatter *f) const;
-};
-
-class RGWUserPolicyRead : public RGWRestUserPolicy {
-public:
- RGWUserPolicyRead() = default;
- int check_caps(const RGWUserCaps& caps) override;
-};
-
-class RGWUserPolicyWrite : public RGWRestUserPolicy {
-public:
- RGWUserPolicyWrite() = default;
- int check_caps(const RGWUserCaps& caps) override;
};
-class RGWPutUserPolicy : public RGWUserPolicyWrite {
+class RGWPutUserPolicy : public RGWRestUserPolicy {
+ bufferlist post_body;
+ int get_params() override;
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
public:
- RGWPutUserPolicy() = default;
+ RGWPutUserPolicy(const ceph::bufferlist& post_body);
void execute(optional_yield y) override;
- int get_params();
- const char* name() const override { return "put_user-policy"; }
- uint64_t get_op() override;
+ const char* name() const override { return "put_user_policy"; }
RGWOpType get_type() override { return RGW_OP_PUT_USER_POLICY; }
};
-class RGWGetUserPolicy : public RGWUserPolicyRead {
+class RGWGetUserPolicy : public RGWRestUserPolicy {
+ int get_params() override;
public:
- RGWGetUserPolicy() = default;
+ RGWGetUserPolicy();
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "get_user_policy"; }
- uint64_t get_op() override;
RGWOpType get_type() override { return RGW_OP_GET_USER_POLICY; }
};
-class RGWListUserPolicies : public RGWUserPolicyRead {
+class RGWListUserPolicies : public RGWRestUserPolicy {
+ std::string marker;
+ int max_items = 100;
+ int get_params() override;
public:
- RGWListUserPolicies() = default;
+ RGWListUserPolicies();
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "list_user_policies"; }
- uint64_t get_op() override;
RGWOpType get_type() override { return RGW_OP_LIST_USER_POLICIES; }
};
-class RGWDeleteUserPolicy : public RGWUserPolicyWrite {
+class RGWDeleteUserPolicy : public RGWRestUserPolicy {
+ bufferlist post_body;
+ int get_params() override;
+ int forward_to_master(optional_yield y, const rgw::SiteConfig& site);
public:
- RGWDeleteUserPolicy() = default;
+ RGWDeleteUserPolicy(const ceph::bufferlist& post_body);
void execute(optional_yield y) override;
- int get_params();
const char* name() const override { return "delete_user_policy"; }
- uint64_t get_op() override;
RGWOpType get_type() override { return RGW_OP_DELETE_USER_POLICY; }
};
+
+RGWOp* make_iam_attach_user_policy_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_detach_user_policy_op(const ceph::bufferlist& post_body);
+RGWOp* make_iam_list_attached_user_policies_op(const ceph::bufferlist& unused);
diff --git a/src/rgw/rgw_role.cc b/src/rgw/rgw_role.cc
index fb188e7f80f..bf9fb5d96f2 100644
--- a/src/rgw/rgw_role.cc
+++ b/src/rgw/rgw_role.cc
@@ -50,8 +50,10 @@ void RGWRoleInfo::dump(Formatter *f) const
encode_json("Path", path, f);
encode_json("Arn", arn, f);
encode_json("CreateDate", creation_date, f);
+ encode_json("Description", description, f);
encode_json("MaxSessionDuration", max_session_duration, f);
encode_json("AssumeRolePolicyDocument", trust_policy, f);
+ encode_json("AccountId", account_id, f);
if (!perm_policy_map.empty()) {
f->open_array_section("PermissionPolicies");
for (const auto& it : perm_policy_map) {
@@ -62,6 +64,13 @@ void RGWRoleInfo::dump(Formatter *f) const
}
f->close_section();
}
+ if (!managed_policies.arns.empty()) {
+ f->open_array_section("ManagedPermissionPolicies");
+ for (const auto& arn : managed_policies.arns) {
+ encode_json("PolicyArn", arn, f);
+ }
+ f->close_section();
+ }
if (!tags.empty()) {
f->open_array_section("Tags");
for (const auto& it : tags) {
@@ -81,8 +90,10 @@ void RGWRoleInfo::decode_json(JSONObj *obj)
JSONDecoder::decode_json("Path", path, obj);
JSONDecoder::decode_json("Arn", arn, obj);
JSONDecoder::decode_json("CreateDate", creation_date, obj);
+ JSONDecoder::decode_json("Description", description, obj);
JSONDecoder::decode_json("MaxSessionDuration", max_session_duration, obj);
JSONDecoder::decode_json("AssumeRolePolicyDocument", trust_policy, obj);
+ JSONDecoder::decode_json("AccountId", account_id, obj);
auto tags_iter = obj->find_first("Tags");
if (!tags_iter.end()) {
@@ -97,8 +108,8 @@ void RGWRoleInfo::decode_json(JSONObj *obj)
}
}
- auto perm_policy_iter = obj->find_first("PermissionPolicies");
- if (!perm_policy_iter.end()) {
+ if (auto perm_policy_iter = obj->find_first("PermissionPolicies");
+ !perm_policy_iter.end()) {
JSONObj* perm_policies = *perm_policy_iter;
auto iter = perm_policies->find_first();
@@ -110,6 +121,13 @@ void RGWRoleInfo::decode_json(JSONObj *obj)
}
}
+ if (auto p = obj->find_first("ManagedPermissionPolicies"); !p.end()) {
+ for (auto iter = (*p)->find_first(); !iter.end(); ++iter) {
+ std::string arn = (*iter)->get_data();
+ this->managed_policies.arns.insert(std::move(arn));
+ }
+ }
+
if (auto pos = name.find('$'); pos != std::string::npos) {
tenant = name.substr(0, pos);
name = name.substr(pos+1);
@@ -118,12 +136,15 @@ void RGWRoleInfo::decode_json(JSONObj *obj)
RGWRole::RGWRole(std::string name,
std::string tenant,
+ rgw_account_id account_id,
std::string path,
std::string trust_policy,
+ std::string description,
std::string max_session_duration_str,
std::multimap<std::string,std::string> tags)
{
info.name = std::move(name);
+ info.account_id = std::move(account_id);
info.path = std::move(path);
info.trust_policy = std::move(trust_policy);
info.tenant = std::move(tenant);
@@ -131,6 +152,7 @@ RGWRole::RGWRole(std::string name,
if (this->info.path.empty())
this->info.path = "/";
extract_name_tenant(this->info.name);
+ info.description = std::move(description);
if (max_session_duration_str.empty()) {
info.max_session_duration = SESSION_DURATION_MIN;
} else {
@@ -169,16 +191,6 @@ int RGWRole::get_by_id(const DoutPrefixProvider *dpp, optional_yield y)
return 0;
}
-void RGWRole::dump(Formatter *f) const
-{
- info.dump(f);
-}
-
-void RGWRole::decode_json(JSONObj *obj)
-{
- info.decode_json(obj);
-}
-
bool RGWRole::validate_max_session_duration(const DoutPrefixProvider* dpp)
{
if (info.max_session_duration < SESSION_DURATION_MIN ||
@@ -419,7 +431,8 @@ public:
auto* driver = mdo->get_driver();
info.mtime = mtime;
std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(info);
- int ret = role->create(dpp, true, info.id, y);
+ constexpr bool exclusive = false;
+ int ret = role->create(dpp, exclusive, info.id, y);
if (ret == -EEXIST) {
ret = role->update(dpp, y);
}
diff --git a/src/rgw/rgw_role.h b/src/rgw/rgw_role.h
index 9183829d976..585f7239735 100644
--- a/src/rgw/rgw_role.h
+++ b/src/rgw/rgw_role.h
@@ -11,6 +11,7 @@
#include "common/ceph_context.h"
#include "rgw_rados.h"
#include "rgw_metadata.h"
+#include "rgw_iam_managed_policy.h"
class RGWRados;
@@ -23,20 +24,25 @@ struct RGWRoleInfo
std::string arn;
std::string creation_date;
std::string trust_policy;
+ // map from PolicyName to an inline policy document from PutRolePolicy
std::map<std::string, std::string> perm_policy_map;
+ // set of managed policy arns from AttachRolePolicy
+ rgw::IAM::ManagedPolicies managed_policies;
std::string tenant;
- uint64_t max_session_duration;
+ std::string description;
+ uint64_t max_session_duration = 0;
std::multimap<std::string,std::string> tags;
std::map<std::string, bufferlist> attrs;
RGWObjVersionTracker objv_tracker;
real_time mtime;
+ rgw_account_id account_id;
RGWRoleInfo() = default;
~RGWRoleInfo() = default;
void encode(bufferlist& bl) const {
- ENCODE_START(3, 1, bl);
+ ENCODE_START(4, 1, bl);
encode(id, bl);
encode(name, bl);
encode(path, bl);
@@ -46,11 +52,14 @@ struct RGWRoleInfo
encode(perm_policy_map, bl);
encode(tenant, bl);
encode(max_session_duration, bl);
+ encode(account_id, bl);
+ encode(description, bl);
+ encode(managed_policies, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
- DECODE_START(3, bl);
+ DECODE_START(4, bl);
decode(id, bl);
decode(name, bl);
decode(path, bl);
@@ -64,6 +73,11 @@ struct RGWRoleInfo
if (struct_v >= 3) {
decode(max_session_duration, bl);
}
+ if (struct_v >= 4) {
+ decode(account_id, bl);
+ decode(description, bl);
+ decode(managed_policies, bl);
+ }
DECODE_FINISH(bl);
}
@@ -98,8 +112,10 @@ public:
RGWRole(std::string name,
std::string tenant,
+ rgw_account_id account_id,
std::string path="",
std::string trust_policy="",
+ std::string description="",
std::string max_session_duration_str="",
std::multimap<std::string,std::string> tags={});
@@ -114,10 +130,12 @@ public:
const std::string& get_id() const { return info.id; }
const std::string& get_name() const { return info.name; }
const std::string& get_tenant() const { return info.tenant; }
+ const rgw_account_id& get_account_id() const { return info.account_id; }
const std::string& get_path() const { return info.path; }
const std::string& get_create_date() const { return info.creation_date; }
const std::string& get_assume_role_policy() const { return info.trust_policy;}
const uint64_t& get_max_session_duration() const { return info.max_session_duration; }
+ RGWObjVersionTracker& get_objv_tracker() { return info.objv_tracker; }
const RGWObjVersionTracker& get_objv_tracker() const { return info.objv_tracker; }
const real_time& get_mtime() const { return info.mtime; }
std::map<std::string, bufferlist>& get_attrs() { return info.attrs; }
@@ -140,8 +158,6 @@ public:
boost::optional<std::multimap<std::string,std::string>> get_tags();
void erase_tags(const std::vector<std::string>& tagKeys);
void update_max_session_duration(const std::string& max_session_duration_str);
- void dump(Formatter *f) const;
- void decode_json(JSONObj *obj);
static const std::string& get_names_oid_prefix();
static const std::string& get_info_oid_prefix();
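
RGWRoleInfo::encode/decode above bump the struct version from 3 to 4 so the new account_id, description, and managed_policies fields round-trip while older blobs still decode. A toy model of that versioned-encoding idiom; Ceph's real ENCODE_START/DECODE_START macros do the same bookkeeping in a binary header:

  #include <string>
  #include <vector>

  struct RoleBlob {
    std::string name;
    std::string account_id;               // appended in struct version 4

    void encode(std::vector<std::string>& bl) const {
      bl = {"4", name, account_id};       // element 0 carries struct_v
    }
    void decode(const std::vector<std::string>& bl) {
      const int struct_v = std::stoi(bl.at(0));
      name = bl.at(1);
      if (struct_v >= 4) {                // old (v3) blobs stop here
        account_id = bl.at(2);
      }
    }
  };
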
diff --git a/src/rgw/rgw_sal.h b/src/rgw/rgw_sal.h
index e21a6180f8b..e3a892b6992 100644
--- a/src/rgw/rgw_sal.h
+++ b/src/rgw/rgw_sal.h
@@ -45,6 +45,7 @@ struct rgw_pubsub_topics;
struct rgw_pubsub_bucket_topics;
class RGWZonePlacementInfo;
struct rgw_pubsub_topic;
+struct RGWOIDCProviderInfo;
using RGWBucketListNameFilter = std::function<bool (const std::string&)>;
@@ -182,7 +183,6 @@ namespace rgw { namespace sal {
struct MPSerializer;
class GCChain;
-class RGWOIDCProvider;
class RGWRole;
enum AttrsMod {
@@ -231,6 +231,50 @@ class ObjectProcessor : public DataProcessor {
uint32_t flags) = 0;
};
+/**
+ * @brief A list of buckets
+ *
+ * This is the result from a bucket listing operation.
+ */
+struct BucketList {
+ /// The list of results, sorted by bucket name
+ std::vector<RGWBucketEnt> buckets;
+ /// The next marker to resume listing, or empty
+ std::string next_marker;
+};
+
+/// A list of roles
+struct RoleList {
+ /// The list of results, sorted by name
+ std::vector<RGWRoleInfo> roles;
+ /// The next marker to resume listing, or empty
+ std::string next_marker;
+};
+
+/// A list of users
+struct UserList {
+ /// The list of results, sorted by name
+ std::vector<RGWUserInfo> users;
+ /// The next marker to resume listing, or empty
+ std::string next_marker;
+};
+
+/// A list of groups
+struct GroupList {
+ /// The list of results, sorted by name
+ std::vector<RGWGroupInfo> groups;
+ /// The next marker to resume listing, or empty
+ std::string next_marker;
+};
+
+/// A list of topic names
+struct TopicList {
+ /// The list of results, sorted by name
+ std::vector<std::string> topics;
+ /// The next marker to resume listing, or empty
+ std::string next_marker;
+};
+
/** A list of key-value attributes */
using Attrs = std::map<std::string, ceph::buffer::list>;
@@ -268,6 +312,151 @@ class Driver {
virtual int get_user_by_email(const DoutPrefixProvider* dpp, const std::string& email, optional_yield y, std::unique_ptr<User>* user) = 0;
/** Lookup a User by swift username. Queries driver for user info. */
virtual int get_user_by_swift(const DoutPrefixProvider* dpp, const std::string& user_str, optional_yield y, std::unique_ptr<User>* user) = 0;
+
+ /** Lookup RGWAccountInfo by id */
+ virtual int load_account_by_id(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view id,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv) = 0;
+ /** Lookup RGWAccountInfo by name */
+ virtual int load_account_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view name,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv) = 0;
+ /** Lookup RGWAccountInfo by email address */
+ virtual int load_account_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view email,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv) = 0;
+ /** Write or overwrite an account */
+ virtual int store_account(const DoutPrefixProvider* dpp,
+ optional_yield y, bool exclusive,
+ const RGWAccountInfo& info,
+ const RGWAccountInfo* old_info,
+ const Attrs& attrs,
+ RGWObjVersionTracker& objv) = 0;
+ /** Delete an account */
+ virtual int delete_account(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const RGWAccountInfo& info,
+ RGWObjVersionTracker& objv) = 0;
+
+ /** Load cumulative bucket storage stats for the given owner */
+ virtual int load_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const rgw_owner& owner,
+ RGWStorageStats& stats,
+ ceph::real_time& last_synced,
+ ceph::real_time& last_updated) = 0;
+ /** Load owner storage stats asynchronously */
+ virtual int load_stats_async(const DoutPrefixProvider* dpp,
+ const rgw_owner& owner,
+ boost::intrusive_ptr<ReadStatsCB> cb) = 0;
+ /** Recalculate the sum of bucket stats */
+ virtual int reset_stats(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const rgw_owner& owner) = 0;
+ /** Finish syncing owner stats by updating last_synced timestamp */
+ virtual int complete_flush_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const rgw_owner& owner) = 0;
+
+ /** Look up the owner (user or account) for the given email address */
+ virtual int load_owner_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view email,
+ rgw_owner& owner) = 0;
+
+ /** Count the number of roles belonging to the given account. */
+ virtual int count_account_roles(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count) = 0;
+ /** Return a paginated listing of the account's roles. */
+ virtual int list_account_roles(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ RoleList& listing) = 0;
+
+ /** Load an account's user by username. */
+ virtual int load_account_user_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view tenant,
+ std::string_view username,
+ std::unique_ptr<User>* user) = 0;
+ /** Count the number of users belonging to the given account. */
+ virtual int count_account_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count) = 0;
+ /** Return a paginated listing of the account's users. */
+ virtual int list_account_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view tenant,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ UserList& listing) = 0;
+
+ /// @group Group
+ ///@{
+ /** Load an account's group by id. */
+ virtual int load_group_by_id(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view id,
+ RGWGroupInfo& info, Attrs& attrs,
+ RGWObjVersionTracker& objv) = 0;
+ /** Load an account's group by name. */
+ virtual int load_group_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view name,
+ RGWGroupInfo& info, Attrs& attrs,
+ RGWObjVersionTracker& objv) = 0;
+ /** Write or overwrite a group. */
+ virtual int store_group(const DoutPrefixProvider* dpp, optional_yield y,
+ const RGWGroupInfo& info, const Attrs& attrs,
+ RGWObjVersionTracker& objv, bool exclusive,
+ const RGWGroupInfo* old_info) = 0;
+ /** Remove a group. */
+ virtual int remove_group(const DoutPrefixProvider* dpp, optional_yield y,
+ const RGWGroupInfo& info,
+ RGWObjVersionTracker& objv) = 0;
+ /** Return a paginated listing of the group's users. */
+ virtual int list_group_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view id,
+ std::string_view marker,
+ uint32_t max_items,
+ UserList& listing) = 0;
+ /** Count the number of groups belonging to the given account. */
+ virtual int count_account_groups(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count) = 0;
+ /** Return a paginated listing of the account's groups. */
+ virtual int list_account_groups(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ GroupList& listing) = 0;
+ ///@}
+
/** Get a basic Object. This Object is not looked up, and is incomplete, since it
* does not have a bucket. This should only be used when an Object is needed before
* there is a Bucket, otherwise use the get_object() in the Bucket class. */
@@ -278,6 +467,12 @@ class Driver {
* bucket must still be allocated to support bucket->create(). */
virtual int load_bucket(const DoutPrefixProvider* dpp, const rgw_bucket& b,
std::unique_ptr<Bucket>* bucket, optional_yield y) = 0;
+ /** List the buckets of a given owner */
+ virtual int list_buckets(const DoutPrefixProvider* dpp,
+ const rgw_owner& owner, const std::string& tenant,
+ const std::string& marker, const std::string& end_marker,
+ uint64_t max, bool need_stats, BucketList& buckets,
+ optional_yield y) = 0;
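
A hypothetical caller of the owner-based list_buckets() declared above, looping on next_marker until the listing is exhausted; the signature is taken from this diff, everything else is a sketch that assumes the surrounding RGW headers:

  int list_all_buckets(const DoutPrefixProvider* dpp, optional_yield y,
                       rgw::sal::Driver* driver, const rgw_owner& owner,
                       const std::string& tenant) {
    std::string marker;
    do {
      rgw::sal::BucketList listing;
      int r = driver->list_buckets(dpp, owner, tenant, marker,
                                   /*end_marker=*/"", /*max=*/1000,
                                   /*need_stats=*/false, listing, y);
      if (r < 0) {
        return r;                     // propagate backend errors
      }
      for (const RGWBucketEnt& ent : listing.buckets) {
        // ... process each bucket entry ...
      }
      marker = listing.next_marker;   // empty when listing is complete
    } while (!marker.empty());
    return 0;
  }
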
/** For multisite, this driver is the zone's master */
virtual bool is_meta_master() = 0;
/** Get zone info for this driver */
@@ -340,6 +535,13 @@ class Driver {
RGWObjVersionTracker& objv_tracker,
optional_yield y,
const DoutPrefixProvider* dpp) = 0;
+ /** Return a paginated listing of the account's topic names */
+ virtual int list_account_topics(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view marker,
+ uint32_t max_items,
+ TopicList& listing) = 0;
/** Update the bucket-topic mapping in the store, if |add_mapping|=true then
* adding the |bucket_key| |topic| mapping to store, else delete the
* |bucket_key| |topic| mapping from the store. The |bucket_key| is
@@ -427,30 +629,46 @@ class Driver {
/** Get an IAM Role by name etc. */
virtual std::unique_ptr<RGWRole> get_role(std::string name,
std::string tenant,
+ rgw_account_id account_id,
std::string path="",
std::string trust_policy="",
+ std::string description="",
std::string max_session_duration_str="",
std::multimap<std::string,std::string> tags={}) = 0;
/** Get an IAM Role by ID */
virtual std::unique_ptr<RGWRole> get_role(std::string id) = 0;
virtual std::unique_ptr<RGWRole> get_role(const RGWRoleInfo& info) = 0;
/** Get all IAM Roles optionally filtered by path */
- virtual int get_roles(const DoutPrefixProvider *dpp,
- optional_yield y,
- const std::string& path_prefix,
- const std::string& tenant,
- std::vector<std::unique_ptr<RGWRole>>& roles) = 0;
- /** Get an empty Open ID Connector provider */
- virtual std::unique_ptr<RGWOIDCProvider> get_oidc_provider() = 0;
+ virtual int list_roles(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const std::string& tenant,
+ const std::string& path_prefix,
+ const std::string& marker,
+ uint32_t max_items,
+ RoleList& listing) = 0;
+ virtual int store_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const RGWOIDCProviderInfo& info,
+ bool exclusive) = 0;
+ virtual int load_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url,
+ RGWOIDCProviderInfo& info) = 0;
+ virtual int delete_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url) = 0;
/** Get all Open ID Connector providers, optionally filtered by tenant */
- virtual int get_oidc_providers(const DoutPrefixProvider *dpp,
- const std::string& tenant,
- std::vector<std::unique_ptr<RGWOIDCProvider>>& providers, optional_yield y) = 0;
+ virtual int get_oidc_providers(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::vector<RGWOIDCProviderInfo>& providers) = 0;
/** Get a Writer that appends to an object */
virtual std::unique_ptr<Writer> get_append_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
const std::string& unique_tag,
uint64_t position,
@@ -459,7 +677,7 @@ class Driver {
virtual std::unique_ptr<Writer> get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag) = 0;
@@ -488,18 +706,6 @@ class ReadStatsCB : public boost::intrusive_ref_counter<ReadStatsCB> {
};
/**
- * @brief A list of buckets
- *
- * This is the result from a bucket listing operation.
- */
-struct BucketList {
- /// The list of results, sorted by bucket name
- std::vector<RGWBucketEnt> buckets;
- /// The next marker to resume listing, or empty
- std::string next_marker;
-};
-
-/**
* @brief User abstraction
*
* This represents a user. In general, there will be a @a User associated with an OP
@@ -515,11 +721,6 @@ class User {
/** Clone a copy of this user. Used when modification is necessary of the copy */
virtual std::unique_ptr<User> clone() = 0;
- /** List the buckets owned by a user */
- virtual int list_buckets(const DoutPrefixProvider* dpp,
- const std::string& marker, const std::string& end_marker,
- uint64_t max, bool need_stats, BucketList& buckets,
- optional_yield y) = 0;
/** Get the display name for this User */
virtual std::string& get_display_name() = 0;
@@ -562,16 +763,6 @@ class User {
/** Set the attributes in attrs, leaving any other existing attrs set, and
* write them to the backing store; a merge operation */
virtual int merge_and_store_attrs(const DoutPrefixProvider* dpp, Attrs& new_attrs, optional_yield y) = 0;
- /** Read the User stats from the backing Store, synchronous */
- virtual int read_stats(const DoutPrefixProvider *dpp,
- optional_yield y, RGWStorageStats* stats,
- ceph::real_time* last_stats_sync = nullptr,
- ceph::real_time* last_stats_update = nullptr) = 0;
- /** Read the User stats from the backing Store, asynchronous */
- virtual int read_stats_async(const DoutPrefixProvider *dpp,
- boost::intrusive_ptr<ReadStatsCB> cb) = 0;
- /** Flush accumulated stat changes for this User to the backing store */
- virtual int complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) = 0;
/** Read detailed usage stats for this User from the backing store */
virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch,
uint64_t end_epoch, uint32_t max_entries,
@@ -588,6 +779,10 @@ class User {
virtual int remove_user(const DoutPrefixProvider* dpp, optional_yield y) = 0;
/** Verify multi-factor authentication for this user */
virtual int verify_mfa(const std::string& mfa_str, bool* verified, const DoutPrefixProvider* dpp, optional_yield y) = 0;
+ /** Return a paginated listing of the user's groups. */
+ virtual int list_groups(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view marker, uint32_t max_items,
+ GroupList& listing) = 0;
/* dang temporary; will be removed when User is complete */
virtual RGWUserInfo& get_info() = 0;
@@ -690,7 +885,7 @@ class Bucket {
/// Input parameters for create().
struct CreateParams {
- rgw_user owner;
+ rgw_owner owner;
std::string zonegroup_id;
rgw_placement_rule placement_rule;
// zone placement is optional on buckets created for another zonegroup
@@ -724,18 +919,18 @@ class Bucket {
const bucket_index_layout_generation& idx_layout,
int shard_id, boost::intrusive_ptr<ReadStatsCB> cb) = 0;
/** Sync this bucket's stats to the owning user's stats in the backing store */
- virtual int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y,
- RGWBucketEnt* optional_ent) = 0;
+ virtual int sync_owner_stats(const DoutPrefixProvider *dpp, optional_yield y,
+ RGWBucketEnt* optional_ent) = 0;
/** Check if this bucket needs resharding, and schedule it if it does */
virtual int check_bucket_shards(const DoutPrefixProvider* dpp,
uint64_t num_objs, optional_yield y) = 0;
/** Change the owner of this bucket in the backing store. Current owner must be set. Does not
* change ownership of the objects in the bucket. */
- virtual int chown(const DoutPrefixProvider* dpp, const rgw_user& new_owner, optional_yield y) = 0;
+ virtual int chown(const DoutPrefixProvider* dpp, const rgw_owner& new_owner, optional_yield y) = 0;
/** Store the cached bucket info into the backing store */
virtual int put_info(const DoutPrefixProvider* dpp, bool exclusive, ceph::real_time mtime, optional_yield y) = 0;
/** Get the owner of this bucket */
- virtual const rgw_user& get_owner() const = 0;
+ virtual const rgw_owner& get_owner() const = 0;
/** Check in the backing store if this bucket is empty */
virtual int check_empty(const DoutPrefixProvider* dpp, optional_yield y) = 0;
/** Check if the given size fits within the quota */
@@ -917,8 +1112,8 @@ class Object {
*/
struct DeleteOp {
struct Params {
- ACLOwner bucket_owner;
- ACLOwner obj_owner;
+ rgw_owner bucket_owner; ///< bucket owner for usage/quota accounting
+ ACLOwner obj_owner; ///< ACL owner for delete marker if necessary
int versioning_status{0};
uint64_t olh_epoch{0};
std::string marker_version_id;
@@ -952,7 +1147,7 @@ class Object {
optional_yield y,
uint32_t flags) = 0;
/** Copy this object to another object. */
- virtual int copy_object(User* user,
+ virtual int copy_object(const ACLOwner& owner, const rgw_user& remote_user,
req_info* info, const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
rgw::sal::Bucket* src_bucket,
@@ -1074,10 +1269,15 @@ class Object {
virtual rgw_obj get_obj(void) const = 0;
/** Restore the previous swift version of this object */
- virtual int swift_versioning_restore(bool& restored, /* out */
- const DoutPrefixProvider* dpp, optional_yield y) = 0;
+ virtual int swift_versioning_restore(const ACLOwner& owner,
+ const rgw_user& remote_user,
+ bool& restored,
+ const DoutPrefixProvider* dpp,
+ optional_yield y) = 0;
/** Copy the current version of a swift object to the configured destination bucket */
- virtual int swift_versioning_copy(const DoutPrefixProvider* dpp,
+ virtual int swift_versioning_copy(const ACLOwner& owner,
+ const rgw_user& remote_user,
+ const DoutPrefixProvider* dpp,
optional_yield y) = 0;
/** Get a new ReadOp for this object */
@@ -1224,7 +1424,7 @@ public:
virtual std::unique_ptr<Writer> get_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num,
const std::string& part_num_str) = 0;
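
For context: the Driver hunks above replace the all-at-once get_roles() with a paginated list_roles() that threads a marker and max_items through to a RoleList out-parameter, and swap the RGWOIDCProvider handle class for plain store/load/delete calls on RGWOIDCProviderInfo. A minimal caller sketch of the new role listing, assuming RoleList carries a roles vector and a next_marker resume token (field names hypothetical, mirroring the BucketList struct removed earlier in this diff):

// Sketch only: drain the paginated Driver::list_roles() interface.
// RoleList::roles and RoleList::next_marker are assumed field names.
int enumerate_roles(const DoutPrefixProvider* dpp, optional_yield y,
                    rgw::sal::Driver* driver, const std::string& tenant)
{
  std::string marker;                  // empty marker starts from the beginning
  constexpr uint32_t max_items = 100;  // page size chosen by the caller
  do {
    rgw::sal::RoleList listing;
    int r = driver->list_roles(dpp, y, tenant, /*path_prefix=*/"",
                               marker, max_items, listing);
    if (r < 0) {
      return r;                        // stores without role support may fail here
    }
    for (const auto& role : listing.roles) {
      // ... consume each role entry ...
    }
    marker = listing.next_marker;      // resume token; empty once exhausted
  } while (!marker.empty());
  return 0;
}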
diff --git a/src/rgw/rgw_sal_dbstore.cc b/src/rgw/rgw_sal_dbstore.cc
index 0019178f640..2ce6304646e 100644
--- a/src/rgw/rgw_sal_dbstore.cc
+++ b/src/rgw/rgw_sal_dbstore.cc
@@ -32,15 +32,17 @@ using namespace std;
namespace rgw::sal {
- int DBUser::list_buckets(const DoutPrefixProvider *dpp, const string& marker,
- const string& end_marker, uint64_t max, bool need_stats,
- BucketList &result, optional_yield y)
+ int DBStore::list_buckets(const DoutPrefixProvider *dpp,
+ const rgw_owner& owner, const std::string& tenant,
+ const string& marker, const string& end_marker, uint64_t max,
+ bool need_stats, BucketList &result, optional_yield y)
{
RGWUserBuckets ulist;
bool is_truncated = false;
- int ret = store->getDB()->list_buckets(dpp, "", info.user_id, marker,
- end_marker, max, need_stats, &ulist, &is_truncated);
+ std::string ownerstr = to_string(owner);
+ int ret = getDB()->list_buckets(dpp, "", ownerstr,
+ marker, end_marker, max, need_stats, &ulist, &is_truncated);
if (ret < 0)
return ret;
@@ -81,25 +83,6 @@ namespace rgw::sal {
return ret;
}
- int DBUser::read_stats(const DoutPrefixProvider *dpp,
- optional_yield y, RGWStorageStats* stats,
- ceph::real_time *last_stats_sync,
- ceph::real_time *last_stats_update)
- {
- return 0;
- }
-
- /* stats - Not for first pass */
- int DBUser::read_stats_async(const DoutPrefixProvider *dpp, boost::intrusive_ptr<ReadStatsCB> cb)
- {
- return 0;
- }
-
- int DBUser::complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y)
- {
- return 0;
- }
-
int DBUser::read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
bool *is_truncated, RGWUsageIter& usage_iter,
map<rgw_user_bucket, rgw_usage_log_entry>& usage)
@@ -152,6 +135,13 @@ namespace rgw::sal {
return 0;
}
+ int DBUser::list_groups(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view marker, uint32_t max_items,
+ GroupList& listing)
+ {
+ return -ENOTSUP;
+ }
+
int DBBucket::remove(const DoutPrefixProvider *dpp, bool delete_children, optional_yield y)
{
int ret;
@@ -226,8 +216,8 @@ namespace rgw::sal {
return 0;
}
- int DBBucket::sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y,
- RGWBucketEnt* ent)
+ int DBBucket::sync_owner_stats(const DoutPrefixProvider *dpp, optional_yield y,
+ RGWBucketEnt* ent)
{
return 0;
}
@@ -238,7 +228,7 @@ namespace rgw::sal {
return 0;
}
- int DBBucket::chown(const DoutPrefixProvider *dpp, const rgw_user& new_owner, optional_yield y)
+ int DBBucket::chown(const DoutPrefixProvider *dpp, const rgw_owner& new_owner, optional_yield y)
{
int ret;
@@ -702,7 +692,6 @@ namespace rgw::sal {
int DBObject::DBDeleteOp::delete_obj(const DoutPrefixProvider* dpp, optional_yield y, uint32_t flags)
{
- parent_op.params.bucket_owner = params.bucket_owner.id;
parent_op.params.versioning_status = params.versioning_status;
parent_op.params.obj_owner = params.obj_owner;
parent_op.params.olh_epoch = params.olh_epoch;
@@ -732,13 +721,13 @@ namespace rgw::sal {
DB::Object del_target(store->getDB(), bucket->get_info(), get_obj());
DB::Object::Delete del_op(&del_target);
- del_op.params.bucket_owner = bucket->get_info().owner;
del_op.params.versioning_status = bucket->get_info().versioning_status();
return del_op.delete_obj(dpp);
}
- int DBObject::copy_object(User* user,
+ int DBObject::copy_object(const ACLOwner& owner,
+ const rgw_user& remote_user,
req_info* info,
const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object,
@@ -774,14 +763,16 @@ namespace rgw::sal {
return parent_op.iterate(dpp, ofs, end, cb);
}
- int DBObject::swift_versioning_restore(bool& restored,
+ int DBObject::swift_versioning_restore(const ACLOwner& owner,
+ const rgw_user& remote_user, bool& restored,
const DoutPrefixProvider* dpp, optional_yield y)
{
return 0;
}
- int DBObject::swift_versioning_copy(const DoutPrefixProvider* dpp,
- optional_yield y)
+ int DBObject::swift_versioning_copy(const ACLOwner& owner,
+ const rgw_user& remote_user,
+ const DoutPrefixProvider* dpp, optional_yield y)
{
return 0;
}
@@ -794,7 +785,7 @@ namespace rgw::sal {
int ret;
std::unique_ptr<rgw::sal::Object::DeleteOp> del_op = meta_obj->get_delete_op();
- del_op->params.bucket_owner.id = bucket->get_info().owner;
+ del_op->params.bucket_owner = bucket->get_info().owner;
del_op->params.versioning_status = 0;
// Since the data objects are associated with meta obj till
@@ -834,7 +825,7 @@ namespace rgw::sal {
DB::Object::Write obj_op(&op_target);
/* Create meta object */
- obj_op.meta.owner = owner.id;
+ obj_op.meta.owner = to_string(owner.id);
obj_op.meta.category = RGWObjCategory::MultiMeta;
obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;
obj_op.meta.mtime = &mtime;
@@ -1013,7 +1004,7 @@ namespace rgw::sal {
DB::Object::Write obj_op(&op_target);
ret = obj_op.prepare(dpp);
- obj_op.meta.owner = owner.id;
+ obj_op.meta.owner = to_string(owner.id);
obj_op.meta.flags = PUT_OBJ_CREATE;
obj_op.meta.category = RGWObjCategory::Main;
obj_op.meta.modify_tail = true;
@@ -1103,7 +1094,7 @@ namespace rgw::sal {
const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num,
const std::string& part_num_str)
@@ -1117,7 +1108,7 @@ namespace rgw::sal {
MultipartUpload* upload,
rgw::sal::Object* obj,
DBStore* _driver,
- const rgw_user& _owner,
+ const ACLOwner& _owner,
const rgw_placement_rule *_ptail_placement_rule,
uint64_t _part_num, const std::string& _part_num_str):
StoreWriter(dpp, y),
@@ -1263,7 +1254,7 @@ namespace rgw::sal {
optional_yield y,
rgw::sal::Object* _obj,
DBStore* _driver,
- const rgw_user& _owner,
+ const ACLOwner& _owner,
const rgw_placement_rule *_ptail_placement_rule,
uint64_t _olh_epoch,
const std::string& _unique_tag) :
@@ -1406,8 +1397,10 @@ namespace rgw::sal {
std::unique_ptr<RGWRole> DBStore::get_role(std::string name,
std::string tenant,
+ rgw_account_id account_id,
std::string path,
std::string trust_policy,
+ std::string description,
std::string max_session_duration_str,
std::multimap<std::string,std::string> tags)
{
@@ -1427,24 +1420,45 @@ namespace rgw::sal {
return std::unique_ptr<RGWRole>(p);
}
- int DBStore::get_roles(const DoutPrefixProvider *dpp,
- optional_yield y,
- const std::string& path_prefix,
- const std::string& tenant,
- vector<std::unique_ptr<RGWRole>>& roles)
+ int DBStore::list_roles(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const std::string& tenant,
+ const std::string& path_prefix,
+ const std::string& marker,
+ uint32_t max_items,
+ RoleList& listing)
{
return 0;
}
- std::unique_ptr<RGWOIDCProvider> DBStore::get_oidc_provider()
+ int DBStore::store_oidc_provider(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const RGWOIDCProviderInfo& info,
+ bool exclusive)
{
- RGWOIDCProvider* p = nullptr;
- return std::unique_ptr<RGWOIDCProvider>(p);
+ return -ENOTSUP;
+ }
+
+ int DBStore::load_oidc_provider(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ std::string_view account,
+ std::string_view url,
+ RGWOIDCProviderInfo& info)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::delete_oidc_provider(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ std::string_view account,
+ std::string_view url)
+ {
+ return -ENOTSUP;
}
int DBStore::get_oidc_providers(const DoutPrefixProvider *dpp,
- const std::string& tenant,
- vector<std::unique_ptr<RGWOIDCProvider>>& providers, optional_yield y)
+ optional_yield y, std::string_view tenant,
+ vector<RGWOIDCProviderInfo>& providers)
{
return 0;
}
@@ -1452,7 +1466,7 @@ namespace rgw::sal {
std::unique_ptr<Writer> DBStore::get_append_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
const std::string& unique_tag,
uint64_t position,
@@ -1463,7 +1477,7 @@ namespace rgw::sal {
std::unique_ptr<Writer> DBStore::get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag) {
@@ -1492,9 +1506,10 @@ namespace rgw::sal {
RGWUserInfo uinfo;
User *u;
int ret = 0;
+ rgw::sal::Attrs attrs;
RGWObjVersionTracker objv_tracker;
- ret = getDB()->get_user(dpp, string("access_key"), key, uinfo, nullptr,
+ ret = getDB()->get_user(dpp, string("access_key"), key, uinfo, &attrs,
&objv_tracker);
if (ret < 0)
@@ -1505,6 +1520,7 @@ namespace rgw::sal {
if (!u)
return -ENOMEM;
+ u->get_attrs() = std::move(attrs);
u->get_version_tracker() = objv_tracker;
user->reset(u);
@@ -1516,9 +1532,10 @@ namespace rgw::sal {
RGWUserInfo uinfo;
User *u;
int ret = 0;
+ rgw::sal::Attrs attrs;
RGWObjVersionTracker objv_tracker;
- ret = getDB()->get_user(dpp, string("email"), email, uinfo, nullptr,
+ ret = getDB()->get_user(dpp, string("email"), email, uinfo, &attrs,
&objv_tracker);
if (ret < 0)
@@ -1529,6 +1546,7 @@ namespace rgw::sal {
if (!u)
return -ENOMEM;
+ u->get_attrs() = std::move(attrs);
u->get_version_tracker() = objv_tracker;
user->reset(u);
@@ -1541,6 +1559,211 @@ namespace rgw::sal {
return -ENOTSUP;
}
+ int DBStore::load_account_by_id(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view id,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::load_account_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view name,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::load_account_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view email,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::store_account(const DoutPrefixProvider* dpp,
+ optional_yield y, bool exclusive,
+ const RGWAccountInfo& info,
+ const RGWAccountInfo* old_info,
+ const Attrs& attrs,
+ RGWObjVersionTracker& objv)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::delete_account(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const RGWAccountInfo& info,
+ RGWObjVersionTracker& objv)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::load_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const rgw_owner& owner,
+ RGWStorageStats& stats,
+ ceph::real_time& last_synced,
+ ceph::real_time& last_updated)
+ {
+ return 0;
+ }
+ int DBStore::load_stats_async(const DoutPrefixProvider* dpp,
+ const rgw_owner& owner,
+ boost::intrusive_ptr<ReadStatsCB> cb)
+ {
+ return -ENOTSUP;
+ }
+ int DBStore::reset_stats(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const rgw_owner& owner)
+ {
+ return -ENOTSUP;
+ }
+ int DBStore::complete_flush_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const rgw_owner& owner)
+ {
+ return 0;
+ }
+
+ int DBStore::load_owner_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view email,
+ rgw_owner& owner)
+ {
+ RGWUserInfo uinfo;
+ int ret = getDB()->get_user(dpp, "email", std::string{email},
+ uinfo, nullptr, nullptr);
+ if (ret < 0) {
+ return ret;
+ }
+ owner = std::move(uinfo.user_id);
+ return 0;
+ }
+
+ int DBStore::count_account_roles(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::list_account_roles(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ RoleList& listing)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::load_account_user_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view tenant,
+ std::string_view username,
+ std::unique_ptr<User>* user)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::count_account_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::list_account_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view tenant,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ UserList& listing)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::load_group_by_id(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view id,
+ RGWGroupInfo& info, Attrs& attrs,
+ RGWObjVersionTracker& objv)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::load_group_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view name,
+ RGWGroupInfo& info, Attrs& attrs,
+ RGWObjVersionTracker& objv)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::store_group(const DoutPrefixProvider* dpp, optional_yield y,
+ const RGWGroupInfo& info, const Attrs& attrs,
+ RGWObjVersionTracker& objv, bool exclusive,
+ const RGWGroupInfo* old_info)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::remove_group(const DoutPrefixProvider* dpp, optional_yield y,
+ const RGWGroupInfo& info,
+ RGWObjVersionTracker& objv)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::list_group_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view id,
+ std::string_view marker,
+ uint32_t max_items,
+ UserList& listing)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::count_account_groups(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count)
+ {
+ return -ENOTSUP;
+ }
+
+ int DBStore::list_account_groups(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ GroupList& listing)
+ {
+ return -ENOTSUP;
+ }
+
std::string DBStore::get_cluster_id(const DoutPrefixProvider* dpp, optional_yield y)
{
return "PLACEHOLDER"; // for instance unique identifier
@@ -1679,6 +1902,16 @@ namespace rgw::sal {
return std::make_unique<DBNotification>(obj, src_obj, event_types);
}
+ int DBStore::list_account_topics(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view marker,
+ uint32_t max_items,
+ TopicList& listing)
+ {
+ return -ENOTSUP;
+ }
+
RGWLC* DBStore::get_rgwlc(void) {
return lc;
}
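
The dbstore changes above move bucket listing off the User handle and onto the store itself, keyed by an rgw_owner plus tenant (DBStore stringifies the owner with to_string() for its backend), and stub the new account, group, and stats entry points with -ENOTSUP or no-op zero returns. A pagination sketch against the relocated call, using the buckets/next_marker fields documented in the BucketList struct this commit moves out of rgw_sal.h:

// Sketch only: count an owner's buckets via the driver-level listing.
int count_owner_buckets(const DoutPrefixProvider* dpp, optional_yield y,
                        rgw::sal::Driver* driver, const rgw_owner& owner,
                        const std::string& tenant, uint64_t& total)
{
  total = 0;
  std::string marker;                   // start from the beginning
  do {
    rgw::sal::BucketList listing;
    int r = driver->list_buckets(dpp, owner, tenant, marker,
                                 /*end_marker=*/"", /*max=*/1000,
                                 /*need_stats=*/false, listing, y);
    if (r < 0) {
      return r;
    }
    total += listing.buckets.size();    // entries are RGWBucketEnt, sorted by name
    marker = listing.next_marker;       // empty when the listing is complete
  } while (!marker.empty());
  return 0;
}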
diff --git a/src/rgw/rgw_sal_dbstore.h b/src/rgw/rgw_sal_dbstore.h
index 6ce6398d062..4770713e762 100644
--- a/src/rgw/rgw_sal_dbstore.h
+++ b/src/rgw/rgw_sal_dbstore.h
@@ -16,7 +16,6 @@
#pragma once
#include "rgw_sal_store.h"
-#include "rgw_oidc_provider.h"
#include "rgw_role.h"
#include "rgw_lc.h"
#include "rgw_multi.h"
@@ -88,15 +87,7 @@ protected:
virtual std::unique_ptr<User> clone() override {
return std::unique_ptr<User>(new DBUser(*this));
}
- int list_buckets(const DoutPrefixProvider *dpp, const std::string& marker, const std::string& end_marker,
- uint64_t max, bool need_stats, BucketList& buckets, optional_yield y) override;
virtual int read_attrs(const DoutPrefixProvider* dpp, optional_yield y) override;
- virtual int read_stats(const DoutPrefixProvider *dpp,
- optional_yield y, RGWStorageStats* stats,
- ceph::real_time *last_stats_sync = nullptr,
- ceph::real_time *last_stats_update = nullptr) override;
- virtual int read_stats_async(const DoutPrefixProvider *dpp, boost::intrusive_ptr<ReadStatsCB> cb) override;
- virtual int complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) override;
virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
bool* is_truncated, RGWUsageIter& usage_iter,
std::map<rgw_user_bucket, rgw_usage_log_entry>& usage) override;
@@ -108,6 +99,9 @@ protected:
virtual int store_user(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, RGWUserInfo* old_info = nullptr) override;
virtual int remove_user(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int verify_mfa(const std::string& mfa_str, bool* verified, const DoutPrefixProvider* dpp, optional_yield y) override;
+ int list_groups(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view marker, uint32_t max_items,
+ GroupList& listing) override;
friend class DBBucket;
};
@@ -158,11 +152,11 @@ protected:
std::string *max_marker = nullptr,
bool *syncstopped = nullptr) override;
virtual int read_stats_async(const DoutPrefixProvider *dpp, const bucket_index_layout_generation& idx_layout, int shard_id, boost::intrusive_ptr<ReadStatsCB> ctx) override;
- int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y,
- RGWBucketEnt* ent) override;
+ int sync_owner_stats(const DoutPrefixProvider *dpp, optional_yield y,
+ RGWBucketEnt* ent) override;
int check_bucket_shards(const DoutPrefixProvider *dpp,
uint64_t num_objs, optional_yield y) override;
- virtual int chown(const DoutPrefixProvider *dpp, const rgw_user& new_owner, optional_yield y) override;
+ virtual int chown(const DoutPrefixProvider *dpp, const rgw_owner& new_owner, optional_yield y) override;
virtual int put_info(const DoutPrefixProvider *dpp, bool exclusive, ceph::real_time mtime, optional_yield y) override;
virtual int check_empty(const DoutPrefixProvider *dpp, optional_yield y) override;
virtual int check_quota(const DoutPrefixProvider *dpp, RGWQuota& quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) override;
@@ -341,24 +335,6 @@ protected:
virtual int reload_packages(const DoutPrefixProvider* dpp, optional_yield y) override;
};
- class DBOIDCProvider : public RGWOIDCProvider {
- DBStore* store;
- public:
- DBOIDCProvider(DBStore* _store) : store(_store) {}
- ~DBOIDCProvider() = default;
-
- virtual int store_url(const DoutPrefixProvider *dpp, const std::string& url, bool exclusive, optional_yield y) override { return 0; }
- virtual int read_url(const DoutPrefixProvider *dpp, const std::string& url, const std::string& tenant, optional_yield y) override { return 0; }
- virtual int delete_obj(const DoutPrefixProvider *dpp, optional_yield y) override { return 0;}
-
- void encode(bufferlist& bl) const {
- RGWOIDCProvider::encode(bl);
- }
- void decode(bufferlist::const_iterator& bl) {
- RGWOIDCProvider::decode(bl);
- }
- };
-
/*
* For multipart upload, below is the process flow -
*
@@ -478,7 +454,7 @@ protected:
virtual std::unique_ptr<Writer> get_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num,
const std::string& part_num_str) override;
@@ -546,7 +522,8 @@ protected:
virtual int delete_object(const DoutPrefixProvider* dpp,
optional_yield y,
uint32_t flags) override;
- virtual int copy_object(User* user,
+ virtual int copy_object(const ACLOwner& owner,
+ const rgw_user& remote_user,
req_info* info, const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
rgw::sal::Bucket* src_bucket,
@@ -587,9 +564,11 @@ protected:
virtual int dump_obj_layout(const DoutPrefixProvider *dpp, optional_yield y, Formatter* f) override;
/* Swift versioning */
- virtual int swift_versioning_restore(bool& restored,
+ virtual int swift_versioning_restore(const ACLOwner& owner,
+ const rgw_user& remote_user, bool& restored,
const DoutPrefixProvider* dpp, optional_yield y) override;
- virtual int swift_versioning_copy(const DoutPrefixProvider* dpp,
+ virtual int swift_versioning_copy(const ACLOwner& owner,
+ const rgw_user& remote_user, const DoutPrefixProvider* dpp,
optional_yield y) override;
/* OPs */
@@ -619,7 +598,7 @@ protected:
class DBAtomicWriter : public StoreWriter {
protected:
rgw::sal::DBStore* store;
- const rgw_user& owner;
+ const ACLOwner& owner;
const rgw_placement_rule *ptail_placement_rule;
uint64_t olh_epoch;
const std::string& unique_tag;
@@ -638,7 +617,7 @@ protected:
optional_yield y,
rgw::sal::Object* obj,
DBStore* _store,
- const rgw_user& _owner,
+ const ACLOwner& _owner,
const rgw_placement_rule *_ptail_placement_rule,
uint64_t _olh_epoch,
const std::string& _unique_tag);
@@ -665,7 +644,7 @@ protected:
class DBMultipartWriter : public StoreWriter {
protected:
rgw::sal::DBStore* store;
- const rgw_user& owner;
+ const ACLOwner& owner;
const rgw_placement_rule *ptail_placement_rule;
uint64_t olh_epoch;
rgw::sal::Object* head_obj;
@@ -688,7 +667,7 @@ public:
optional_yield y, MultipartUpload* upload,
rgw::sal::Object* obj,
DBStore* _store,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num, const std::string& part_num_str);
~DBMultipartWriter() = default;
@@ -748,11 +727,135 @@ public:
virtual int get_user_by_access_key(const DoutPrefixProvider *dpp, const std::string& key, optional_yield y, std::unique_ptr<User>* user) override;
virtual int get_user_by_email(const DoutPrefixProvider *dpp, const std::string& email, optional_yield y, std::unique_ptr<User>* user) override;
virtual int get_user_by_swift(const DoutPrefixProvider *dpp, const std::string& user_str, optional_yield y, std::unique_ptr<User>* user) override;
+
+ int load_account_by_id(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view id,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int load_account_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view name,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int load_account_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view email,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int store_account(const DoutPrefixProvider* dpp,
+ optional_yield y, bool exclusive,
+ const RGWAccountInfo& info,
+ const RGWAccountInfo* old_info,
+ const Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int delete_account(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const RGWAccountInfo& info,
+ RGWObjVersionTracker& objv) override;
+
+ int load_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const rgw_owner& owner,
+ RGWStorageStats& stats,
+ ceph::real_time& last_synced,
+ ceph::real_time& last_updated) override;
+ int load_stats_async(const DoutPrefixProvider* dpp,
+ const rgw_owner& owner,
+ boost::intrusive_ptr<ReadStatsCB> cb) override;
+ int reset_stats(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const rgw_owner& owner) override;
+ int complete_flush_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const rgw_owner& owner) override;
+
+ int load_owner_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view email,
+ rgw_owner& owner) override;
+
+ int count_account_roles(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count) override;
+ int list_account_roles(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ RoleList& listing) override;
+
+ int load_account_user_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view tenant,
+ std::string_view username,
+ std::unique_ptr<User>* user) override;
+ int count_account_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count) override;
+ int list_account_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view tenant,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ UserList& listing) override;
+
+ int load_group_by_id(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view id,
+ RGWGroupInfo& info, Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int load_group_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view name,
+ RGWGroupInfo& info, Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int store_group(const DoutPrefixProvider* dpp, optional_yield y,
+ const RGWGroupInfo& info, const Attrs& attrs,
+ RGWObjVersionTracker& objv, bool exclusive,
+ const RGWGroupInfo* old_info) override;
+ int remove_group(const DoutPrefixProvider* dpp, optional_yield y,
+ const RGWGroupInfo& info,
+ RGWObjVersionTracker& objv) override;
+ int list_group_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view id,
+ std::string_view marker,
+ uint32_t max_items,
+ UserList& listing) override;
+ int count_account_groups(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count) override;
+ int list_account_groups(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ GroupList& listing) override;
+
virtual std::unique_ptr<Object> get_object(const rgw_obj_key& k) override;
virtual std::string get_cluster_id(const DoutPrefixProvider* dpp, optional_yield y);
std::unique_ptr<Bucket> get_bucket(const RGWBucketInfo& i) override;
int load_bucket(const DoutPrefixProvider *dpp, const rgw_bucket& b,
std::unique_ptr<Bucket>* bucket, optional_yield y) override;
+ int list_buckets(const DoutPrefixProvider *dpp,
+ const rgw_owner& owner, const std::string& tenant,
+ const std::string& marker, const std::string& end_marker,
+ uint64_t max, bool need_stats, BucketList& buckets, optional_yield y) override;
virtual bool is_meta_master() override;
virtual Zone* get_zone() { return &zone; }
virtual std::string zone_unique_id(uint64_t unique_num) override;
@@ -777,7 +880,14 @@ public:
std::string& _req_id,
optional_yield y) override;
- virtual RGWLC* get_rgwlc(void) override;
+ int list_account_topics(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view marker,
+ uint32_t max_items,
+ TopicList& listing) override;
+
+ virtual RGWLC* get_rgwlc(void) override;
virtual RGWCoroutinesManagerRegistry* get_cr_registry() override { return NULL; }
virtual int log_usage(const DoutPrefixProvider *dpp, std::map<rgw_user_bucket, RGWUsageBatch>& usage_info, optional_yield y) override;
virtual int log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl) override;
@@ -817,25 +927,42 @@ public:
std::unique_ptr<LuaManager> get_lua_manager(const std::string& luarocks_path) override;
virtual std::unique_ptr<RGWRole> get_role(std::string name,
std::string tenant,
+ rgw_account_id account_id,
std::string path="",
std::string trust_policy="",
+ std::string description="",
std::string max_session_duration_str="",
std::multimap<std::string,std::string> tags={}) override;
virtual std::unique_ptr<RGWRole> get_role(std::string id) override;
virtual std::unique_ptr<RGWRole> get_role(const RGWRoleInfo& info) override;
- virtual int get_roles(const DoutPrefixProvider *dpp,
+ int list_roles(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const std::string& tenant,
+ const std::string& path_prefix,
+ const std::string& marker,
+ uint32_t max_items,
+ RoleList& listing) override;
+ int store_oidc_provider(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const RGWOIDCProviderInfo& info,
+ bool exclusive) override;
+ int load_oidc_provider(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url,
+ RGWOIDCProviderInfo& info) override;
+ int delete_oidc_provider(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url) override;
+ virtual int get_oidc_providers(const DoutPrefixProvider* dpp,
optional_yield y,
- const std::string& path_prefix,
- const std::string& tenant,
- std::vector<std::unique_ptr<RGWRole>>& roles) override;
- virtual std::unique_ptr<RGWOIDCProvider> get_oidc_provider() override;
- virtual int get_oidc_providers(const DoutPrefixProvider *dpp,
- const std::string& tenant,
- std::vector<std::unique_ptr<RGWOIDCProvider>>& providers, optional_yield y) override;
+ std::string_view tenant,
+ std::vector<RGWOIDCProviderInfo>& providers) override;
virtual std::unique_ptr<Writer> get_append_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
const std::string& unique_tag,
uint64_t position,
@@ -843,7 +970,7 @@ public:
virtual std::unique_ptr<Writer> get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag) override;
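
Note how the header drops DBUser's per-user stats and bucket-listing overrides in favor of the owner-keyed Driver methods (load_stats, load_stats_async, reset_stats, complete_flush_stats, list_buckets), keyed by rgw_owner, which the surrounding account work suggests can name either a user or an account. A sketch of the replacement read path, assuming only the signatures shown above:

// Sketch only: owner-keyed stats read replacing User::read_stats().
int read_owner_stats(const DoutPrefixProvider* dpp, optional_yield y,
                     rgw::sal::Driver* driver, const rgw_owner& owner)
{
  RGWStorageStats stats;
  ceph::real_time last_synced;
  ceph::real_time last_updated;
  int r = driver->load_stats(dpp, y, owner, stats, last_synced, last_updated);
  if (r < 0) {
    return r;   // dbstore returns 0 with empty stats; richer stores may fail
  }
  // stats now holds the owner's usage totals; last_synced/last_updated
  // report when they were reconciled and last written.
  return 0;
}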
diff --git a/src/rgw/rgw_sal_filter.cc b/src/rgw/rgw_sal_filter.cc
index 94b922acff4..66a2466040b 100644
--- a/src/rgw/rgw_sal_filter.cc
+++ b/src/rgw/rgw_sal_filter.cc
@@ -155,6 +155,216 @@ int FilterDriver::get_user_by_swift(const DoutPrefixProvider* dpp, const std::st
return 0;
}
+int FilterDriver::load_account_by_id(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view id,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv)
+{
+ return next->load_account_by_id(dpp, y, id, info, attrs, objv);
+}
+
+int FilterDriver::load_account_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view name,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv)
+{
+ return next->load_account_by_name(dpp, y, tenant, name, info, attrs, objv);
+}
+
+int FilterDriver::load_account_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view email,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv)
+{
+ return next->load_account_by_email(dpp, y, email, info, attrs, objv);
+}
+
+int FilterDriver::store_account(const DoutPrefixProvider* dpp,
+ optional_yield y, bool exclusive,
+ const RGWAccountInfo& info,
+ const RGWAccountInfo* old_info,
+ const Attrs& attrs,
+ RGWObjVersionTracker& objv)
+{
+ return next->store_account(dpp, y, exclusive, info, old_info, attrs, objv);
+}
+
+int FilterDriver::delete_account(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const RGWAccountInfo& info,
+ RGWObjVersionTracker& objv)
+{
+ return next->delete_account(dpp, y, info, objv);
+}
+
+int FilterDriver::load_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const rgw_owner& owner,
+ RGWStorageStats& stats,
+ ceph::real_time& last_synced,
+ ceph::real_time& last_updated)
+{
+ return next->load_stats(dpp, y, owner, stats, last_synced, last_updated);
+}
+
+int FilterDriver::load_stats_async(const DoutPrefixProvider* dpp,
+ const rgw_owner& owner,
+ boost::intrusive_ptr<ReadStatsCB> cb)
+{
+ return next->load_stats_async(dpp, owner, std::move(cb));
+}
+
+int FilterDriver::reset_stats(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const rgw_owner& owner)
+{
+ return next->reset_stats(dpp, y, owner);
+}
+
+int FilterDriver::complete_flush_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const rgw_owner& owner)
+{
+ return next->complete_flush_stats(dpp, y, owner);
+}
+
+int FilterDriver::load_owner_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view email,
+ rgw_owner& owner)
+{
+ return next->load_owner_by_email(dpp, y, email, owner);
+}
+
+int FilterDriver::count_account_roles(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count)
+{
+ return next->count_account_roles(dpp, y, account_id, count);
+}
+
+int FilterDriver::list_account_roles(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ RoleList& listing)
+{
+ return next->list_account_roles(dpp, y, account_id, path_prefix,
+ marker, max_items, listing);
+}
+
+int FilterDriver::load_account_user_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view tenant,
+ std::string_view username,
+ std::unique_ptr<User>* user)
+{
+ std::unique_ptr<User> nu;
+ int ret = next->load_account_user_by_name(dpp, y, account_id, tenant,
+ username, &nu);
+ if (ret >= 0) {
+ *user = std::make_unique<FilterUser>(std::move(nu));
+ }
+ return ret;
+}
+
+int FilterDriver::count_account_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count)
+{
+ return next->count_account_users(dpp, y, account_id, count);
+}
+
+int FilterDriver::list_account_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view tenant,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ UserList& listing)
+{
+ return next->list_account_users(dpp, y, account_id, tenant, path_prefix,
+ marker, max_items, listing);
+}
+
+int FilterDriver::load_group_by_id(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view id,
+ RGWGroupInfo& info, Attrs& attrs,
+ RGWObjVersionTracker& objv)
+{
+ return next->load_group_by_id(dpp, y, id, info, attrs, objv);
+}
+
+int FilterDriver::load_group_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view name,
+ RGWGroupInfo& info, Attrs& attrs,
+ RGWObjVersionTracker& objv)
+{
+ return next->load_group_by_name(dpp, y, account_id, name, info, attrs, objv);
+}
+
+int FilterDriver::store_group(const DoutPrefixProvider* dpp, optional_yield y,
+ const RGWGroupInfo& info, const Attrs& attrs,
+ RGWObjVersionTracker& objv, bool exclusive,
+ const RGWGroupInfo* old_info)
+{
+ return next->store_group(dpp, y, info, attrs, objv, exclusive, old_info);
+}
+
+int FilterDriver::remove_group(const DoutPrefixProvider* dpp, optional_yield y,
+ const RGWGroupInfo& info,
+ RGWObjVersionTracker& objv)
+{
+ return next->remove_group(dpp, y, info, objv);
+}
+
+int FilterDriver::list_group_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view id,
+ std::string_view marker,
+ uint32_t max_items,
+ UserList& listing)
+{
+ return next->list_group_users(dpp, y, tenant, id, marker, max_items, listing);
+}
+
+int FilterDriver::count_account_groups(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count)
+{
+ return next->count_account_groups(dpp, y, account_id, count);
+}
+
+int FilterDriver::list_account_groups(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ GroupList& listing)
+{
+ return next->list_account_groups(dpp, y, account_id, path_prefix,
+ marker, max_items, listing);
+}
+
std::unique_ptr<Object> FilterDriver::get_object(const rgw_obj_key& k)
{
std::unique_ptr<Object> o = next->get_object(k);
@@ -175,6 +385,15 @@ int FilterDriver::load_bucket(const DoutPrefixProvider* dpp, const rgw_bucket& b
return ret;
}
+int FilterDriver::list_buckets(const DoutPrefixProvider* dpp,
+ const rgw_owner& owner, const std::string& tenant,
+ const std::string& marker, const std::string& end_marker,
+ uint64_t max, bool need_stats, BucketList &buckets, optional_yield y)
+{
+ return next->list_buckets(dpp, owner, tenant, marker, end_marker,
+ max, need_stats, buckets, y);
+}
+
bool FilterDriver::is_meta_master()
{
return next->is_meta_master();
@@ -389,12 +608,14 @@ std::unique_ptr<LuaManager> FilterDriver::get_lua_manager(const std::string& lua
std::unique_ptr<RGWRole> FilterDriver::get_role(std::string name,
std::string tenant,
+ rgw_account_id account_id,
std::string path,
std::string trust_policy,
+ std::string description,
std::string max_session_duration_str,
std::multimap<std::string,std::string> tags)
{
- return next->get_role(name, tenant, path, trust_policy, max_session_duration_str, tags);
+ return next->get_role(name, tenant, std::move(account_id), path, trust_policy, std::move(description), max_session_duration_str, tags);
}
std::unique_ptr<RGWRole> FilterDriver::get_role(std::string id)
@@ -407,31 +628,55 @@ std::unique_ptr<RGWRole> FilterDriver::get_role(const RGWRoleInfo& info)
return next->get_role(info);
}
-int FilterDriver::get_roles(const DoutPrefixProvider *dpp,
- optional_yield y,
- const std::string& path_prefix,
- const std::string& tenant,
- std::vector<std::unique_ptr<RGWRole>>& roles)
+int FilterDriver::list_roles(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const std::string& tenant,
+ const std::string& path_prefix,
+ const std::string& marker,
+ uint32_t max_items,
+ RoleList& listing)
+{
+ return next->list_roles(dpp, y, tenant, path_prefix,
+ marker, max_items, listing);
+}
+
+int FilterDriver::store_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const RGWOIDCProviderInfo& info,
+ bool exclusive)
+{
+ return next->store_oidc_provider(dpp, y, info, exclusive);
+}
+
+int FilterDriver::load_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url,
+ RGWOIDCProviderInfo& info)
{
- return next->get_roles(dpp, y, path_prefix, tenant, roles);
+ return next->load_oidc_provider(dpp, y, tenant, url, info);
}
-std::unique_ptr<RGWOIDCProvider> FilterDriver::get_oidc_provider()
+int FilterDriver::delete_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url)
{
- return next->get_oidc_provider();
+ return next->delete_oidc_provider(dpp, y, tenant, url);
}
-int FilterDriver::get_oidc_providers(const DoutPrefixProvider *dpp,
- const std::string& tenant,
- std::vector<std::unique_ptr<RGWOIDCProvider>>& providers, optional_yield y)
+int FilterDriver::get_oidc_providers(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::vector<RGWOIDCProviderInfo>& providers)
{
- return next->get_oidc_providers(dpp, tenant, providers, y);
+ return next->get_oidc_providers(dpp, y, tenant, providers);
}
std::unique_ptr<Writer> FilterDriver::get_append_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
const std::string& unique_tag,
uint64_t position,
@@ -448,7 +693,7 @@ std::unique_ptr<Writer> FilterDriver::get_append_writer(const DoutPrefixProvider
std::unique_ptr<Writer> FilterDriver::get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag)
@@ -480,14 +725,6 @@ CephContext* FilterDriver::ctx(void)
return next->ctx();
}
-int FilterUser::list_buckets(const DoutPrefixProvider* dpp, const std::string& marker,
- const std::string& end_marker, uint64_t max,
- bool need_stats, BucketList &buckets, optional_yield y)
-{
- return next->list_buckets(dpp, marker, end_marker, max,
- need_stats, buckets, y);
-}
-
int FilterUser::read_attrs(const DoutPrefixProvider* dpp, optional_yield y)
{
return next->read_attrs(dpp, y);
@@ -499,24 +736,6 @@ int FilterUser::merge_and_store_attrs(const DoutPrefixProvider* dpp,
return next->merge_and_store_attrs(dpp, new_attrs, y);
}
-int FilterUser::read_stats(const DoutPrefixProvider *dpp,
- optional_yield y, RGWStorageStats* stats,
- ceph::real_time* last_stats_sync,
- ceph::real_time* last_stats_update)
-{
- return next->read_stats(dpp, y, stats, last_stats_sync, last_stats_update);
-}
-
-int FilterUser::read_stats_async(const DoutPrefixProvider *dpp, boost::intrusive_ptr<ReadStatsCB> cb)
-{
- return next->read_stats_async(dpp, cb);
-}
-
-int FilterUser::complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y)
-{
- return next->complete_flush_stats(dpp, y);
-}
-
int FilterUser::read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch,
uint64_t end_epoch, uint32_t max_entries,
bool* is_truncated, RGWUsageIter& usage_iter,
@@ -553,6 +772,13 @@ int FilterUser::verify_mfa(const std::string& mfa_str, bool* verified,
return next->verify_mfa(mfa_str, verified, dpp, y);
}
+int FilterUser::list_groups(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view marker, uint32_t max_items,
+ GroupList& listing)
+{
+ return next->list_groups(dpp, y, marker, max_items, listing);
+}
+
std::unique_ptr<Object> FilterBucket::get_object(const rgw_obj_key& k)
{
std::unique_ptr<Object> o = next->get_object(k);
@@ -617,10 +843,10 @@ int FilterBucket::read_stats_async(const DoutPrefixProvider *dpp,
return next->read_stats_async(dpp, idx_layout, shard_id, ctx);
}
-int FilterBucket::sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y,
- RGWBucketEnt* ent)
+int FilterBucket::sync_owner_stats(const DoutPrefixProvider *dpp, optional_yield y,
+ RGWBucketEnt* ent)
{
- return next->sync_user_stats(dpp, y, ent);
+ return next->sync_owner_stats(dpp, y, ent);
}
int FilterBucket::check_bucket_shards(const DoutPrefixProvider* dpp,
@@ -629,7 +855,7 @@ int FilterBucket::check_bucket_shards(const DoutPrefixProvider* dpp,
return next->check_bucket_shards(dpp, num_objs, y);
}
-int FilterBucket::chown(const DoutPrefixProvider* dpp, const rgw_user& new_owner, optional_yield y)
+int FilterBucket::chown(const DoutPrefixProvider* dpp, const rgw_owner& new_owner, optional_yield y)
{
return next->chown(dpp, new_owner, y);
}
@@ -640,7 +866,7 @@ int FilterBucket::put_info(const DoutPrefixProvider* dpp, bool exclusive,
return next->put_info(dpp, exclusive, _mtime, y);
}
-const rgw_user& FilterBucket::get_owner() const
+const rgw_owner& FilterBucket::get_owner() const
{
return next->get_owner();
}
@@ -759,7 +985,8 @@ int FilterObject::delete_object(const DoutPrefixProvider* dpp,
return next->delete_object(dpp, y, flags);
}
-int FilterObject::copy_object(User* user,
+int FilterObject::copy_object(const ACLOwner& owner,
+ const rgw_user& remote_user,
req_info* info,
const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object,
@@ -787,7 +1014,7 @@ int FilterObject::copy_object(User* user,
const DoutPrefixProvider* dpp,
optional_yield y)
{
- return next->copy_object(user, info, source_zone,
+ return next->copy_object(owner, remote_user, info, source_zone,
nextObject(dest_object),
nextBucket(dest_bucket),
nextBucket(src_bucket),
@@ -892,16 +1119,21 @@ void FilterObject::set_bucket(Bucket* b)
next->set_bucket(nextBucket(b));
};
-int FilterObject::swift_versioning_restore(bool& restored,
- const DoutPrefixProvider* dpp, optional_yield y)
+int FilterObject::swift_versioning_restore(const ACLOwner& owner,
+ const rgw_user& remote_user,
+ bool& restored,
+ const DoutPrefixProvider* dpp,
+ optional_yield y)
{
- return next->swift_versioning_restore(restored, dpp, y);
+ return next->swift_versioning_restore(owner, remote_user, restored, dpp, y);
}
-int FilterObject::swift_versioning_copy(const DoutPrefixProvider* dpp,
- optional_yield y)
+int FilterObject::swift_versioning_copy(const ACLOwner& owner,
+ const rgw_user& remote_user,
+ const DoutPrefixProvider* dpp,
+ optional_yield y)
{
- return next->swift_versioning_copy(dpp, y);
+ return next->swift_versioning_copy(owner, remote_user, dpp, y);
}
std::unique_ptr<Object::ReadOp> FilterObject::get_read_op()
@@ -1055,7 +1287,7 @@ std::unique_ptr<Writer> FilterMultipartUpload::get_writer(
const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num,
const std::string& part_num_str)
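
rgw_sal_filter keeps its pure-decorator shape through the rename: every new account, group, role, and OIDC method forwards to next, and methods returning handles re-wrap them (load_account_user_by_name returns a FilterUser around the inner user). A hypothetical subclass sketch of that pattern, assuming FilterDriver's constructor can be inherited:

// Sketch only: a custom filter that observes one call and delegates the rest.
class CountingFilterDriver : public rgw::sal::FilterDriver {
  uint64_t role_listings = 0;         // hypothetical instrumentation counter
public:
  using FilterDriver::FilterDriver;   // assumes an inheritable constructor

  int list_roles(const DoutPrefixProvider* dpp, optional_yield y,
                 const std::string& tenant, const std::string& path_prefix,
                 const std::string& marker, uint32_t max_items,
                 rgw::sal::RoleList& listing) override {
    ++role_listings;                  // observe, then fall through to the wrapped driver
    return FilterDriver::list_roles(dpp, y, tenant, path_prefix,
                                    marker, max_items, listing);
  }
};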
diff --git a/src/rgw/rgw_sal_filter.h b/src/rgw/rgw_sal_filter.h
index b07179aa44e..95d00960cbd 100644
--- a/src/rgw/rgw_sal_filter.h
+++ b/src/rgw/rgw_sal_filter.h
@@ -16,7 +16,6 @@
#pragma once
#include "rgw_sal.h"
-#include "rgw_oidc_provider.h"
#include "rgw_role.h"
namespace rgw { namespace sal {
@@ -156,10 +155,133 @@ public:
virtual int get_user_by_swift(const DoutPrefixProvider* dpp, const
std::string& user_str, optional_yield y,
std::unique_ptr<User>* user) override;
+
+ int load_account_by_id(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view id,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int load_account_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view name,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int load_account_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view email,
+ RGWAccountInfo& info,
+ Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int store_account(const DoutPrefixProvider* dpp,
+ optional_yield y, bool exclusive,
+ const RGWAccountInfo& info,
+ const RGWAccountInfo* old_info,
+ const Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int delete_account(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const RGWAccountInfo& info,
+ RGWObjVersionTracker& objv) override;
+
+ int load_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const rgw_owner& owner,
+ RGWStorageStats& stats,
+ ceph::real_time& last_synced,
+ ceph::real_time& last_updated) override;
+ int load_stats_async(const DoutPrefixProvider* dpp,
+ const rgw_owner& owner,
+ boost::intrusive_ptr<ReadStatsCB> cb) override;
+ int reset_stats(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const rgw_owner& owner) override;
+ int complete_flush_stats(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const rgw_owner& owner) override;
+ int load_owner_by_email(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view email,
+ rgw_owner& owner) override;
+ int count_account_roles(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count) override;
+ int list_account_roles(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ RoleList& listing) override;
+ int load_account_user_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view tenant,
+ std::string_view username,
+ std::unique_ptr<User>* user) override;
+ int count_account_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count) override;
+ int list_account_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view tenant,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ UserList& listing) override;
+
+ int load_group_by_id(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view id,
+ RGWGroupInfo& info, Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int load_group_by_name(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view name,
+ RGWGroupInfo& info, Attrs& attrs,
+ RGWObjVersionTracker& objv) override;
+ int store_group(const DoutPrefixProvider* dpp, optional_yield y,
+ const RGWGroupInfo& info, const Attrs& attrs,
+ RGWObjVersionTracker& objv, bool exclusive,
+ const RGWGroupInfo* old_info) override;
+ int remove_group(const DoutPrefixProvider* dpp, optional_yield y,
+ const RGWGroupInfo& info,
+ RGWObjVersionTracker& objv) override;
+ int list_group_users(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view id,
+ std::string_view marker,
+ uint32_t max_items,
+ UserList& listing) override;
+ int count_account_groups(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ uint32_t& count) override;
+ int list_account_groups(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view path_prefix,
+ std::string_view marker,
+ uint32_t max_items,
+ GroupList& listing) override;
+
virtual std::unique_ptr<Object> get_object(const rgw_obj_key& k) override;
std::unique_ptr<Bucket> get_bucket(const RGWBucketInfo& i) override;
int load_bucket(const DoutPrefixProvider* dpp, const rgw_bucket& b,
std::unique_ptr<Bucket>* bucket, optional_yield y) override;
+ int list_buckets(const DoutPrefixProvider* dpp,
+ const rgw_owner& owner, const std::string& tenant,
+ const std::string& marker, const std::string& end_marker,
+ uint64_t max, bool need_stats, BucketList& buckets,
+ optional_yield y) override;
+
virtual bool is_meta_master() override;
virtual Zone* get_zone() override { return zone.get(); }
virtual std::string zone_unique_id(uint64_t unique_num) override;
@@ -222,6 +344,15 @@ public:
const DoutPrefixProvider* dpp) override {
return next->remove_topic_v2(topic_name, tenant, objv_tracker, y, dpp);
}
+ int list_account_topics(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view account_id,
+ std::string_view marker,
+ uint32_t max_items,
+ TopicList& listing) override {
+ return next->list_account_topics(dpp, y, account_id, marker,
+ max_items, listing);
+ }
int update_bucket_topic_mapping(const rgw_pubsub_topic& topic,
const std::string& bucket_key,
bool add_mapping,
@@ -298,27 +429,42 @@ public:
virtual std::unique_ptr<LuaManager> get_lua_manager(const std::string& luarocks_path) override;
virtual std::unique_ptr<RGWRole> get_role(std::string name,
std::string tenant,
+ rgw_account_id account_id,
std::string path="",
std::string trust_policy="",
- std::string
- max_session_duration_str="",
+ std::string description="",
+ std::string max_session_duration_str="",
std::multimap<std::string,std::string> tags={}) override;
virtual std::unique_ptr<RGWRole> get_role(std::string id) override;
virtual std::unique_ptr<RGWRole> get_role(const RGWRoleInfo& info) override;
- virtual int get_roles(const DoutPrefixProvider *dpp,
- optional_yield y,
- const std::string& path_prefix,
- const std::string& tenant,
- std::vector<std::unique_ptr<RGWRole>>& roles) override;
- virtual std::unique_ptr<RGWOIDCProvider> get_oidc_provider() override;
- virtual int get_oidc_providers(const DoutPrefixProvider *dpp,
- const std::string& tenant,
- std::vector<std::unique_ptr<RGWOIDCProvider>>&
- providers, optional_yield y) override;
+ virtual int list_roles(const DoutPrefixProvider *dpp,
+ optional_yield y,
+ const std::string& tenant,
+ const std::string& path_prefix,
+ const std::string& marker,
+ uint32_t max_items,
+ RoleList& listing) override;
+ int store_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ const RGWOIDCProviderInfo& info,
+ bool exclusive) override;
+ int load_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url,
+ RGWOIDCProviderInfo& info) override;
+ int delete_oidc_provider(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::string_view url) override;
+ int get_oidc_providers(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view tenant,
+ std::vector<RGWOIDCProviderInfo>& providers) override;
virtual std::unique_ptr<Writer> get_append_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule
*ptail_placement_rule,
const std::string& unique_tag,
@@ -327,7 +473,7 @@ public:
virtual std::unique_ptr<Writer> get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag) override;
@@ -356,11 +502,6 @@ public:
virtual std::unique_ptr<User> clone() override {
return std::make_unique<FilterUser>(*this);
}
- virtual int list_buckets(const DoutPrefixProvider* dpp,
- const std::string& marker, const std::string& end_marker,
- uint64_t max, bool need_stats, BucketList& buckets,
- optional_yield y) override;
-
virtual std::string& get_display_name() override { return next->get_display_name(); }
virtual const std::string& get_tenant() override { return next->get_tenant(); }
virtual void set_tenant(std::string& _t) override { next->set_tenant(_t); }
@@ -382,13 +523,6 @@ public:
virtual int read_attrs(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int merge_and_store_attrs(const DoutPrefixProvider* dpp, Attrs&
new_attrs, optional_yield y) override;
- virtual int read_stats(const DoutPrefixProvider *dpp,
- optional_yield y, RGWStorageStats* stats,
- ceph::real_time* last_stats_sync = nullptr,
- ceph::real_time* last_stats_update = nullptr) override;
- virtual int read_stats_async(const DoutPrefixProvider *dpp,
- boost::intrusive_ptr<ReadStatsCB> cb) override;
- virtual int complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) override;
virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch,
uint64_t end_epoch, uint32_t max_entries,
bool* is_truncated, RGWUsageIter& usage_iter,
@@ -402,6 +536,9 @@ public:
virtual int remove_user(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int verify_mfa(const std::string& mfa_str, bool* verified,
const DoutPrefixProvider* dpp, optional_yield y) override;
+ int list_groups(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view marker, uint32_t max_items,
+ GroupList& listing) override;
RGWUserInfo& get_info() override { return next->get_info(); }
virtual void print(std::ostream& out) const override { return next->print(out); }
@@ -447,15 +584,15 @@ public:
virtual int read_stats_async(const DoutPrefixProvider *dpp,
const bucket_index_layout_generation& idx_layout,
int shard_id, boost::intrusive_ptr<ReadStatsCB> ctx) override;
- int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y,
- RGWBucketEnt* ent) override;
+ int sync_owner_stats(const DoutPrefixProvider *dpp, optional_yield y,
+ RGWBucketEnt* ent) override;
int check_bucket_shards(const DoutPrefixProvider* dpp,
uint64_t num_objs, optional_yield y) override;
- virtual int chown(const DoutPrefixProvider* dpp, const rgw_user& new_owner,
+ virtual int chown(const DoutPrefixProvider* dpp, const rgw_owner& new_owner,
optional_yield y) override;
virtual int put_info(const DoutPrefixProvider* dpp, bool exclusive,
ceph::real_time mtime, optional_yield y) override;
- virtual const rgw_user& get_owner() const override;
+ virtual const rgw_owner& get_owner() const override;
virtual int check_empty(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int check_quota(const DoutPrefixProvider *dpp, RGWQuota& quota,
uint64_t obj_size, optional_yield y,
@@ -585,7 +722,8 @@ public:
virtual int delete_object(const DoutPrefixProvider* dpp,
optional_yield y,
uint32_t flags) override;
- virtual int copy_object(User* user,
+ virtual int copy_object(const ACLOwner& owner,
+ const rgw_user& remote_user,
req_info* info, const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
rgw::sal::Bucket* src_bucket,
@@ -673,10 +811,15 @@ public:
virtual bool have_instance(void) override { return next->have_instance(); }
virtual void clear_instance() override { return next->clear_instance(); }
- virtual int swift_versioning_restore(bool& restored, /* out */
- const DoutPrefixProvider* dpp, optional_yield y) override;
- virtual int swift_versioning_copy(const DoutPrefixProvider* dpp,
- optional_yield y) override;
+ virtual int swift_versioning_restore(const ACLOwner& owner,
+ const rgw_user& remote_user,
+ bool& restored,
+ const DoutPrefixProvider* dpp,
+ optional_yield y) override;
+ virtual int swift_versioning_copy(const ACLOwner& owner,
+ const rgw_user& remote_user,
+ const DoutPrefixProvider* dpp,
+ optional_yield y) override;
virtual std::unique_ptr<ReadOp> get_read_op() override;
virtual std::unique_ptr<DeleteOp> get_delete_op() override;
@@ -767,7 +910,7 @@ public:
virtual std::unique_ptr<Writer> get_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
- const rgw_user& owner,
+ const ACLOwner& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num,
const std::string& part_num_str) override;
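
The FilterDriver/FilterUser/FilterBucket overrides above all reduce to the same decorator shape: hold a `next` pointer to the wrapped SAL object and forward by default, so a layered driver overrides only the calls it wants to intercept. A minimal self-contained sketch of that pattern, using simplified stand-in types rather than the real rgw::sal interfaces:

#include <iostream>
#include <memory>
#include <string>

struct Driver {                        // stand-in for rgw::sal::Driver
  virtual ~Driver() = default;
  virtual int load_user(const std::string& id) = 0;
};

struct RadosLikeDriver : Driver {      // hypothetical concrete backend
  int load_user(const std::string&) override { return 0; }
};

struct FilterDriver : Driver {         // forwards everything by default
  std::unique_ptr<Driver> next;
  explicit FilterDriver(std::unique_ptr<Driver> n) : next(std::move(n)) {}
  int load_user(const std::string& id) override {
    return next->load_user(id);        // pass through unless overridden
  }
};

struct TracingDriver : FilterDriver {  // intercepts one call, forwards the rest
  using FilterDriver::FilterDriver;
  int load_user(const std::string& id) override {
    std::cout << "load_user(" << id << ")\n";
    return FilterDriver::load_user(id);
  }
};

int main() {
  TracingDriver d(std::make_unique<RadosLikeDriver>());
  return d.load_user("alice");
}
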
diff --git a/src/rgw/rgw_sal_fwd.h b/src/rgw/rgw_sal_fwd.h
index 123d17a5162..e447cba4aae 100644
--- a/src/rgw/rgw_sal_fwd.h
+++ b/src/rgw/rgw_sal_fwd.h
@@ -33,8 +33,9 @@ namespace sal {
class Driver;
class User;
+ struct UserList;
class Bucket;
- class BucketList;
+ struct BucketList;
class Object;
class MultipartUpload;
class Lifecycle;
@@ -45,6 +46,10 @@ namespace sal {
class Zone;
class LuaManager;
struct RGWRoleInfo;
+ class RGWRole;
+ struct RoleList;
+ struct GroupList;
+ struct TopicList;
class DataProcessor;
class ObjectProcessor;
class ReadStatsCB;
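
One detail worth noting in rgw_sal_fwd.h: BucketList is re-declared with `struct` because its definition is a struct, and the class-key of a forward declaration should agree with the definition (standard C++ accepts a mismatch, but MSVC warns with C4099, and mixed keys mislead readers). Tiny illustration:

struct BucketList;                     // agrees with the definition below

void fill(BucketList& l);              // usable with the declaration alone

struct BucketList { int count = 0; };

void fill(BucketList& l) { ++l.count; }

int main() { BucketList b; fill(b); return b.count - 1; }
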
diff --git a/src/rgw/rgw_sal_store.h b/src/rgw/rgw_sal_store.h
index 1ba44bc02ec..8bfff802526 100644
--- a/src/rgw/rgw_sal_store.h
+++ b/src/rgw/rgw_sal_store.h
@@ -135,7 +135,7 @@ class StoreBucket : public Bucket {
virtual Attrs& get_attrs(void) override { return attrs; }
virtual int set_attrs(Attrs a) override { attrs = a; return 0; }
- virtual const rgw_user& get_owner() const override { return info.owner; };
+ virtual const rgw_owner& get_owner() const override { return info.owner; }
virtual bool empty() const override { return info.bucket.name.empty(); }
virtual const std::string& get_name() const override { return info.bucket.name; }
virtual const std::string& get_tenant() const override { return info.bucket.tenant; }
diff --git a/src/rgw/rgw_signal.cc b/src/rgw/rgw_signal.cc
index 4bb29d0df68..e7a6de5190c 100644
--- a/src/rgw/rgw_signal.cc
+++ b/src/rgw/rgw_signal.cc
@@ -33,6 +33,10 @@ static int signal_fd[2] = {0, 0};
namespace rgw {
namespace signal {
+void sig_handler_noop(int signum) {
+ /* NOP */
+} /* sig_handler_noop */
+
void sighup_handler(int signum) {
if (rgw::AppMain::ops_log_file != nullptr) {
rgw::AppMain::ops_log_file->reopen();
diff --git a/src/rgw/rgw_signal.h b/src/rgw/rgw_signal.h
index 68fc4f614a3..3cd09e7645e 100644
--- a/src/rgw/rgw_signal.h
+++ b/src/rgw/rgw_signal.h
@@ -19,6 +19,7 @@
namespace rgw {
namespace signal {
+void sig_handler_noop(int signum);
void signal_shutdown();
void wait_shutdown();
int signal_fd_init();
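
A handler that does nothing still has an effect: once installed without SA_RESTART, signal delivery makes blocking syscalls in the target thread return -1 with errno == EINTR instead of taking the default (often fatal) action, a common way to nudge worker threads during shutdown. Whether RGW registers sig_handler_noop for exactly this purpose is an assumption; the mechanism itself looks like:

#include <cstdio>
#include <signal.h>
#include <unistd.h>

void sig_handler_noop(int) { /* NOP */ }

int main() {
  struct sigaction sa = {};
  sa.sa_handler = sig_handler_noop;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;                     // deliberately not SA_RESTART
  sigaction(SIGUSR1, &sa, nullptr);

  char buf[1];
  // blocks until input or a signal; with the no-op handler installed,
  // SIGUSR1 makes this return -1/EINTR instead of terminating the process
  ssize_t n = read(STDIN_FILENO, buf, sizeof buf);
  if (n < 0)
    perror("read");
  return 0;
}
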
diff --git a/src/rgw/rgw_sts.cc b/src/rgw/rgw_sts.cc
index 557bcf24f2b..1486868e1e1 100644
--- a/src/rgw/rgw_sts.cc
+++ b/src/rgw/rgw_sts.cc
@@ -18,6 +18,7 @@
#include "include/types.h"
#include "rgw_string.h"
+#include "rgw_account.h"
#include "rgw_b64.h"
#include "rgw_common.h"
#include "rgw_tools.h"
@@ -290,7 +291,15 @@ std::tuple<int, rgw::sal::RGWRole*> STSService::getRoleInfo(const DoutPrefixProv
if (auto r_arn = rgw::ARN::parse(arn); r_arn) {
auto pos = r_arn->resource.find_last_of('/');
string roleName = r_arn->resource.substr(pos + 1);
- std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(roleName, r_arn->account);
+ string tenant = r_arn->account;
+
+ rgw_account_id account;
+ if (rgw::account::validate_id(tenant)) {
+ account = std::move(tenant);
+ tenant.clear();
+ }
+
+ std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(roleName, tenant, account);
if (int ret = role->get(dpp, y); ret < 0) {
if (ret == -ENOENT) {
ldpp_dout(dpp, 0) << "Role doesn't exist: " << roleName << dendl;
diff --git a/src/rgw/rgw_swift_auth.cc b/src/rgw/rgw_swift_auth.cc
index aa31adc1d0a..af60a0e275d 100644
--- a/src/rgw/rgw_swift_auth.cc
+++ b/src/rgw/rgw_swift_auth.cc
@@ -124,11 +124,16 @@ void TempURLEngine::get_owner_info(const DoutPrefixProvider* dpp, const req_stat
throw ret;
}
+ const rgw_user* uid = std::get_if<rgw_user>(&bucket->get_info().owner);
+ if (!uid) {
+ throw -EPERM;
+ }
+
ldpp_dout(dpp, 20) << "temp url user (bucket owner): " << bucket->get_info().owner
<< dendl;
std::unique_ptr<rgw::sal::User> user;
- user = driver->get_user(bucket->get_info().owner);
+ user = driver->get_user(*uid);
if (user->load_user(dpp, s->yield) < 0) {
throw -EPERM;
}
@@ -508,9 +513,18 @@ ExternalTokenEngine::authenticate(const DoutPrefixProvider* dpp,
throw ret;
}
- auto apl = apl_factory->create_apl_local(cct, s, user->get_info(),
- extract_swift_subuser(swift_user),
- std::nullopt, rgw::auth::LocalApplier::NO_ACCESS_KEY);
+ std::optional<RGWAccountInfo> account;
+ std::vector<IAM::Policy> policies;
+ ret = load_account_and_policies(dpp, y, driver, user->get_info(),
+ user->get_attrs(), account, policies);
+ if (ret < 0) {
+ return result_t::deny(-EPERM);
+ }
+
+ auto apl = apl_factory->create_apl_local(
+ cct, s, user->get_info(), std::move(account),
+ std::move(policies), extract_swift_subuser(swift_user),
+ std::nullopt, LocalApplier::NO_ACCESS_KEY);
return result_t::grant(std::move(apl));
}
@@ -628,6 +642,14 @@ SignedTokenEngine::authenticate(const DoutPrefixProvider* dpp,
throw ret;
}
+ std::optional<RGWAccountInfo> account;
+ std::vector<IAM::Policy> policies;
+ ret = load_account_and_policies(dpp, s->yield, driver, user->get_info(),
+ user->get_attrs(), account, policies);
+ if (ret < 0) {
+ return result_t::deny(-EPERM);
+ }
+
ldpp_dout(dpp, 10) << "swift_user=" << swift_user << dendl;
const auto siter = user->get_info().swift_keys.find(swift_user);
@@ -662,9 +684,10 @@ SignedTokenEngine::authenticate(const DoutPrefixProvider* dpp,
return result_t::deny(-EPERM);
}
- auto apl = apl_factory->create_apl_local(cct, s, user->get_info(),
- extract_swift_subuser(swift_user),
- std::nullopt, rgw::auth::LocalApplier::NO_ACCESS_KEY);
+ auto apl = apl_factory->create_apl_local(
+ cct, s, user->get_info(), std::move(account),
+ std::move(policies), extract_swift_subuser(swift_user),
+ std::nullopt, LocalApplier::NO_ACCESS_KEY);
return result_t::grant(std::move(apl));
}
diff --git a/src/rgw/rgw_swift_auth.h b/src/rgw/rgw_swift_auth.h
index 3564a6b39b5..9049c54f5ca 100644
--- a/src/rgw/rgw_swift_auth.h
+++ b/src/rgw/rgw_swift_auth.h
@@ -24,8 +24,9 @@ class TempURLApplier : public rgw::auth::LocalApplier {
public:
TempURLApplier(CephContext* const cct,
const RGWUserInfo& user_info)
- : LocalApplier(cct, user_info, LocalApplier::NO_SUBUSER, std::nullopt, LocalApplier::NO_ACCESS_KEY) {
- };
+ : LocalApplier(cct, user_info, std::nullopt, {}, LocalApplier::NO_SUBUSER,
+ std::nullopt, LocalApplier::NO_ACCESS_KEY)
+ {}
void modify_request_state(const DoutPrefixProvider* dpp, req_state * s) const override; /* in/out */
void write_ops_log_entry(rgw_log_entry& entry) const override;
@@ -155,10 +156,14 @@ class SwiftAnonymousApplier : public rgw::auth::LocalApplier {
public:
SwiftAnonymousApplier(CephContext* const cct,
const RGWUserInfo& user_info)
- : LocalApplier(cct, user_info, LocalApplier::NO_SUBUSER, std::nullopt, LocalApplier::NO_ACCESS_KEY) {
+ : LocalApplier(cct, user_info, std::nullopt, {}, LocalApplier::NO_SUBUSER,
+ std::nullopt, LocalApplier::NO_ACCESS_KEY) {
+ }
+ bool is_admin_of(const rgw_owner& o) const {return false;}
+ bool is_owner_of(const rgw_owner& o) const {
+ auto* uid = std::get_if<rgw_user>(&o);
+ return uid && uid->id == RGW_USER_ANON_ID;
}
- bool is_admin_of(const rgw_user& uid) const {return false;}
- bool is_owner_of(const rgw_user& uid) const {return uid.id.compare(RGW_USER_ANON_ID) == 0;}
};
class SwiftAnonymousEngine : public rgw::auth::AnonymousEngine {
@@ -234,13 +239,16 @@ class DefaultStrategy : public rgw::auth::Strategy,
aplptr_t create_apl_local(CephContext* const cct,
const req_state* const s,
const RGWUserInfo& user_info,
+ std::optional<RGWAccountInfo> account,
+ std::vector<IAM::Policy> policies,
const std::string& subuser,
const std::optional<uint32_t>& perm_mask,
const std::string& access_key_id) const override {
auto apl = \
rgw::auth::add_3rdparty(driver, rgw_user(s->account_name),
rgw::auth::add_sysreq(cct, driver, s,
- rgw::auth::LocalApplier(cct, user_info, subuser, perm_mask, access_key_id)));
+ LocalApplier(cct, user_info, std::move(account), std::move(policies),
+ subuser, perm_mask, access_key_id)));
/* TODO(rzarzynski): replace with static_ptr. */
return aplptr_t(new decltype(apl)(std::move(apl)));
}
diff --git a/src/rgw/rgw_user.cc b/src/rgw/rgw_user.cc
index 02403e5f342..6636d8bea19 100644
--- a/src/rgw/rgw_user.cc
+++ b/src/rgw/rgw_user.cc
@@ -13,18 +13,19 @@
using namespace std;
-int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
- rgw::sal::User* user, optional_yield y)
+int rgw_sync_all_stats(const DoutPrefixProvider *dpp,
+ optional_yield y, rgw::sal::Driver* driver,
+ const rgw_owner& owner, const std::string& tenant)
{
size_t max_entries = dpp->get_cct()->_conf->rgw_list_buckets_max_chunk;
rgw::sal::BucketList listing;
int ret = 0;
do {
- ret = user->list_buckets(dpp, listing.next_marker, string(),
- max_entries, false, listing, y);
+ ret = driver->list_buckets(dpp, owner, tenant, listing.next_marker,
+ string(), max_entries, false, listing, y);
if (ret < 0) {
- ldpp_dout(dpp, 0) << "failed to read user buckets: ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "failed to list buckets: " << cpp_strerror(ret) << dendl;
return ret;
}
@@ -35,7 +36,7 @@ int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Driver* dri
ldpp_dout(dpp, 0) << "ERROR: could not read bucket info: bucket=" << bucket << " ret=" << ret << dendl;
continue;
}
- ret = bucket->sync_user_stats(dpp, y, &ent);
+ ret = bucket->sync_owner_stats(dpp, y, &ent);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: could not sync bucket stats: ret=" << ret << dendl;
return ret;
@@ -47,9 +48,9 @@ int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Driver* dri
}
} while (!listing.next_marker.empty());
- ret = user->complete_flush_stats(dpp, y);
+ ret = driver->complete_flush_stats(dpp, y, owner);
if (ret < 0) {
- cerr << "ERROR: failed to complete syncing user stats: ret=" << ret << std::endl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to complete syncing owner stats: ret=" << ret << dendl;
return ret;
}
@@ -66,8 +67,9 @@ int rgw_user_get_all_buckets_stats(const DoutPrefixProvider *dpp,
rgw::sal::BucketList listing;
do {
- int ret = user->list_buckets(dpp, listing.next_marker, string(),
- max_entries, false, listing, y);
+ int ret = driver->list_buckets(dpp, user->get_id(), user->get_tenant(),
+ listing.next_marker, string(),
+ max_entries, false, listing, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "failed to read user buckets: ret=" << ret << dendl;
return ret;
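
Both rewritten loops follow the usual marker-pagination shape: request at most max_entries, process the page, then resume from next_marker until it comes back empty. A self-contained sketch with a hypothetical in-memory list_page() standing in for driver->list_buckets():

#include <iostream>
#include <string>
#include <vector>

struct Listing {
  std::vector<std::string> entries;
  std::string next_marker;             // empty => no further pages
};

static const std::vector<std::string> all = {"bkt-a", "bkt-b", "bkt-c",
                                             "bkt-d", "bkt-e"};

// hypothetical data source; real code would call the driver
Listing list_page(const std::string& marker, size_t max_entries) {
  Listing out;
  size_t i = 0;
  while (i < all.size() && all[i] <= marker) ++i;    // resume after marker
  for (; i < all.size() && out.entries.size() < max_entries; ++i)
    out.entries.push_back(all[i]);
  if (i < all.size())
    out.next_marker = out.entries.back();            // where to resume
  return out;
}

int main() {
  std::string marker;                                // start at the beginning
  Listing l;
  do {
    l = list_page(marker, 2);
    for (const auto& e : l.entries)
      std::cout << e << '\n';
    marker = l.next_marker;                          // continue from here
  } while (!l.next_marker.empty());
}
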
diff --git a/src/rgw/rgw_user_types.h b/src/rgw/rgw_user_types.h
index 1aaf4cfa5d3..2329eca3d60 100644
--- a/src/rgw/rgw_user_types.h
+++ b/src/rgw/rgw_user_types.h
@@ -19,12 +19,23 @@
#pragma once
-#include <string_view>
+#include <iosfwd>
+#include <string>
+#include <variant>
#include <fmt/format.h>
#include "common/dout.h"
#include "common/Formatter.h"
+// strong typedef to std::string
+struct rgw_account_id : std::string {
+ using std::string::string;
+ using std::string::operator=;
+ explicit rgw_account_id(const std::string& s) : std::string(s) {}
+};
+void encode_json_impl(const char* name, const rgw_account_id& id, Formatter* f);
+void decode_json_obj(rgw_account_id& id, JSONObj* obj);
+
struct rgw_user {
// note: order of member variables matches the sort order of operator<=>
std::string tenant;
@@ -125,3 +136,19 @@ struct rgw_user {
static void generate_test_instances(std::list<rgw_user*>& o);
};
WRITE_CLASS_ENCODER(rgw_user)
+
+
+/// Resources are either owned by accounts, or by users or roles (represented as
+/// rgw_user) that don't belong to an account.
+///
+/// This variant is present in binary encoding formats, so existing types cannot
+/// be changed or removed. New types can only be added to the end.
+using rgw_owner = std::variant<rgw_user, rgw_account_id>;
+
+rgw_owner parse_owner(const std::string& str);
+std::string to_string(const rgw_owner& o);
+
+std::ostream& operator<<(std::ostream& out, const rgw_owner& o);
+
+void encode_json_impl(const char *name, const rgw_owner& o, ceph::Formatter *f);
+void decode_json_obj(rgw_owner& o, JSONObj *obj);
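
Call sites consume the new rgw_owner variant with std::get_if for user-only paths (as in the TempURL and anonymous-applier hunks earlier) and std::visit when both alternatives matter. A compilable sketch with simplified stand-in types; the "tenant$id" rendering for tenanted users is an assumption about rgw_user's string form:

#include <iostream>
#include <string>
#include <type_traits>
#include <variant>

struct rgw_user { std::string tenant, id; };
struct rgw_account_id : std::string { using std::string::string; };
using rgw_owner = std::variant<rgw_user, rgw_account_id>;

std::string to_string(const rgw_owner& o) {
  return std::visit([](const auto& v) -> std::string {
    if constexpr (std::is_same_v<std::decay_t<decltype(v)>, rgw_user>)
      return v.tenant.empty() ? v.id : v.tenant + "$" + v.id;
    else
      return v;                        // account ids are plain strings
  }, o);
}

int main() {
  rgw_owner o = rgw_user{"", "alice"};
  if (auto* u = std::get_if<rgw_user>(&o))   // user-only code path
    std::cout << "user: " << u->id << '\n';
  std::cout << to_string(o) << '\n';
}
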
diff --git a/src/rgw/rgw_xml.cc b/src/rgw/rgw_xml.cc
index 1bcbcdad245..3ce031c2faa 100644
--- a/src/rgw/rgw_xml.cc
+++ b/src/rgw/rgw_xml.cc
@@ -431,6 +431,20 @@ void decode_xml_obj(utime_t& val, XMLObj *obj)
}
}
+void decode_xml_obj(ceph::real_time& val, XMLObj *obj)
+{
+ const std::string s = obj->get_data();
+ uint64_t epoch;
+ uint64_t nsec;
+ int r = utime_t::parse_date(s, &epoch, &nsec);
+ if (r == 0) {
+ using namespace std::chrono;
+ val = real_time{seconds(epoch) + nanoseconds(nsec)};
+ } else {
+ throw RGWXMLDecoder::err("failed to decode real_time");
+ }
+}
+
void encode_xml(const char *name, const string& val, Formatter *f)
{
f->dump_string(name, val);
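
The new decode_xml_obj overload assembles a real_time from the epoch seconds and nanoseconds produced by utime_t::parse_date(). The chrono arithmetic in isolation, assuming real_time is a nanosecond-resolution time_point like ceph::real_clock's:

#include <chrono>
#include <cstdint>
#include <iostream>

int main() {
  using namespace std::chrono;
  using real_time = time_point<system_clock, nanoseconds>;  // assumption
  uint64_t epoch = 1700000000, nsec = 250000000;
  real_time val{seconds(epoch) + nanoseconds(nsec)};        // as in the patch
  std::cout << duration_cast<milliseconds>(val.time_since_epoch()).count()
            << " ms since epoch\n";
}
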
diff --git a/src/rgw/rgw_xml.h b/src/rgw/rgw_xml.h
index 5d3e7278952..8e2a281b649 100644
--- a/src/rgw/rgw_xml.h
+++ b/src/rgw/rgw_xml.h
@@ -9,6 +9,7 @@
#include <iosfwd>
#include <include/types.h>
#include <common/Formatter.h>
+#include "common/ceph_time.h"
class XMLObj;
class RGWXMLParser;
@@ -190,6 +191,7 @@ void decode_xml_obj(bool& val, XMLObj *obj);
void decode_xml_obj(bufferlist& val, XMLObj *obj);
class utime_t;
void decode_xml_obj(utime_t& val, XMLObj *obj);
+void decode_xml_obj(ceph::real_time& val, XMLObj *obj);
template<class T>
void decode_xml_obj(std::optional<T>& val, XMLObj *obj)
diff --git a/src/rgw/rgw_zone.cc b/src/rgw/rgw_zone.cc
index ed438dead93..8d8b44cd961 100644
--- a/src/rgw/rgw_zone.cc
+++ b/src/rgw/rgw_zone.cc
@@ -298,6 +298,8 @@ void RGWZoneParams::decode_json(JSONObj *obj)
JSONDecoder::decode_json("otp_pool", otp_pool, obj);
JSONDecoder::decode_json("notif_pool", notif_pool, obj);
JSONDecoder::decode_json("topics_pool", topics_pool, obj);
+ JSONDecoder::decode_json("account_pool", account_pool, obj);
+ JSONDecoder::decode_json("group_pool", group_pool, obj);
JSONDecoder::decode_json("system_key", system_key, obj);
JSONDecoder::decode_json("placement_pools", placement_pools, obj);
JSONDecoder::decode_json("tier_config", tier_config, obj);
@@ -323,6 +325,8 @@ void RGWZoneParams::dump(Formatter *f) const
encode_json("otp_pool", otp_pool, f);
encode_json("notif_pool", notif_pool, f);
encode_json("topics_pool", topics_pool, f);
+ encode_json("account_pool", account_pool, f);
+ encode_json("group_pool", group_pool, f);
encode_json_plain("system_key", system_key, f);
encode_json("placement_pools", placement_pools, f);
encode_json("tier_config", tier_config, f);
@@ -482,6 +486,8 @@ void add_zone_pools(const RGWZoneParams& info,
pools.insert(info.oidc_pool);
pools.insert(info.notif_pool);
pools.insert(info.topics_pool);
+ pools.insert(info.account_pool);
+ pools.insert(info.group_pool);
for (const auto& [pname, placement] : info.placement_pools) {
pools.insert(placement.index_pool);
@@ -587,6 +593,8 @@ int RGWZoneParams::fix_pool_names(const DoutPrefixProvider *dpp, optional_yield
oidc_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:oidc", oidc_pool);
notif_pool = fix_zone_pool_dup(pools, name ,".rgw.log:notif", notif_pool);
topics_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:topics", topics_pool);
+ account_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:accounts", account_pool);
+ group_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:groups", group_pool);
for(auto& iter : placement_pools) {
iter.second.index_pool = fix_zone_pool_dup(pools, name, "." + default_bucket_index_pool_suffix,
@@ -1250,6 +1258,8 @@ int init_zone_pool_names(const DoutPrefixProvider *dpp, optional_yield y,
info.notif_pool = fix_zone_pool_dup(pools, info.name, ".rgw.log:notif", info.notif_pool);
info.topics_pool =
fix_zone_pool_dup(pools, info.name, ".rgw.meta:topics", info.topics_pool);
+ info.account_pool = fix_zone_pool_dup(pools, info.name, ".rgw.meta:accounts", info.account_pool);
+ info.group_pool = fix_zone_pool_dup(pools, info.name, ".rgw.meta:groups", info.group_pool);
for (auto& [pname, placement] : info.placement_pools) {
placement.index_pool = fix_zone_pool_dup(pools, info.name, "." + default_bucket_index_pool_suffix, placement.index_pool);
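
The two new zone pools default to "<zone>.rgw.meta:accounts" and "<zone>.rgw.meta:groups", mirroring the other metadata pools. A simplified stand-in for the fix_zone_pool_dup() fallback (the real helper also renames on collisions with pools already in use):

#include <iostream>
#include <set>
#include <string>

std::string fix_pool(std::set<std::string>& pools, const std::string& zone,
                     const std::string& suffix, std::string current) {
  if (current.empty())
    current = zone + suffix;           // e.g. "myzone.rgw.meta:accounts"
  pools.insert(current);               // naive: no collision handling here
  return current;
}

int main() {
  std::set<std::string> pools;
  std::cout << fix_pool(pools, "myzone", ".rgw.meta:accounts", "") << '\n'
            << fix_pool(pools, "myzone", ".rgw.meta:groups", "") << '\n';
}
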
diff --git a/src/rgw/services/svc_bucket.h b/src/rgw/services/svc_bucket.h
index caf6e029452..5963c54171f 100644
--- a/src/rgw/services/svc_bucket.h
+++ b/src/rgw/services/svc_bucket.h
@@ -104,7 +104,7 @@ public:
const DoutPrefixProvider *dpp) = 0;
virtual int read_buckets_stats(RGWSI_Bucket_X_Ctx& ctx,
- std::map<std::string, RGWBucketEnt>& m,
+ std::vector<RGWBucketEnt>& buckets,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
};
diff --git a/src/rgw/services/svc_bucket_sobj.cc b/src/rgw/services/svc_bucket_sobj.cc
index 9927cc2d3a9..41e7b02e175 100644
--- a/src/rgw/services/svc_bucket_sobj.cc
+++ b/src/rgw/services/svc_bucket_sobj.cc
@@ -626,13 +626,11 @@ int RGWSI_Bucket_SObj::read_bucket_stats(RGWSI_Bucket_X_Ctx& ctx,
}
int RGWSI_Bucket_SObj::read_buckets_stats(RGWSI_Bucket_X_Ctx& ctx,
- map<string, RGWBucketEnt>& m,
+ std::vector<RGWBucketEnt>& buckets,
optional_yield y,
const DoutPrefixProvider *dpp)
{
- map<string, RGWBucketEnt>::iterator iter;
- for (iter = m.begin(); iter != m.end(); ++iter) {
- RGWBucketEnt& ent = iter->second;
+ for (auto& ent : buckets) {
int r = read_bucket_stats(ctx, ent.bucket, &ent, y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): read_bucket_stats returned r=" << r << dendl;
@@ -640,5 +638,5 @@ int RGWSI_Bucket_SObj::read_buckets_stats(RGWSI_Bucket_X_Ctx& ctx,
}
}
- return m.size();
+ return buckets.size();
}
diff --git a/src/rgw/services/svc_bucket_sobj.h b/src/rgw/services/svc_bucket_sobj.h
index 7a466ca37dd..9b95ca18fa4 100644
--- a/src/rgw/services/svc_bucket_sobj.h
+++ b/src/rgw/services/svc_bucket_sobj.h
@@ -173,7 +173,7 @@ public:
const DoutPrefixProvider *dpp) override;
int read_buckets_stats(RGWSI_Bucket_X_Ctx& ctx,
- std::map<std::string, RGWBucketEnt>& m,
+ std::vector<RGWBucketEnt>& buckets,
optional_yield y,
const DoutPrefixProvider *dpp) override;
};
diff --git a/src/rgw/services/svc_user.h b/src/rgw/services/svc_user.h
index f53ec49f259..7149f3e2197 100644
--- a/src/rgw/services/svc_user.h
+++ b/src/rgw/services/svc_user.h
@@ -22,7 +22,7 @@
#include "rgw_service.h"
#include "rgw_sal_fwd.h"
-class RGWUserBuckets;
+struct RGWUID;
class RGWSI_User : public RGWServiceInstance
{
@@ -42,6 +42,8 @@ public:
/* base svc_user interfaces */
+ virtual rgw_raw_obj get_buckets_obj(const rgw_user& user_id) const = 0;
+
virtual int read_user_info(RGWSI_MetaBackend::Context *ctx,
const rgw_user& user,
RGWUserInfo *info,
@@ -71,6 +73,7 @@ public:
virtual int get_user_info_by_email(RGWSI_MetaBackend::Context *ctx,
const std::string& email, RGWUserInfo *info,
RGWObjVersionTracker *objv_tracker,
+ std::map<std::string, bufferlist>* pattrs,
real_time *pmtime,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
@@ -78,6 +81,7 @@ public:
const std::string& swift_name,
RGWUserInfo *info, /* out */
RGWObjVersionTracker * const objv_tracker,
+ std::map<std::string, bufferlist>* pattrs,
real_time * const pmtime,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
@@ -85,44 +89,11 @@ public:
const std::string& access_key,
RGWUserInfo *info,
RGWObjVersionTracker* objv_tracker,
+ std::map<std::string, bufferlist>* pattrs,
real_time *pmtime,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
-
- virtual int add_bucket(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- const rgw_bucket& bucket,
- ceph::real_time creation_time,
- optional_yield y) = 0;
- virtual int remove_bucket(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- const rgw_bucket& _bucket, optional_yield) = 0;
- virtual int list_buckets(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- const std::string& marker,
- const std::string& end_marker,
- uint64_t max,
- RGWUserBuckets *buckets,
- bool *is_truncated,
- optional_yield y) = 0;
-
- virtual int flush_bucket_stats(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- const RGWBucketEnt& ent, optional_yield y) = 0;
- virtual int complete_flush_stats(const DoutPrefixProvider *dpp,
- const rgw_user& user, optional_yield y) = 0;
- virtual int reset_bucket_stats(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- optional_yield y) = 0;
- virtual int read_stats(const DoutPrefixProvider *dpp,
- RGWSI_MetaBackend::Context *ctx,
- const rgw_user& user, RGWStorageStats *stats,
- ceph::real_time *last_stats_sync, /* last time a full stats sync completed */
- ceph::real_time *last_stats_update,
- optional_yield y) = 0; /* last time a stats update was done */
-
- virtual int read_stats_async(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- boost::intrusive_ptr<rgw::sal::ReadStatsCB> cb) = 0;
+ virtual int read_email_index(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view email, RGWUID& uid) = 0;
};
diff --git a/src/rgw/services/svc_user_rados.cc b/src/rgw/services/svc_user_rados.cc
index 0d01c96d481..a7cdc08185e 100644
--- a/src/rgw/services/svc_user_rados.cc
+++ b/src/rgw/services/svc_user_rados.cc
@@ -13,12 +13,16 @@
#include "svc_sync_modules.h"
#include "rgw_user.h"
+#include "rgw_account.h"
#include "rgw_bucket.h"
#include "rgw_tools.h"
#include "rgw_zone.h"
#include "rgw_rados.h"
-#include "cls/user/cls_user_client.h"
+#include "driver/rados/account.h"
+#include "driver/rados/buckets.h"
+#include "driver/rados/group.h"
+#include "driver/rados/users.h"
#define dout_subsys ceph_subsys_rgw
@@ -136,8 +140,8 @@ int RGWSI_User_RADOS::read_user_info(RGWSI_MetaBackend::Context *ctx,
auto iter = bl.cbegin();
try {
decode(user_id, iter);
- if (user_id.user_id != user) {
- ldpp_dout(dpp, -1) << "ERROR: rgw_get_user_info_by_uid(): user id mismatch: " << user_id.user_id << " != " << user << dendl;
+ if (rgw_user{user_id.id} != user) {
+ ldpp_dout(dpp, -1) << "ERROR: rgw_get_user_info_by_uid(): user id mismatch: " << user_id.id << " != " << user << dendl;
return -EIO;
}
if (!iter.end()) {
@@ -151,6 +155,21 @@ int RGWSI_User_RADOS::read_user_info(RGWSI_MetaBackend::Context *ctx,
return 0;
}
+// simple struct and function to help decide whether we need to add/remove
+// links to the account users index
+struct users_entry {
+ std::string_view account_id, path, name;
+ constexpr operator bool() { return !account_id.empty(); }
+ constexpr auto operator<=>(const users_entry&) const = default;
+};
+
+static users_entry account_users_link(const RGWUserInfo* info) {
+ if (info && !info->account_id.empty()) {
+ return {info->account_id, info->path, info->display_name};
+ }
+ return {};
+}
+
static bool s3_key_active(const RGWUserInfo* info, const std::string& id) {
if (!info) {
return false;
@@ -170,6 +189,7 @@ static bool swift_key_active(const RGWUserInfo* info, const std::string& id) {
class PutOperation
{
RGWSI_User_RADOS::Svc& svc;
+ librados::Rados& rados;
RGWSI_MetaBackend_SObj::Context_SObj *ctx;
RGWUID ui;
const RGWUserInfo& info;
@@ -190,6 +210,7 @@ class PutOperation
public:
PutOperation(RGWSI_User_RADOS::Svc& svc,
+ librados::Rados& rados,
RGWSI_MetaBackend::Context *_ctx,
const RGWUserInfo& info,
RGWUserInfo *old_info,
@@ -198,11 +219,11 @@ public:
bool exclusive,
map<string, bufferlist> *pattrs,
optional_yield y) :
- svc(svc), info(info), old_info(old_info),
+ svc(svc), rados(rados), info(info), old_info(old_info),
objv_tracker(objv_tracker), mtime(mtime),
exclusive(exclusive), pattrs(pattrs), y(y) {
ctx = static_cast<RGWSI_MetaBackend_SObj::Context_SObj *>(_ctx);
- ui.user_id = info.user_id;
+ ui.id = info.user_id.to_str();
}
int prepare(const DoutPrefixProvider *dpp) {
@@ -224,7 +245,7 @@ public:
continue;
/* check if swift mapping exists */
RGWUserInfo inf;
- int r = svc.user->get_user_info_by_swift(ctx, id, &inf, nullptr, nullptr, y, dpp);
+ int r = svc.user->get_user_info_by_swift(ctx, id, &inf, nullptr, nullptr, nullptr, y, dpp);
if (r >= 0 && inf.user_id != info.user_id &&
(!old_info || inf.user_id != old_info->user_id)) {
ldpp_dout(dpp, 0) << "WARNING: can't store user info, swift id (" << id
@@ -240,7 +261,7 @@ public:
if (s3_key_active(old_info, id)) // old key already active
continue;
RGWUserInfo inf;
- int r = svc.user->get_user_info_by_access_key(ctx, id, &inf, nullptr, nullptr, y, dpp);
+ int r = svc.user->get_user_info_by_access_key(ctx, id, &inf, nullptr, nullptr, nullptr, y, dpp);
if (r >= 0 && inf.user_id != info.user_id &&
(!old_info || inf.user_id != old_info->user_id)) {
ldpp_dout(dpp, 0) << "WARNING: can't store user info, access key already mapped to another user" << dendl;
@@ -248,6 +269,26 @@ public:
}
}
+ if (account_users_link(&info) &&
+ account_users_link(&info) != account_users_link(old_info)) {
+ if (info.display_name.empty()) {
+ ldpp_dout(dpp, 0) << "WARNING: can't store user info, display name "
+ "can't be empty in an account" << dendl;
+ return -EINVAL;
+ }
+
+ const RGWZoneParams& zone = svc.zone->get_zone_params();
+ const auto& users = rgwrados::account::get_users_obj(zone, info.account_id);
+ std::string existing_uid;
+ int r = rgwrados::users::get(dpp, y, rados, users,
+ info.display_name, existing_uid);
+ if (r >= 0 && existing_uid != info.user_id.id) {
+ ldpp_dout(dpp, 0) << "WARNING: can't store user info, display name "
+ "already exists in account" << dendl;
+ return -EEXIST;
+ }
+ }
+
return 0;
}
@@ -272,9 +313,12 @@ public:
encode(ui, link_bl);
if (!info.user_email.empty()) {
- if (!old_info ||
- old_info->user_email.compare(info.user_email) != 0) { /* only if new index changed */
- ret = rgw_put_system_obj(dpp, svc.sysobj, svc.zone->get_zone_params().user_email_pool, info.user_email,
+ // only if new index changed
+ if (!old_info || !boost::iequals(info.user_email, old_info->user_email)) {
+ // store as lower case for case-insensitive matching
+ std::string oid = info.user_email;
+ boost::to_lower(oid);
+ ret = rgw_put_system_obj(dpp, svc.sysobj, svc.zone->get_zone_params().user_email_pool, oid,
link_bl, exclusive, NULL, real_time(), y);
if (ret < 0)
return ret;
@@ -313,6 +357,42 @@ public:
}
}
+ if (account_users_link(&info) &&
+ account_users_link(&info) != account_users_link(old_info)) {
+ // link the user to its account
+ const RGWZoneParams& zone = svc.zone->get_zone_params();
+ const auto& users = rgwrados::account::get_users_obj(zone, info.account_id);
+ ret = rgwrados::users::add(dpp, y, rados, users, info, false,
+ std::numeric_limits<uint32_t>::max());
+ if (ret < 0) {
+ ldpp_dout(dpp, 20) << "WARNING: failed to link user "
+ << info.user_id << " to account " << info.account_id
+ << ": " << cpp_strerror(ret) << dendl;
+ return ret;
+ }
+ ldpp_dout(dpp, 20) << "linked user " << info.user_id
+ << " to account " << info.account_id << dendl;
+ }
+
+ for (const auto& group_id : info.group_ids) {
+ if (old_info && old_info->group_ids.count(group_id)) {
+ continue;
+ }
+ // link the user to its group
+ const RGWZoneParams& zone = svc.zone->get_zone_params();
+ const auto& users = rgwrados::group::get_users_obj(zone, group_id);
+ ret = rgwrados::users::add(dpp, y, rados, users, info, false,
+ std::numeric_limits<uint32_t>::max());
+ if (ret < 0) {
+ ldpp_dout(dpp, 20) << "WARNING: failed to link user "
+ << info.user_id << " to group " << group_id
+ << ": " << cpp_strerror(ret) << dendl;
+ return ret;
+ }
+ ldpp_dout(dpp, 20) << "linked user " << info.user_id
+ << " to group " << group_id << dendl;
+ }
+
return 0;
}
@@ -333,7 +413,7 @@ public:
}
if (!old_info.user_email.empty() &&
- old_info.user_email != new_info.user_email) {
+ !boost::iequals(old_info.user_email, new_info.user_email)) {
ret = svc.user->remove_email_index(dpp, old_info.user_email, y);
if (ret < 0 && ret != -ENOENT) {
set_err_msg("ERROR: could not remove index for email " + old_info.user_email);
@@ -361,6 +441,32 @@ public:
}
}
+ if (account_users_link(&old_info) &&
+ account_users_link(&old_info) != account_users_link(&info)) {
+ // unlink the old name from its account
+ const RGWZoneParams& zone = svc.zone->get_zone_params();
+ const auto& users = rgwrados::account::get_users_obj(zone, old_info.account_id);
+ ret = rgwrados::users::remove(dpp, y, rados, users, old_info.display_name);
+ if (ret < 0 && ret != -ENOENT) {
+ set_err_msg("ERROR: could not unlink from account " + old_info.account_id);
+ return ret;
+ }
+ }
+
+ for (const auto& group_id : old_info.group_ids) {
+ if (info.group_ids.count(group_id)) {
+ continue;
+ }
+ // remove from the old group
+ const RGWZoneParams& zone = svc.zone->get_zone_params();
+ const auto& users = rgwrados::group::get_users_obj(zone, group_id);
+ ret = rgwrados::users::remove(dpp, y, rados, users, old_info.display_name);
+ if (ret < 0 && ret != -ENOENT) {
+ set_err_msg("ERROR: could not unlink from group " + group_id);
+ return ret;
+ }
+ }
+
return 0;
}
@@ -379,7 +485,7 @@ int RGWSI_User_RADOS::store_user_info(RGWSI_MetaBackend::Context *ctx,
optional_yield y,
const DoutPrefixProvider *dpp)
{
- PutOperation op(svc, ctx,
+ PutOperation op(svc, *rados, ctx,
info, old_info,
objv_tracker,
mtime, exclusive,
@@ -420,7 +526,9 @@ int RGWSI_User_RADOS::remove_email_index(const DoutPrefixProvider *dpp,
if (email.empty()) {
return 0;
}
- rgw_raw_obj obj(svc.zone->get_zone_params().user_email_pool, email);
+ std::string oid = email;
+ boost::to_lower(oid);
+ rgw_raw_obj obj(svc.zone->get_zone_params().user_email_pool, oid);
auto sysobj = svc.sysobj->get_obj(obj);
return sysobj.wop().remove(dpp, y);
}
@@ -481,13 +589,36 @@ int RGWSI_User_RADOS::remove_user_info(RGWSI_MetaBackend::Context *ctx,
return ret;
}
- rgw_raw_obj uid_bucks = get_buckets_obj(info.user_id);
- ldpp_dout(dpp, 10) << "removing user buckets index" << dendl;
- auto sysobj = svc.sysobj->get_obj(uid_bucks);
- ret = sysobj.wop().remove(dpp, y);
- if (ret < 0 && ret != -ENOENT) {
- ldpp_dout(dpp, 0) << "ERROR: could not remove " << info.user_id << ":" << uid_bucks << ", should be fixed (err=" << ret << ")" << dendl;
- return ret;
+ if (info.account_id.empty()) {
+ rgw_raw_obj uid_bucks = get_buckets_obj(info.user_id);
+ ldpp_dout(dpp, 10) << "removing user buckets index" << dendl;
+ auto sysobj = svc.sysobj->get_obj(uid_bucks);
+ ret = sysobj.wop().remove(dpp, y);
+ if (ret < 0 && ret != -ENOENT) {
+ ldpp_dout(dpp, 0) << "ERROR: could not remove " << info.user_id << ":" << uid_bucks << ", should be fixed (err=" << ret << ")" << dendl;
+ return ret;
+ }
+ } else if (info.type != TYPE_ROOT) {
+ // unlink the name from its account
+ const RGWZoneParams& zone = svc.zone->get_zone_params();
+ const auto& users = rgwrados::account::get_users_obj(zone, info.account_id);
+ ret = rgwrados::users::remove(dpp, y, *rados, users, info.display_name);
+ if (ret < 0) {
+ ldpp_dout(dpp, 0) << "ERROR: could not unlink from account "
+ << info.account_id << ": " << cpp_strerror(ret) << dendl;
+ return ret;
+ }
+ }
+
+ for (const auto& group_id : info.group_ids) {
+ const RGWZoneParams& zone = svc.zone->get_zone_params();
+ const auto& users = rgwrados::group::get_users_obj(zone, group_id);
+ ret = rgwrados::users::remove(dpp, y, *rados, users, info.display_name);
+ if (ret < 0 && ret != -ENOENT) {
+ ldpp_dout(dpp, 0) << "ERROR: could not unlink from group "
+ << group_id << ": " << cpp_strerror(ret) << dendl;
+ return ret;
+ }
}
ret = remove_uid_index(ctx, info, objv_tracker, y, dpp);
@@ -516,11 +647,32 @@ int RGWSI_User_RADOS::remove_uid_index(RGWSI_MetaBackend::Context *ctx, const RG
return 0;
}
+static int read_index(const DoutPrefixProvider* dpp, optional_yield y,
+ RGWSI_SysObj* svc_sysobj, const rgw_pool& pool,
+ const std::string& key, ceph::real_time* mtime,
+ RGWUID& uid)
+{
+ bufferlist bl;
+ int r = rgw_get_system_obj(svc_sysobj, pool, key, bl,
+ nullptr, mtime, y, dpp);
+ if (r < 0) {
+ return r;
+ }
+ try {
+ auto iter = bl.cbegin();
+ decode(uid, iter);
+ } catch (const buffer::error&) {
+ return -EIO;
+ }
+ return 0;
+}
+
int RGWSI_User_RADOS::get_user_info_from_index(RGWSI_MetaBackend::Context* ctx,
const string& key,
const rgw_pool& pool,
RGWUserInfo *info,
RGWObjVersionTracker* objv_tracker,
+ std::map<std::string, bufferlist>* pattrs,
real_time* pmtime, optional_yield y,
const DoutPrefixProvider* dpp)
{
@@ -530,34 +682,31 @@ int RGWSI_User_RADOS::get_user_info_from_index(RGWSI_MetaBackend::Context* ctx,
*info = e->info;
if (objv_tracker)
*objv_tracker = e->objv_tracker;
+ if (pattrs)
+ *pattrs = e->attrs;
if (pmtime)
*pmtime = e->mtime;
return 0;
}
user_info_cache_entry e;
- bufferlist bl;
RGWUID uid;
- int ret = rgw_get_system_obj(svc.sysobj, pool, key, bl, nullptr, &e.mtime, y, dpp);
- if (ret < 0)
+ int ret = read_index(dpp, y, svc.sysobj, pool, key, &e.mtime, uid);
+ if (ret < 0) {
return ret;
+ }
- rgw_cache_entry_info cache_info;
-
- auto iter = bl.cbegin();
- try {
- decode(uid, iter);
+ if (rgw::account::validate_id(uid.id)) {
+ // this index is used for an account, not a user
+ return -ENOENT;
+ }
- int ret = read_user_info(ctx, uid.user_id,
- &e.info, &e.objv_tracker, nullptr, &cache_info, nullptr,
- y, dpp);
- if (ret < 0) {
- return ret;
- }
- } catch (buffer::error& err) {
- ldpp_dout(dpp, 0) << "ERROR: failed to decode user info, caught buffer::error" << dendl;
- return -EIO;
+ rgw_cache_entry_info cache_info;
+ ret = read_user_info(ctx, rgw_user{uid.id}, &e.info, &e.objv_tracker,
+ nullptr, &cache_info, &e.attrs, y, dpp);
+ if (ret < 0) {
+ return ret;
}
uinfo_cache->put(dpp, svc.cache, cache_key, &e, { &cache_info });
@@ -567,6 +716,9 @@ int RGWSI_User_RADOS::get_user_info_from_index(RGWSI_MetaBackend::Context* ctx,
*objv_tracker = e.objv_tracker;
if (pmtime)
*pmtime = e.mtime;
+ ldpp_dout(dpp, 20) << "get_user_info_from_index found " << e.attrs.size() << " xattrs" << dendl;
+ if (pattrs)
+ *pattrs = std::move(e.attrs);
return 0;
}
@@ -578,11 +730,14 @@ int RGWSI_User_RADOS::get_user_info_from_index(RGWSI_MetaBackend::Context* ctx,
int RGWSI_User_RADOS::get_user_info_by_email(RGWSI_MetaBackend::Context *ctx,
const string& email, RGWUserInfo *info,
RGWObjVersionTracker *objv_tracker,
+ std::map<std::string, bufferlist>* pattrs,
real_time *pmtime, optional_yield y,
const DoutPrefixProvider *dpp)
{
- return get_user_info_from_index(ctx, email, svc.zone->get_zone_params().user_email_pool,
- info, objv_tracker, pmtime, y, dpp);
+ std::string oid = email;
+ boost::to_lower(oid);
+ return get_user_info_from_index(ctx, oid, svc.zone->get_zone_params().user_email_pool,
+ info, objv_tracker, pattrs, pmtime, y, dpp);
}
/**
@@ -593,13 +748,14 @@ int RGWSI_User_RADOS::get_user_info_by_swift(RGWSI_MetaBackend::Context *ctx,
const string& swift_name,
RGWUserInfo *info, /* out */
RGWObjVersionTracker * const objv_tracker,
+ std::map<std::string, bufferlist>* pattrs,
real_time * const pmtime, optional_yield y,
const DoutPrefixProvider *dpp)
{
return get_user_info_from_index(ctx,
swift_name,
svc.zone->get_zone_params().user_swift_pool,
- info, objv_tracker, pmtime, y, dpp);
+ info, objv_tracker, pattrs, pmtime, y, dpp);
}
/**
@@ -610,375 +766,23 @@ int RGWSI_User_RADOS::get_user_info_by_access_key(RGWSI_MetaBackend::Context *ct
const std::string& access_key,
RGWUserInfo *info,
RGWObjVersionTracker* objv_tracker,
+ std::map<std::string, bufferlist>* pattrs,
real_time *pmtime, optional_yield y,
const DoutPrefixProvider *dpp)
{
return get_user_info_from_index(ctx,
access_key,
svc.zone->get_zone_params().user_keys_pool,
- info, objv_tracker, pmtime, y, dpp);
-}
-
-int RGWSI_User_RADOS::cls_user_update_buckets(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, list<cls_user_bucket_entry>& entries, bool add, optional_yield y)
-{
- rgw_rados_ref rados_obj;
- int r = rgw_get_rados_ref(dpp, rados, obj, &rados_obj);
- if (r < 0) {
- return r;
- }
-
- librados::ObjectWriteOperation op;
- cls_user_set_buckets(op, entries, add);
- r = rados_obj.operate(dpp, &op, y);
- if (r < 0) {
- return r;
- }
-
- return 0;
-}
-
-int RGWSI_User_RADOS::cls_user_add_bucket(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, const cls_user_bucket_entry& entry, optional_yield y)
-{
- list<cls_user_bucket_entry> l;
- l.push_back(entry);
-
- return cls_user_update_buckets(dpp, obj, l, true, y);
-}
-
-int RGWSI_User_RADOS::cls_user_remove_bucket(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, const cls_user_bucket& bucket, optional_yield y)
-{
- rgw_rados_ref rados_obj;
- int r = rgw_get_rados_ref(dpp, rados, obj, &rados_obj);
- if (r < 0) {
- return r;
- }
-
- librados::ObjectWriteOperation op;
- ::cls_user_remove_bucket(op, bucket);
- r = rados_obj.operate(dpp, &op, y);
- if (r < 0)
- return r;
-
- return 0;
-}
-
-int RGWSI_User_RADOS::add_bucket(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- const rgw_bucket& bucket,
- ceph::real_time creation_time,
- optional_yield y)
-{
- int ret;
-
- cls_user_bucket_entry new_bucket;
-
- bucket.convert(&new_bucket.bucket);
- new_bucket.size = 0;
- if (real_clock::is_zero(creation_time))
- new_bucket.creation_time = real_clock::now();
- else
- new_bucket.creation_time = creation_time;
-
- rgw_raw_obj obj = get_buckets_obj(user);
- ret = cls_user_add_bucket(dpp, obj, new_bucket, y);
- if (ret < 0) {
- ldpp_dout(dpp, 0) << "ERROR: error adding bucket to user: ret=" << ret << dendl;
- return ret;
- }
-
- return 0;
-}
-
-
-int RGWSI_User_RADOS::remove_bucket(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- const rgw_bucket& _bucket,
- optional_yield y)
-{
- cls_user_bucket bucket;
- bucket.name = _bucket.name;
- rgw_raw_obj obj = get_buckets_obj(user);
- int ret = cls_user_remove_bucket(dpp, obj, bucket, y);
- if (ret < 0) {
- ldpp_dout(dpp, 0) << "ERROR: error removing bucket from user: ret=" << ret << dendl;
- }
-
- return 0;
-}
-
-int RGWSI_User_RADOS::cls_user_flush_bucket_stats(const DoutPrefixProvider *dpp,
- rgw_raw_obj& user_obj,
- const RGWBucketEnt& ent, optional_yield y)
-{
- cls_user_bucket_entry entry;
- ent.convert(&entry);
-
- list<cls_user_bucket_entry> entries;
- entries.push_back(entry);
-
- int r = cls_user_update_buckets(dpp, user_obj, entries, false, y);
- if (r < 0) {
- ldpp_dout(dpp, 20) << "cls_user_update_buckets() returned " << r << dendl;
- return r;
- }
-
- return 0;
-}
-
-int RGWSI_User_RADOS::cls_user_list_buckets(const DoutPrefixProvider *dpp,
- rgw_raw_obj& obj,
- const string& in_marker,
- const string& end_marker,
- const int max_entries,
- list<cls_user_bucket_entry>& entries,
- string * const out_marker,
- bool * const truncated,
- optional_yield y)
-{
- rgw_rados_ref rados_obj;
- int r = rgw_get_rados_ref(dpp, rados, obj, &rados_obj);
- if (r < 0) {
- return r;
- }
-
- librados::ObjectReadOperation op;
- int rc;
-
- cls_user_bucket_list(op, in_marker, end_marker, max_entries, entries, out_marker, truncated, &rc);
- bufferlist ibl;
- r = rados_obj.operate(dpp, &op, &ibl, y);
- if (r < 0)
- return r;
- if (rc < 0)
- return rc;
-
- return 0;
-}
-
-int RGWSI_User_RADOS::list_buckets(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- const string& marker,
- const string& end_marker,
- uint64_t max,
- RGWUserBuckets *buckets,
- bool *is_truncated, optional_yield y)
-{
- int ret;
-
- buckets->clear();
- if (user.id == RGW_USER_ANON_ID) {
- ldpp_dout(dpp, 20) << "RGWSI_User_RADOS::list_buckets(): anonymous user" << dendl;
- *is_truncated = false;
- return 0;
- }
- rgw_raw_obj obj = get_buckets_obj(user);
-
- bool truncated = false;
- string m = marker;
-
- uint64_t total = 0;
-
- do {
- std::list<cls_user_bucket_entry> entries;
- ret = cls_user_list_buckets(dpp, obj, m, end_marker, max - total, entries, &m, &truncated, y);
- if (ret == -ENOENT) {
- ret = 0;
- }
-
- if (ret < 0) {
- return ret;
- }
-
- for (auto& entry : entries) {
- buckets->add(RGWBucketEnt(user, std::move(entry)));
- total++;
- }
-
- } while (truncated && total < max);
-
- if (is_truncated) {
- *is_truncated = truncated;
- }
-
- return 0;
+ info, objv_tracker, pattrs, pmtime, y, dpp);
}
-int RGWSI_User_RADOS::flush_bucket_stats(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- const RGWBucketEnt& ent,
- optional_yield y)
+int RGWSI_User_RADOS::read_email_index(const DoutPrefixProvider* dpp,
+ optional_yield y,
+ std::string_view email,
+ RGWUID& uid)
{
- rgw_raw_obj obj = get_buckets_obj(user);
-
- return cls_user_flush_bucket_stats(dpp, obj, ent, y);
+ const rgw_pool& pool = svc.zone->get_zone_params().user_email_pool;
+ std::string oid{email};
+ boost::to_lower(oid);
+ return read_index(dpp, y, svc.sysobj, pool, oid, nullptr, uid);
}
-
-int RGWSI_User_RADOS::reset_bucket_stats(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- optional_yield y)
-{
- return cls_user_reset_stats(dpp, user, y);
-}
-
-int RGWSI_User_RADOS::cls_user_reset_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y)
-{
- rgw_raw_obj obj = get_buckets_obj(user);
- rgw_rados_ref rados_obj;
- int r = rgw_get_rados_ref(dpp, rados, obj, &rados_obj);
- if (r < 0) {
- return r;
- }
-
- int rval;
-
- cls_user_reset_stats2_op call;
- cls_user_reset_stats2_ret ret;
-
- do {
- buffer::list in, out;
- librados::ObjectWriteOperation op;
-
- call.time = real_clock::now();
- ret.update_call(call);
-
- encode(call, in);
- op.exec("user", "reset_user_stats2", in, &out, &rval);
- r = rados_obj.operate(dpp, &op, y, librados::OPERATION_RETURNVEC);
- if (r < 0) {
- return r;
- }
- try {
- auto bliter = out.cbegin();
- decode(ret, bliter);
- } catch (ceph::buffer::error& err) {
- return -EINVAL;
- }
- } while (ret.truncated);
-
- return rval;
-}
-
-int RGWSI_User_RADOS::complete_flush_stats(const DoutPrefixProvider *dpp,
- const rgw_user& user, optional_yield y)
-{
- rgw_raw_obj obj = get_buckets_obj(user);
- rgw_rados_ref rados_obj;
- int r = rgw_get_rados_ref(dpp, rados, obj, &rados_obj);
- if (r < 0) {
- return r;
- }
-
- librados::ObjectWriteOperation op;
- ::cls_user_complete_stats_sync(op);
- return rados_obj.operate(dpp, &op, y);
-}
-
-int RGWSI_User_RADOS::cls_user_get_header(const DoutPrefixProvider *dpp,
- const rgw_user& user, cls_user_header *header,
- optional_yield y)
-{
- rgw_raw_obj obj = get_buckets_obj(user);
- rgw_rados_ref rados_obj;
- int r = rgw_get_rados_ref(dpp, rados, obj, &rados_obj);
- if (r < 0) {
- return r;
- }
- int rc;
- bufferlist ibl;
- librados::ObjectReadOperation op;
- ::cls_user_get_header(op, header, &rc);
- return rados_obj.operate(dpp, &op, &ibl, y);
-}
-
-int RGWSI_User_RADOS::cls_user_get_header_async(const DoutPrefixProvider *dpp, const string& user_str, RGWGetUserHeader_CB *cb)
-{
- rgw_raw_obj obj = get_buckets_obj(rgw_user(user_str));
- rgw_rados_ref ref;
- int r = rgw_get_rados_ref(dpp, rados, obj, &ref);
- if (r < 0) {
- return r;
- }
-
- r = ::cls_user_get_header_async(ref.ioctx, ref.obj.oid, cb);
- if (r < 0) {
- return r;
- }
-
- return 0;
-}
-
-int RGWSI_User_RADOS::read_stats(const DoutPrefixProvider *dpp,
- RGWSI_MetaBackend::Context *ctx,
- const rgw_user& user, RGWStorageStats *stats,
- ceph::real_time *last_stats_sync,
- ceph::real_time *last_stats_update,
- optional_yield y)
-{
- string user_str = user.to_str();
-
- RGWUserInfo info;
- real_time mtime;
- int ret = read_user_info(ctx, user, &info, nullptr, &mtime, nullptr, nullptr, y, dpp);
- if (ret < 0)
- {
- return ret;
- }
-
- cls_user_header header;
- int r = cls_user_get_header(dpp, rgw_user(user_str), &header, y);
- if (r < 0 && r != -ENOENT)
- return r;
-
- const cls_user_stats& hs = header.stats;
-
- stats->size = hs.total_bytes;
- stats->size_rounded = hs.total_bytes_rounded;
- stats->num_objects = hs.total_entries;
-
- if (last_stats_sync) {
- *last_stats_sync = header.last_stats_sync;
- }
-
- if (last_stats_update) {
- *last_stats_update = header.last_stats_update;
- }
-
- return 0;
-}
-
-class RGWGetUserStatsContext : public RGWGetUserHeader_CB {
- boost::intrusive_ptr<rgw::sal::ReadStatsCB> cb;
-
-public:
- explicit RGWGetUserStatsContext(boost::intrusive_ptr<rgw::sal::ReadStatsCB> cb)
- : cb(std::move(cb)) {}
-
- void handle_response(int r, cls_user_header& header) override {
- const cls_user_stats& hs = header.stats;
- RGWStorageStats stats;
-
- stats.size = hs.total_bytes;
- stats.size_rounded = hs.total_bytes_rounded;
- stats.num_objects = hs.total_entries;
-
- cb->handle_response(r, stats);
- cb.reset();
- }
-};
-
-int RGWSI_User_RADOS::read_stats_async(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- boost::intrusive_ptr<rgw::sal::ReadStatsCB> _cb)
-{
- string user_str = user.to_str();
-
- RGWGetUserStatsContext *cb = new RGWGetUserStatsContext(std::move(_cb));
- int r = cls_user_get_header_async(dpp, user_str, cb);
- if (r < 0) {
- delete cb;
- return r;
- }
-
- return 0;
-}
-
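
Several of the svc_user_rados.cc hunks normalize the email-index object name with boost::to_lower on both store and lookup, making email matching case-insensitive, and remove the old index entry only when the email changes by more than case (boost::iequals). A standalone sketch of the same idea, using the standard library instead of Boost:

#include <algorithm>
#include <cctype>
#include <iostream>
#include <map>
#include <string>

static std::string lower(std::string s) {
  std::transform(s.begin(), s.end(), s.begin(),
                 [](unsigned char c) { return std::tolower(c); });
  return s;
}

int main() {
  std::map<std::string, std::string> email_index;          // oid -> user id
  email_index[lower("Alice@Example.COM")] = "alice";       // store lowercased
  auto it = email_index.find(lower("ALICE@example.com"));  // lookup likewise
  std::cout << (it == email_index.end() ? "miss" : it->second) << '\n';
}
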
diff --git a/src/rgw/services/svc_user_rados.h b/src/rgw/services/svc_user_rados.h
index 6912327f0b1..406024e6fa8 100644
--- a/src/rgw/services/svc_user_rados.h
+++ b/src/rgw/services/svc_user_rados.h
@@ -47,19 +47,21 @@ class RGWSI_User_RADOS : public RGWSI_User
struct user_info_cache_entry {
RGWUserInfo info;
RGWObjVersionTracker objv_tracker;
+ std::map<std::string, bufferlist> attrs;
real_time mtime;
};
using RGWChainedCacheImpl_user_info_cache_entry = RGWChainedCacheImpl<user_info_cache_entry>;
std::unique_ptr<RGWChainedCacheImpl_user_info_cache_entry> uinfo_cache;
- rgw_raw_obj get_buckets_obj(const rgw_user& user_id) const;
+ rgw_raw_obj get_buckets_obj(const rgw_user& user_id) const override;
int get_user_info_from_index(RGWSI_MetaBackend::Context *ctx,
const std::string& key,
const rgw_pool& pool,
RGWUserInfo *info,
RGWObjVersionTracker * const objv_tracker,
+ std::map<std::string, bufferlist>* pattrs,
real_time * const pmtime,
optional_yield y,
const DoutPrefixProvider *dpp);
@@ -71,28 +73,6 @@ class RGWSI_User_RADOS : public RGWSI_User
int remove_email_index(const DoutPrefixProvider *dpp, const std::string& email, optional_yield y);
int remove_swift_name_index(const DoutPrefixProvider *dpp, const std::string& swift_name, optional_yield y);
- /* admin management */
- int cls_user_update_buckets(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, std::list<cls_user_bucket_entry>& entries, bool add, optional_yield y);
- int cls_user_add_bucket(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, const cls_user_bucket_entry& entry, optional_yield y);
- int cls_user_remove_bucket(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, const cls_user_bucket& bucket, optional_yield y);
-
- /* quota stats */
- int cls_user_flush_bucket_stats(const DoutPrefixProvider *dpp, rgw_raw_obj& user_obj,
- const RGWBucketEnt& ent, optional_yield y);
- int cls_user_list_buckets(const DoutPrefixProvider *dpp,
- rgw_raw_obj& obj,
- const std::string& in_marker,
- const std::string& end_marker,
- const int max_entries,
- std::list<cls_user_bucket_entry>& entries,
- std::string * const out_marker,
- bool * const truncated,
- optional_yield y);
-
- int cls_user_reset_stats(const DoutPrefixProvider *dpp, const rgw_user& user, optional_yield y);
- int cls_user_get_header(const DoutPrefixProvider *dpp, const rgw_user& user, cls_user_header *header, optional_yield y);
- int cls_user_get_header_async(const DoutPrefixProvider *dpp, const std::string& user, RGWGetUserHeader_CB *cb);
-
int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
public:
librados::Rados* rados{nullptr};
@@ -149,6 +129,7 @@ public:
int get_user_info_by_email(RGWSI_MetaBackend::Context *ctx,
const std::string& email, RGWUserInfo *info,
RGWObjVersionTracker *objv_tracker,
+ std::map<std::string, bufferlist>* pattrs,
real_time *pmtime,
optional_yield y,
const DoutPrefixProvider *dpp) override;
@@ -156,6 +137,7 @@ public:
const std::string& swift_name,
RGWUserInfo *info, /* out */
RGWObjVersionTracker * const objv_tracker,
+ std::map<std::string, bufferlist>* pattrs,
real_time * const pmtime,
optional_yield y,
const DoutPrefixProvider *dpp) override;
@@ -163,49 +145,11 @@ public:
const std::string& access_key,
RGWUserInfo *info,
RGWObjVersionTracker* objv_tracker,
+ std::map<std::string, bufferlist>* pattrs,
real_time *pmtime,
optional_yield y,
const DoutPrefixProvider *dpp) override;
- /* user buckets directory */
-
- int add_bucket(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- const rgw_bucket& bucket,
- ceph::real_time creation_time,
- optional_yield y) override;
- int remove_bucket(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- const rgw_bucket& _bucket,
- optional_yield y) override;
- int list_buckets(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- const std::string& marker,
- const std::string& end_marker,
- uint64_t max,
- RGWUserBuckets *buckets,
- bool *is_truncated,
- optional_yield y) override;
-
- /* quota related */
- int flush_bucket_stats(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- const RGWBucketEnt& ent, optional_yield y) override;
-
- int complete_flush_stats(const DoutPrefixProvider *dpp,
- const rgw_user& user, optional_yield y) override;
-
- int reset_bucket_stats(const DoutPrefixProvider *dpp,
- const rgw_user& user,
- optional_yield y) override;
- int read_stats(const DoutPrefixProvider *dpp,
- RGWSI_MetaBackend::Context *ctx,
- const rgw_user& user, RGWStorageStats *stats,
- ceph::real_time *last_stats_sync, /* last time a full stats sync completed */
- ceph::real_time *last_stats_update,
- optional_yield y) override; /* last time a stats update was done */
-
- int read_stats_async(const DoutPrefixProvider *dpp, const rgw_user& user,
- boost::intrusive_ptr<rgw::sal::ReadStatsCB> cb) override;
+ int read_email_index(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view email, RGWUID& uid) override;
};
-
diff --git a/src/script/ptl-tool.py b/src/script/ptl-tool.py
index 176f61f22c8..1d3778f9ad4 100755
--- a/src/script/ptl-tool.py
+++ b/src/script/ptl-tool.py
@@ -3,20 +3,46 @@
# README:
#
# This tool's purpose is to make it easier to merge PRs into test branches and
-# into main. Make sure you generate a Personal access token in GitHub and
-# add it your ~/.github.key.
+# into main.
#
-# Because developers often have custom names for the ceph upstream remote
-# (https://github.com/ceph/ceph.git), You will probably want to export the
-# PTL_TOOL_BASE_PATH environment variable in your shell rc files before using
-# this script:
#
-# export PTL_TOOL_BASE_PATH=refs/remotes/<remotename>/
+# == Getting Started ==
#
-# and PTL_TOOL_BASE_REMOTE as the name of your Ceph upstream remote (default: "upstream"):
+# You will probably want to set up a virtualenv for running this script:
#
-# export PTL_TOOL_BASE_REMOTE=<remotename>
+# (
+# virtualenv ~/ptl-venv
+# source ~/ptl-venv/bin/activate
+# pip3 install GitPython
+# pip3 install python-redmine
+# )
#
+# Then run the tool with:
+#
+# (source ~/ptl-venv/bin/activate && python3 src/script/ptl-tool.py --help)
+#
+# Important files in your $HOME:
+#
+# ~/.redmine_key -- Your redmine API key, from the right side of: https://tracker.ceph.com/my/account
+#
+# ~/.github.key -- Your github API key: https://github.com/settings/tokens
+#
+# Some important environment variables:
+#
+# - PTL_TOOL_GITHUB_USER (your github username)
+# - PTL_TOOL_GITHUB_API_KEY (your github api key, or what is stored in ~/.github.key)
+# - PTL_TOOL_REDMINE_USER (your redmine username)
+# - PTL_TOOL_REDMINE_API_KEY (your redmine api key, or what is stored in ~/.redmine_key)
+# - PTL_TOOL_USER (your desired username embedded in test branch names)
+#
+#
+# You can use this tool to create a QA tracker ticket for you:
+#
+# $ python3 ptl-tool.py ... --create-qa --qa-release reef
+#
+# which will populate the ticket with all the usual information and also push a
+# tagged version of your test branch to ceph-ci for posterity.
+
#
# ** Here are some basic examples to get started: **
#
@@ -100,44 +126,69 @@
# TODO
# Look for check failures?
-# redmine issue update: http://www.redmine.org/projects/redmine/wiki/Rest_Issues
import argparse
import codecs
import datetime
-import getpass
-import git
+from getpass import getuser
+import git # https://github.com/gitpython-developers/gitpython
import itertools
import json
import logging
import os
import re
+try:
+ from redminelib import Redmine # https://pypi.org/project/python-redmine/
+except ModuleNotFoundError:
+ Redmine = None
import requests
+import signal
import sys
from os.path import expanduser
-log = logging.getLogger(__name__)
-log.addHandler(logging.StreamHandler())
-log.setLevel(logging.INFO)
-
BASE_PROJECT = os.getenv("PTL_TOOL_BASE_PROJECT", "ceph")
BASE_REPO = os.getenv("PTL_TOOL_BASE_REPO", "ceph")
-BASE_REMOTE = os.getenv("PTL_TOOL_BASE_REMOTE", "upstream")
-BASE_PATH = os.getenv("PTL_TOOL_BASE_PATH", "refs/remotes/upstream/")
+BASE_REMOTE_URL = os.getenv("PTL_TOOL_BASE_REMOTE_URL", f"https://github.com/{BASE_PROJECT}/{BASE_REPO}.git")
+CI_REPO = os.getenv("PTL_TOOL_CI_REPO", "ceph-ci")
+CI_REMOTE_URL = os.getenv("PTL_TOOL_CI_REMOTE_URL", f"git@github.com:{BASE_PROJECT}/{CI_REPO}.git")
GITDIR = os.getenv("PTL_TOOL_GITDIR", ".")
-USER = os.getenv("PTL_TOOL_USER", getpass.getuser())
-with open(expanduser("~/.github.key")) as f:
- PASSWORD = f.read().strip()
-TEST_BRANCH = os.getenv("PTL_TOOL_TEST_BRANCH", "wip-{user}-testing-%Y%m%d.%H%M%S")
-
-SPECIAL_BRANCHES = ('main', 'luminous', 'jewel', 'HEAD')
-
+GITHUB_USER = os.getenv("PTL_TOOL_GITHUB_USER", os.getenv("PTL_TOOL_USER", getuser()))
+GITHUB_API_KEY = None
+try:
+ with open(expanduser("~/.github.key")) as f:
+ GITHUB_API_KEY = f.read().strip()
+except FileNotFoundError:
+ pass
+GITHUB_API_KEY = os.getenv("PTL_TOOL_GITHUB_API_KEY", GITHUB_API_KEY)
INDICATIONS = [
re.compile("(Reviewed-by: .+ <[\w@.-]+>)", re.IGNORECASE),
re.compile("(Acked-by: .+ <[\w@.-]+>)", re.IGNORECASE),
re.compile("(Tested-by: .+ <[\w@.-]+>)", re.IGNORECASE),
]
+REDMINE_CUSTOM_FIELD_ID_SHAMAN_BUILD = 26
+REDMINE_CUSTOM_FIELD_ID_QA_RUNS = 27
+REDMINE_CUSTOM_FIELD_ID_QA_RELEASE = 28
+REDMINE_CUSTOM_FIELD_ID_QA_TAGS = 3
+REDMINE_CUSTOM_FIELD_ID_GIT_BRANCH = 29
+REDMINE_ENDPOINT = "https://tracker.ceph.com"
+REDMINE_PROJECT_QA = "ceph-qa"
+REDMINE_TRACKER_QA = "QA Run"
+REDMINE_USER = os.getenv("PTL_TOOL_REDMINE_USER", getuser())
+REDMINE_API_KEY = None
+try:
+ with open(expanduser("~/.redmine_key")) as f:
+ REDMINE_API_KEY = f.read().strip()
+except FileNotFoundError:
+ pass
+REDMINE_API_KEY = os.getenv("PTL_TOOL_REDMINE_API_KEY", REDMINE_API_KEY)
+SPECIAL_BRANCHES = ('main', 'luminous', 'jewel', 'HEAD')
+TEST_BRANCH = os.getenv("PTL_TOOL_TEST_BRANCH", "wip-{user}-testing-%Y%m%d.%H%M%S")
+USER = os.getenv("PTL_TOOL_USER", getuser())
+
+log = logging.getLogger(__name__)
+log.addHandler(logging.StreamHandler())
+log.setLevel(logging.INFO)
# find containing git dir
git_dir = GITDIR
@@ -162,13 +213,16 @@ with codecs.open(git_dir + "/.githubmap", encoding='utf-8') as f:
BZ_MATCH = re.compile("(.*https?://bugzilla.redhat.com/.*)")
TRACKER_MATCH = re.compile("(.*https?://tracker.ceph.com/.*)")
+def gitauth():
+ return (GITHUB_USER, GITHUB_API_KEY)
+
def get(session, url, params=None, paging=True):
if params is None:
params = {}
params['per_page'] = 100
log.debug(f"Fetching {url}")
- response = session.get(url, auth=(USER, PASSWORD), params=params)
+ response = session.get(url, auth=gitauth(), params=params)
log.debug(f"Response = {response}; links = {response.headers.get('link', '')}")
if response.status_code != 200:
log.error(f"Failed to fetch {url}: {response}")
@@ -182,7 +236,7 @@ def get(session, url, params=None, paging=True):
log.debug(f"Fetching {url}")
new_params = dict(params)
new_params.update({'page': page})
- response = session.get(url, auth=(USER, PASSWORD), params=new_params)
+ response = session.get(url, auth=gitauth(), params=new_params)
log.debug(f"Response = {response}; links = {response.headers.get('link', '')}")
if response.status_code != 200:
log.error(f"Failed to fetch {url}: {response}")
@@ -250,6 +304,8 @@ def get_credits(session, pr, pr_req):
def build_branch(args):
base = args.base
branch = datetime.datetime.utcnow().strftime(args.branch).format(user=USER)
+ if args.branch_release:
+ branch = branch + "-" + args.branch_release
if args.debug_build:
branch = branch + "-debug"
label = args.label
@@ -271,9 +327,10 @@ def build_branch(args):
G = git.Repo(args.git)
- # First get the latest base branch and PRs from BASE_REMOTE
- remote = getattr(G.remotes, BASE_REMOTE)
- remote.fetch()
+ if args.create_qa:
+ log.info("connecting to %s", REDMINE_ENDPOINT)
+ R = Redmine(REDMINE_ENDPOINT, username=REDMINE_USER, key=REDMINE_API_KEY)
+ log.debug("connected")
prs = args.prs
if args.pr_label is not None:
@@ -300,30 +357,35 @@ def build_branch(args):
else:
log.info("Detaching HEAD onto base: {}".format(base))
try:
- base_path = args.base_path + base
- base = next(ref for ref in G.refs if ref.path == base_path)
+ G.git.fetch(BASE_REMOTE_URL, base)
# So we know that we're not on an old test branch, detach HEAD onto ref:
- base.checkout()
- except StopIteration:
+ c = G.commit('FETCH_HEAD')
+ except git.exc.GitCommandError:
+ log.debug("could not fetch %s from %s", base, BASE_REMOTE_URL)
log.info(f"Trying to checkout uninterpreted base {base}")
c = G.commit(base)
- G.git.checkout(c)
+ G.git.checkout(c)
assert G.head.is_detached
+ qa_tracker_description = []
+
for pr in prs:
pr = int(pr)
log.info("Merging PR #{pr}".format(pr=pr))
remote_ref = "refs/pull/{pr}/head".format(pr=pr)
- fi = remote.fetch(remote_ref)
- if len(fi) != 1:
- log.error("PR {pr} does not exist?".format(pr=pr))
+ try:
+ G.git.fetch(BASE_REMOTE_URL, remote_ref)
+ except git.exc.GitCommandError:
+ log.error("could not fetch %s from %s", remote_ref, BASE_REMOTE_URL)
sys.exit(1)
- tip = fi[0].ref.commit
+ tip = G.commit("FETCH_HEAD")
endpoint = f"https://api.github.com/repos/{BASE_PROJECT}/{BASE_REPO}/pulls/{pr}"
response = next(get(session, endpoint, paging=False))
+ qa_tracker_description.append(f'* "PR #{pr}":{response["html_url"]} -- {response["title"].strip()}')
+
message = "Merge PR #%d into %s\n\n* %s:\n" % (pr, merge_branch_name, remote_ref)
for commit in G.iter_commits(rev="HEAD.."+str(tip)):
@@ -354,12 +416,23 @@ def build_branch(args):
G.git.commit("--amend", "--no-edit")
if label:
- req = session.post("https://api.github.com/repos/{project}/{repo}/issues/{pr}/labels".format(pr=pr, project=BASE_PROJECT, repo=BASE_REPO), data=json.dumps([label]), auth=(USER, PASSWORD))
+ req = session.post("https://api.github.com/repos/{project}/{repo}/issues/{pr}/labels".format(pr=pr, project=BASE_PROJECT, repo=BASE_REPO), data=json.dumps([label]), auth=gitauth())
if req.status_code != 200:
log.error("PR #%d could not be labeled %s: %s" % (pr, label, req))
sys.exit(1)
log.info("Labeled PR #{pr} {label}".format(pr=pr, label=label))
+ if args.stop_at_built:
+ log.warning("Stopping execution (SIGSTOP) with built branch for further modification. Foreground when execution should resume (typically `fg`).")
+ old_head = G.head.commit
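+        # SIGSTOP suspends this process until it receives SIGCONT (e.g. via
+        # the shell's `fg`); any commits added to HEAD while suspended are
+        # detected below and appended to the QA tracker description.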
+ signal.raise_signal(signal.SIGSTOP)
+ log.warning("Resuming execution.")
+ new_head = G.head.commit
+ if old_head != new_head:
+ rev = f'{old_head}..{new_head}'
+ for commit in G.iter_commits(rev=rev):
+ qa_tracker_description.append(f'* "commit {commit}":{CI_REMOTE_URL}/commit/{commit} -- {commit.summary}')
+
# If the branch is 'HEAD', leave HEAD detached (but use "main" for commit message)
if branch == 'HEAD':
log.info("Leaving HEAD detached; no branch anchors your commits")
@@ -375,10 +448,60 @@ def build_branch(args):
if created_branch:
# tag it for future reference.
- tag = "testing/%s" % branch
- git.refs.tag.Tag.create(G, tag)
+ tag_name = "testing/%s" % branch
+ tag = git.refs.tag.Tag.create(G, tag_name)
log.info("Created tag %s" % tag)
+ if args.create_qa:
+ if not created_branch:
+ log.error("branch already exists!")
+ sys.exit(1)
+ project = R.project.get(REDMINE_PROJECT_QA)
+ log.debug("got redmine project %s", project)
+ user = R.user.get('current')
+ log.debug("got redmine user %s", user)
+    tracker = None
+    for t in project.trackers:
+        if t['name'] == REDMINE_TRACKER_QA:
+            tracker = t
+            break
+    if tracker is None:
+        log.error("could not find tracker in project: %s", REDMINE_TRACKER_QA)
+        sys.exit(1)
+    log.debug("got redmine tracker %s", tracker)
+
+ # Use hard-coded custom field ids because there is apparently no way to
+ # figure these out via the python library
+ custom_fields = []
+ custom_fields.append({'id': REDMINE_CUSTOM_FIELD_ID_SHAMAN_BUILD, 'value': branch})
+ custom_fields.append({'id': REDMINE_CUSTOM_FIELD_ID_QA_RUNS, 'value': branch})
+ if args.qa_release:
+ custom_fields.append({'id': REDMINE_CUSTOM_FIELD_ID_QA_RELEASE, 'value': args.qa_release})
+ if args.qa_tags:
+ custom_fields.append({'id': REDMINE_CUSTOM_FIELD_ID_QA_TAGS, 'value': args.qa_tags})
+
+ G.git.push(CI_REMOTE_URL, branch) # for shaman
+ G.git.push(CI_REMOTE_URL, tag.name) # for archival
+ origin_url = f'{BASE_PROJECT}/{CI_REPO}/commits/{tag.name}'
+ custom_fields.append({'id': REDMINE_CUSTOM_FIELD_ID_GIT_BRANCH, 'value': origin_url})
+
+    issue_kwargs = {
+        "assigned_to_id": user['id'],
+        "custom_fields": custom_fields,
+        "description": '\n'.join(qa_tracker_description),
+        "project_id": project['id'],
+        "subject": branch,
+        "tracker_id": tracker['id'],
+        "watcher_user_ids": [user['id']],
+    }
+ log.debug("creating issue with kwargs: %s", issue_kwargs)
+ issue = R.issue.create(**issue_kwargs)
+ log.info("created redmine qa issue: %s", issue.url)
+
+
+ for pr in prs:
+ log.debug(f"Posting QA Run in comment for ={pr}")
+ endpoint = f"https://api.github.com/repos/{BASE_PROJECT}/{BASE_REPO}/issues/{pr}/comments"
+ body = f"This PR is under test in [{issue.url}]({issue.url})."
+ r = session.post(endpoint, auth=gitauth(), data=json.dumps({'body':body}))
+ log.debug(f"= {r}")
+
def main():
parser = argparse.ArgumentParser(description="Ceph PTL tool")
default_base = 'main'
@@ -391,17 +514,30 @@ def main():
default_label = False
else:
argv = sys.argv[1:]
+ parser.add_argument('--base', dest='base', action='store', default=default_base, help='base for branch')
parser.add_argument('--branch', dest='branch', action='store', default=default_branch, help='branch to create ("HEAD" leaves HEAD detached; i.e. no branch is made)')
+ parser.add_argument('--branch-release', dest='branch_release', action='store', help='release name to embed in branch (for shaman)')
+ parser.add_argument('--create-qa', dest='create_qa', action='store_true', help='create QA run ticket')
+ parser.add_argument('--debug', dest='debug', action='store_true', help='turn debugging on')
parser.add_argument('--debug-build', dest='debug_build', action='store_true', help='append -debug to branch name prompting ceph-build to build with CMAKE_BUILD_TYPE=Debug')
- parser.add_argument('--merge-branch-name', dest='merge_branch_name', action='store', default=False, help='name of the branch for merge messages')
- parser.add_argument('--base', dest='base', action='store', default=default_base, help='base for branch')
- parser.add_argument('--base-path', dest='base_path', action='store', default=BASE_PATH, help='base for branch')
parser.add_argument('--git-dir', dest='git', action='store', default=git_dir, help='git directory')
parser.add_argument('--label', dest='label', action='store', default=default_label, help='label PRs for testing')
- parser.add_argument('--pr-label', dest='pr_label', action='store', help='label PRs for testing')
+ parser.add_argument('--merge-branch-name', dest='merge_branch_name', action='store', default=False, help='name of the branch for merge messages')
parser.add_argument('--no-credits', dest='credits', action='store_false', help='skip indication search (Reviewed-by, etc.)')
+ parser.add_argument('--pr-label', dest='pr_label', action='store', help='label PRs for testing')
+ parser.add_argument('--qa-release', dest='qa_release', action='store', help='QA release for tracker')
+ parser.add_argument('--qa-tags', dest='qa_tags', action='store', help='QA tags for tracker')
+ parser.add_argument('--stop-at-built', dest='stop_at_built', action='store_true', help='stop execution when branch is built')
parser.add_argument('prs', metavar="PR", type=int, nargs='*', help='Pull Requests to merge')
args = parser.parse_args(argv)
+
+ if args.debug:
+ log.setLevel(logging.DEBUG)
+
+ if args.create_qa and Redmine is None:
+ log.error("redmine library is not available so cannot create qa tracker ticket")
+ sys.exit(1)
+
return build_branch(args)
if __name__ == "__main__":
diff --git a/src/test/CMakeLists.txt b/src/test/CMakeLists.txt
index 63598971bbd..a0c8fcfe823 100644
--- a/src/test/CMakeLists.txt
+++ b/src/test/CMakeLists.txt
@@ -46,11 +46,14 @@ if(NOT WIN32)
add_subdirectory(cls_journal)
add_subdirectory(cls_rbd)
endif(WITH_RBD)
+ if(WITH_RADOSGW)
+ add_subdirectory(cls_rgw)
+ add_subdirectory(cls_rgw_gc)
+ add_subdirectory(cls_user)
+ endif(WITH_RADOSGW)
add_subdirectory(cls_refcount)
- add_subdirectory(cls_rgw)
add_subdirectory(cls_version)
add_subdirectory(cls_lua)
- add_subdirectory(cls_rgw_gc)
add_subdirectory(cls_queue)
add_subdirectory(cls_2pc_queue)
add_subdirectory(cls_cmpomap)
diff --git a/src/test/cli/radosgw-admin/help.t b/src/test/cli/radosgw-admin/help.t
index b31018081e1..e0ee19e6190 100644
--- a/src/test/cli/radosgw-admin/help.t
+++ b/src/test/cli/radosgw-admin/help.t
@@ -11,6 +11,9 @@
user check check user info
user stats show user stats as accounted by quota subsystem
user list list users
+ user policy attach attach a managed policy
+ user policy detach detach a managed policy
+ user policy list attached list attached managed policies
caps add add user capabilities
caps rm remove user capabilities
subuser create create a new subuser
@@ -18,6 +21,12 @@
subuser rm remove subuser
key create create access key
key rm remove access key
+ account create create a new account
+ account modify modify an existing account
+ account get get account info
+ account stats dump account storage stats
+ account rm remove an account
+ account list list all account ids
bucket list list buckets (specify --allow-unordered for faster, unsorted listing)
bucket limit check show bucket sharding stats
bucket link link bucket to specified user
@@ -55,9 +64,9 @@
period list list all periods
period update update the staging period
period commit commit the staging period
- quota set set quota params
- quota enable enable quota
- quota disable disable quota
+ quota set set quota params for a user/bucket/account
+ quota enable enable quota for a user/bucket/account
+ quota disable disable quota for a user/bucket/account
ratelimit get get ratelimit params
ratelimit set set ratelimit params
ratelimit enable enable ratelimit
@@ -162,6 +171,9 @@
role-policy list list policies attached to a role
role-policy get get the specified inline policy document embedded with the given role
role-policy delete remove policy attached to a role
+ role policy attach attach a managed policy
+ role policy detach detach a managed policy
+ role policy list attached list attached managed policies
role update update max_session_duration of a role
reshard add schedule a resharding of a bucket
reshard list list all bucket resharding or scheduled to be resharded
@@ -198,6 +210,12 @@
--uid=<id> user id
--new-uid=<id> new user id
--subuser=<name> subuser name
+ --account-name=<name> account name
+ --account-id=<id> account id
+ --max-users max number of users for an account
+ --max-roles max number of roles for an account
+ --max-groups max number of groups for an account
+ --max-access-keys max number of keys per user for an account
--access-key=<key> S3 access key
--email=<email> user's email address
--secret/--secret-key=<key> specify secret key
@@ -344,6 +362,8 @@
--policy-name name of the policy document
--policy-doc permission policy document
--path-prefix path prefix for filtering roles
+ --description Role description
+ --policy-arn ARN of a managed policy
MFA options:
--totp-serial a string that represents the ID of a TOTP token
diff --git a/src/test/cls_rgw/CMakeLists.txt b/src/test/cls_rgw/CMakeLists.txt
index 67b8beb6c4b..875ca82c1ab 100644
--- a/src/test/cls_rgw/CMakeLists.txt
+++ b/src/test/cls_rgw/CMakeLists.txt
@@ -1,24 +1,21 @@
-if(${WITH_RADOSGW})
- add_executable(ceph_test_cls_rgw
- test_cls_rgw.cc
- )
- target_link_libraries(ceph_test_cls_rgw
- cls_rgw_client
- librados
- global
- ${UNITTEST_LIBS}
- ${EXTRALIBS}
- ${BLKID_LIBRARIES}
- ${CMAKE_DL_LIBS}
- radostest-cxx)
- install(TARGETS
- ceph_test_cls_rgw
- DESTINATION ${CMAKE_INSTALL_BINDIR})
-
- add_executable(ceph_test_cls_rgw_stats test_cls_rgw_stats.cc
- $<TARGET_OBJECTS:unit-main>)
- target_link_libraries(ceph_test_cls_rgw_stats cls_rgw_client global
- librados ${UNITTEST_LIBS} radostest-cxx)
- install(TARGETS ceph_test_cls_rgw_stats DESTINATION ${CMAKE_INSTALL_BINDIR})
-endif(${WITH_RADOSGW})
+add_executable(ceph_test_cls_rgw
+ test_cls_rgw.cc
+ )
+target_link_libraries(ceph_test_cls_rgw
+ cls_rgw_client
+ librados
+ global
+ ${UNITTEST_LIBS}
+ ${EXTRALIBS}
+ ${BLKID_LIBRARIES}
+ ${CMAKE_DL_LIBS}
+ radostest-cxx)
+install(TARGETS
+ ceph_test_cls_rgw
+ DESTINATION ${CMAKE_INSTALL_BINDIR})
+add_executable(ceph_test_cls_rgw_stats test_cls_rgw_stats.cc
+ $<TARGET_OBJECTS:unit-main>)
+target_link_libraries(ceph_test_cls_rgw_stats cls_rgw_client global
+ librados ${UNITTEST_LIBS} radostest-cxx)
+install(TARGETS ceph_test_cls_rgw_stats DESTINATION ${CMAKE_INSTALL_BINDIR})
diff --git a/src/test/cls_rgw_gc/CMakeLists.txt b/src/test/cls_rgw_gc/CMakeLists.txt
index dd16615253b..9e98fa3b36f 100644
--- a/src/test/cls_rgw_gc/CMakeLists.txt
+++ b/src/test/cls_rgw_gc/CMakeLists.txt
@@ -1,18 +1,15 @@
-if(${WITH_RADOSGW})
- add_executable(ceph_test_cls_rgw_gc
- test_cls_rgw_gc.cc
- )
- target_link_libraries(ceph_test_cls_rgw_gc
- cls_rgw_gc_client
- librados
- global
- ${UNITTEST_LIBS}
- ${EXTRALIBS}
- ${BLKID_LIBRARIES}
- ${CMAKE_DL_LIBS}
- radostest-cxx)
- install(TARGETS
- ceph_test_cls_rgw_gc
- DESTINATION ${CMAKE_INSTALL_BINDIR})
-endif(${WITH_RADOSGW})
-
+add_executable(ceph_test_cls_rgw_gc
+ test_cls_rgw_gc.cc
+ )
+target_link_libraries(ceph_test_cls_rgw_gc
+ cls_rgw_gc_client
+ librados
+ global
+ ${UNITTEST_LIBS}
+ ${EXTRALIBS}
+ ${BLKID_LIBRARIES}
+ ${CMAKE_DL_LIBS}
+ radostest-cxx)
+install(TARGETS
+ ceph_test_cls_rgw_gc
+ DESTINATION ${CMAKE_INSTALL_BINDIR})
diff --git a/src/test/cls_user/CMakeLists.txt b/src/test/cls_user/CMakeLists.txt
new file mode 100644
index 00000000000..9796205d1e6
--- /dev/null
+++ b/src/test/cls_user/CMakeLists.txt
@@ -0,0 +1,5 @@
+add_executable(ceph_test_cls_user test_cls_user.cc
+ $<TARGET_OBJECTS:unit-main>)
+target_link_libraries(ceph_test_cls_user cls_user_client global
+ librados ${UNITTEST_LIBS} radostest-cxx)
+install(TARGETS ceph_test_cls_user DESTINATION ${CMAKE_INSTALL_BINDIR})
diff --git a/src/test/cls_user/test_cls_user.cc b/src/test/cls_user/test_cls_user.cc
new file mode 100644
index 00000000000..c37f7a8e44c
--- /dev/null
+++ b/src/test/cls_user/test_cls_user.cc
@@ -0,0 +1,211 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab ft=cpp
+
+/*
+ * Ceph - scalable distributed file system
+ *
+ * Copyright contributors to the Ceph project
+ *
+ * This is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License version 2.1, as published by the Free Software
+ * Foundation. See file COPYING.
+ */
+
+#include "cls/user/cls_user_client.h"
+#include "test/librados/test_cxx.h"
+#include "gtest/gtest.h"
+
+#include <optional>
+#include <system_error>
+#include "include/expected.hpp"
+
+// create/destroy a pool that's shared by all tests in the process
+struct RadosEnv : public ::testing::Environment {
+ static std::optional<std::string> pool_name;
+ public:
+ static librados::Rados rados;
+ static librados::IoCtx ioctx;
+
+ void SetUp() override {
+ // create pool
+ std::string name = get_temp_pool_name();
+ ASSERT_EQ("", create_one_pool_pp(name, rados));
+ pool_name = name;
+ ASSERT_EQ(rados.ioctx_create(name.c_str(), ioctx), 0);
+ }
+ void TearDown() override {
+ ioctx.close();
+ if (pool_name) {
+ ASSERT_EQ(destroy_one_pool_pp(*pool_name, rados), 0);
+ }
+ }
+};
+std::optional<std::string> RadosEnv::pool_name;
+librados::Rados RadosEnv::rados;
+librados::IoCtx RadosEnv::ioctx;
+
+auto *const rados_env = ::testing::AddGlobalTestEnvironment(new RadosEnv);
+
+// test fixture with helper functions
+class ClsAccount : public ::testing::Test {
+ protected:
+ librados::IoCtx& ioctx = RadosEnv::ioctx;
+
+ int add(const std::string& oid, const cls_user_account_resource& entry,
+ bool exclusive, uint32_t limit)
+ {
+ librados::ObjectWriteOperation op;
+ cls_user_account_resource_add(op, entry, exclusive, limit);
+ return ioctx.operate(oid, &op);
+ }
+
+ auto get(const std::string& oid, std::string_view name)
+ -> tl::expected<cls_user_account_resource, int>
+ {
+ librados::ObjectReadOperation op;
+ cls_user_account_resource resource;
+ int r2 = 0;
+ cls_user_account_resource_get(op, name, resource, &r2);
+
+ int r1 = ioctx.operate(oid, &op, nullptr);
+ if (r1 < 0) return tl::unexpected(r1);
+ if (r2 < 0) return tl::unexpected(r2);
+ return resource;
+ }
+
+ int rm(const std::string& oid, std::string_view name)
+ {
+ librados::ObjectWriteOperation op;
+ cls_user_account_resource_rm(op, name);
+ return ioctx.operate(oid, &op);
+ }
+
+ int list(const std::string& oid, std::string_view marker,
+ std::string_view path_prefix, uint32_t max_entries,
+ std::vector<cls_user_account_resource>& entries, bool& truncated,
+ std::string& next_marker, int& ret)
+ {
+ librados::ObjectReadOperation op;
+ cls_user_account_resource_list(op, marker, path_prefix, max_entries,
+ entries, &truncated, &next_marker, &ret);
+ return ioctx.operate(oid, &op, nullptr);
+ }
+
+ auto list_all(const std::string& oid,
+ std::string_view path_prefix = "",
+ uint32_t max_chunk = 1000)
+ -> std::vector<cls_user_account_resource>
+ {
+ std::vector<cls_user_account_resource> all_entries;
+ std::string marker;
+ bool truncated = true;
+
+ while (truncated) {
+ std::vector<cls_user_account_resource> entries;
+ std::string next_marker;
+ int r2 = 0;
+ int r1 = list(oid, marker, path_prefix, max_chunk,
+ entries, truncated, next_marker, r2);
+ if (r1 < 0) throw std::system_error(r1, std::system_category());
+ if (r2 < 0) throw std::system_error(r2, std::system_category());
+ marker = std::move(next_marker);
+ std::move(entries.begin(), entries.end(),
+ std::back_inserter(all_entries));
+ }
+ return all_entries;
+ }
+};
+
+template <typename ...Args>
+std::vector<cls_user_account_resource> make_list(Args&& ...args)
+{
+ return {std::forward<Args>(args)...};
+}
+
+bool operator==(const cls_user_account_resource& lhs,
+ const cls_user_account_resource& rhs)
+{
+ if (lhs.name != rhs.name) {
+ return false;
+ }
+ return lhs.path == rhs.path;
+ // ignore metadata
+}
+std::ostream& operator<<(std::ostream& out, const cls_user_account_resource& r)
+{
+ return out << r.path << r.name;
+}
+
+TEST_F(ClsAccount, add)
+{
+ const std::string oid = __PRETTY_FUNCTION__;
+ const auto u1 = cls_user_account_resource{.name = "user1"};
+ const auto u2 = cls_user_account_resource{.name = "user2"};
+ const auto u3 = cls_user_account_resource{.name = "USER2"};
+ EXPECT_EQ(-EUSERS, add(oid, u1, true, 0));
+ EXPECT_EQ(0, add(oid, u1, true, 1));
+ EXPECT_EQ(-EUSERS, add(oid, u2, true, 1));
+ EXPECT_EQ(-EEXIST, add(oid, u1, true, 1));
+ EXPECT_EQ(0, add(oid, u1, false, 1)); // allow overwrite at limit
+ EXPECT_EQ(0, add(oid, u2, true, 2));
+ EXPECT_EQ(-EEXIST, add(oid, u3, true, 2)); // case-insensitive match
+}
+
+TEST_F(ClsAccount, get)
+{
+ const std::string oid = __PRETTY_FUNCTION__;
+ const auto u1 = cls_user_account_resource{.name = "user1", .path = "A"};
+ const auto u2 = cls_user_account_resource{.name = "USER1"};
+ EXPECT_EQ(tl::unexpected(-ENOENT), get(oid, u1.name));
+ EXPECT_EQ(-EUSERS, add(oid, u1, true, 0));
+ EXPECT_EQ(tl::unexpected(-ENOENT), get(oid, u1.name));
+ EXPECT_EQ(0, add(oid, u1, true, 1));
+ EXPECT_EQ(u1, get(oid, u1.name));
+ EXPECT_EQ(0, add(oid, u2, false, 1)); // overwrite with different case
+ EXPECT_EQ(u2, get(oid, u1.name)); // accessible by the original name
+}
+
+TEST_F(ClsAccount, rm)
+{
+ const std::string oid = __PRETTY_FUNCTION__;
+ const auto u1 = cls_user_account_resource{.name = "user1"};
+ const auto u2 = cls_user_account_resource{.name = "USER1"};
+ EXPECT_EQ(-ENOENT, rm(oid, u1.name));
+ ASSERT_EQ(0, add(oid, u1, true, 1));
+ ASSERT_EQ(0, rm(oid, u1.name));
+ EXPECT_EQ(-ENOENT, rm(oid, u1.name));
+ ASSERT_EQ(0, add(oid, u1, true, 1));
+ ASSERT_EQ(0, rm(oid, u2.name)); // case-insensitive match
+}
+
+TEST_F(ClsAccount, list)
+{
+ const std::string oid = __PRETTY_FUNCTION__;
+ const auto u1 = cls_user_account_resource{.name = "user1", .path = ""};
+ const auto u2 = cls_user_account_resource{.name = "User2", .path = "A"};
+ const auto u3 = cls_user_account_resource{.name = "user3", .path = "AA"};
+ const auto u4 = cls_user_account_resource{.name = "User4", .path = ""};
+ const auto u5 = cls_user_account_resource{.name = "USER1", .path = "z"};
+ constexpr uint32_t max_users = 1024;
+
+ ASSERT_EQ(0, ioctx.create(oid, true));
+ ASSERT_EQ(make_list(), list_all(oid));
+ ASSERT_EQ(0, add(oid, u1, true, max_users));
+ EXPECT_EQ(make_list(u1), list_all(oid));
+ ASSERT_EQ(0, add(oid, u2, true, max_users));
+ ASSERT_EQ(0, add(oid, u3, true, max_users));
+ ASSERT_EQ(0, add(oid, u4, true, max_users));
+ EXPECT_EQ(make_list(u1, u2, u3, u4), list_all(oid, ""));
+ EXPECT_EQ(make_list(u1, u2, u3, u4), list_all(oid, "", 1)); // paginated
+ EXPECT_EQ(make_list(u2, u3), list_all(oid, "A"));
+ EXPECT_EQ(make_list(u2, u3), list_all(oid, "A", 1)); // paginated
+ EXPECT_EQ(make_list(u3), list_all(oid, "AA"));
+ EXPECT_EQ(make_list(u3), list_all(oid, "AA", 1)); // paginated
+ EXPECT_EQ(make_list(), list_all(oid, "AAu")); // don't match AAuser3
+ ASSERT_EQ(0, rm(oid, u2.name));
+ EXPECT_EQ(make_list(u1, u3, u4), list_all(oid, ""));
+ EXPECT_EQ(make_list(u1, u3, u4), list_all(oid, "", 1)); // paginated
+ ASSERT_EQ(0, add(oid, u5, false, max_users)); // overwrite u1
+ EXPECT_EQ(make_list(u5, u3, u4), list_all(oid, ""));
+}
diff --git a/src/test/mds/TestQuiesceAgent.cc b/src/test/mds/TestQuiesceAgent.cc
index ae951158954..a17ad0e45c9 100644
--- a/src/test/mds/TestQuiesceAgent.cc
+++ b/src/test/mds/TestQuiesceAgent.cc
@@ -139,13 +139,19 @@ class QuiesceAgentTest : public testing::Test {
}
void TearDown() override {
+ for (auto it = quiesce_requests.cbegin(); it != quiesce_requests.cend(); ) {
+ if (it->second.second) {
+ it->second.second->complete(-ECANCELED);
+ }
+ it = quiesce_requests.erase(it);
+ }
+
if (agent) {
agent->shutdown();
agent.reset();
}
}
-
using R = QuiesceMap::Roots::value_type;
using RootInitList = std::initializer_list<R>;
diff --git a/src/test/osd/TestOSDScrub.cc b/src/test/osd/TestOSDScrub.cc
index 4c6d4cceedf..28d94ed66da 100644
--- a/src/test/osd/TestOSDScrub.cc
+++ b/src/test/osd/TestOSDScrub.cc
@@ -196,6 +196,7 @@ TEST(TestOSDScrub, scrub_time_permit) {
now = utime_t(mktime(&tm), 0);
ret = osd->scrub_time_permit(now);
ASSERT_FALSE(ret);
+ mc.shutdown();
}
// Local Variables:
diff --git a/src/test/rgw/bucket_notification/test_bn.py b/src/test/rgw/bucket_notification/test_bn.py
index 25770487cca..fda0cf29173 100644
--- a/src/test/rgw/bucket_notification/test_bn.py
+++ b/src/test/rgw/bucket_notification/test_bn.py
@@ -478,21 +478,27 @@ def connection2():
return conn
-def another_user(tenant=None):
+def another_user(user=None, tenant=None, account=None):
access_key = str(time.time())
secret_key = str(time.time())
- uid = UID_PREFIX + str(time.time())
+ uid = user or UID_PREFIX + str(time.time())
+ cmd = ['user', 'create', '--uid', uid, '--access-key', access_key, '--secret-key', secret_key, '--display-name', 'Superman']
+ arn = f'arn:aws:iam:::user/{uid}'
if tenant:
- _, result = admin(['user', 'create', '--uid', uid, '--tenant', tenant, '--access-key', access_key, '--secret-key', secret_key, '--display-name', '"Super Man"'], get_config_cluster())
- else:
- _, result = admin(['user', 'create', '--uid', uid, '--access-key', access_key, '--secret-key', secret_key, '--display-name', '"Super Man"'], get_config_cluster())
+ cmd += ['--tenant', tenant]
+ arn = f'arn:aws:iam::{tenant}:user/{uid}'
+ if account:
+ cmd += ['--account-id', account, '--account-root']
+ arn = f'arn:aws:iam::{account}:user/Superman'
+ _, result = admin(cmd, get_config_cluster())
assert_equal(result, 0)
+
conn = S3Connection(aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
is_secure=False, port=get_config_port(), host=get_config_host(),
calling_format='boto.s3.connection.OrdinaryCallingFormat')
- return conn
+ return conn, arn
##############
# bucket notifications tests
@@ -621,7 +627,7 @@ def test_ps_s3_topic_admin_on_master():
parsed_result = json.loads(result[0])
assert_equal(parsed_result['arn'], topic_arn3)
matches = [tenant, UID_PREFIX]
- assert_true( all([x in parsed_result['user'] for x in matches]))
+ assert_true( all([x in parsed_result['owner'] for x in matches]))
# delete topic 3
_, result = admin(['topic', 'rm', '--topic', topic_name+'_3', '--tenant', tenant], get_config_cluster())
@@ -3064,13 +3070,18 @@ def test_ps_s3_persistent_cleanup():
http_server.close()
-def wait_for_queue_to_drain(topic_name):
+def wait_for_queue_to_drain(topic_name, tenant=None, account=None):
retries = 0
entries = 1
start_time = time.time()
# topic stats
+ cmd = ['topic', 'stats', '--topic', topic_name]
+ if tenant:
+ cmd += ['--tenant', tenant]
+ if account:
+ cmd += ['--account-id', account]
while entries > 0:
- result = admin(['topic', 'stats', '--topic', topic_name], get_config_cluster())
+ result = admin(cmd, get_config_cluster())
assert_equal(result[1], 0)
parsed_result = json.loads(result[0])
entries = parsed_result['Topic Stats']['Entries']
@@ -3799,9 +3810,8 @@ def test_ps_s3_persistent_multiple_endpoints():
conn.delete_bucket(bucket_name)
http_server.close()
-def persistent_notification(endpoint_type):
+def persistent_notification(endpoint_type, conn, account=None):
""" test pushing persistent notification """
- conn = connection()
zonegroup = get_config_zonegroup()
# create bucket
@@ -3872,7 +3882,7 @@ def persistent_notification(endpoint_type):
keys = list(bucket.list())
- wait_for_queue_to_drain(topic_name)
+ wait_for_queue_to_drain(topic_name, account=account)
receiver.verify_s3_events(keys, exact_match=exact_match, deletions=False)
@@ -3888,7 +3898,7 @@ def persistent_notification(endpoint_type):
time_diff = time.time() - start_time
print('average time for deletion + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds')
- wait_for_queue_to_drain(topic_name)
+ wait_for_queue_to_drain(topic_name, account=account)
receiver.verify_s3_events(keys, exact_match=exact_match, deletions=True)
@@ -3906,19 +3916,38 @@ def persistent_notification(endpoint_type):
@attr('http_test')
def test_ps_s3_persistent_notification_http():
""" test pushing persistent notification http """
- persistent_notification('http')
+ conn = connection()
+ persistent_notification('http', conn)
+
+@attr('http_test')
+def test_ps_s3_persistent_notification_http_account():
+ """ test pushing persistent notification via http for account user """
+ account = 'RGW77777777777777777'
+ user = UID_PREFIX + 'test'
+
+ _, result = admin(['account', 'create', '--account-id', account, '--account-name', 'testacct'], get_config_cluster())
+ assert_true(result in [0, 17]) # EEXIST okay if we rerun
+
+ conn, _ = another_user(user=user, account=account)
+ try:
+ persistent_notification('http', conn, account)
+ finally:
+ admin(['user', 'rm', '--uid', user], get_config_cluster())
+ admin(['account', 'rm', '--account-id', account], get_config_cluster())
@attr('amqp_test')
def test_ps_s3_persistent_notification_amqp():
""" test pushing persistent notification amqp """
- persistent_notification('amqp')
+ conn = connection()
+ persistent_notification('amqp', conn)
@attr('kafka_test')
def test_ps_s3_persistent_notification_kafka():
""" test pushing persistent notification kafka """
- persistent_notification('kafka')
+ conn = connection()
+ persistent_notification('kafka', conn)
def random_string(length):
@@ -4318,7 +4347,7 @@ def test_ps_s3_multiple_topics_notification():
def test_ps_s3_topic_permissions():
""" test s3 topic set/get/delete permissions """
conn1 = connection()
- conn2 = another_user()
+ conn2, arn2 = another_user()
zonegroup = get_config_zonegroup()
bucket_name = gen_bucket_name()
topic_name = bucket_name + TOPIC_SUFFIX
@@ -4328,7 +4357,7 @@ def test_ps_s3_topic_permissions():
{
"Sid": "Statement",
"Effect": "Deny",
- "Principal": "*",
+ "Principal": {"AWS": arn2},
"Action": ["sns:Publish", "sns:SetTopicAttributes", "sns:GetTopicAttributes", "sns:DeleteTopic", "sns:CreateTopic"],
"Resource": f"arn:aws:sns:{zonegroup}::{topic_name}"
}
@@ -4344,12 +4373,12 @@ def test_ps_s3_topic_permissions():
try:
# 2nd user tries to override the topic
topic_arn = topic_conf2.set_config()
- assert False, "'AccessDenied' error is expected"
+ assert False, "'AuthorizationError' error is expected"
except ClientError as err:
if 'Error' in err.response:
- assert_equal(err.response['Error']['Code'], 'AccessDenied')
+ assert_equal(err.response['Error']['Code'], 'AuthorizationError')
else:
- assert_equal(err.response['Code'], 'AccessDenied')
+ assert_equal(err.response['Code'], 'AuthorizationError')
except Exception as err:
print('unexpected error type: '+type(err).__name__)
@@ -4360,12 +4389,12 @@ def test_ps_s3_topic_permissions():
try:
# 2nd user tries to set the attribute
status = topic_conf2.set_attributes(attribute_name="persistent", attribute_val="false", topic_arn=topic_arn)
- assert False, "'AccessDenied' error is expected"
+ assert False, "'AuthorizationError' error is expected"
except ClientError as err:
if 'Error' in err.response:
- assert_equal(err.response['Error']['Code'], 'AccessDenied')
+ assert_equal(err.response['Error']['Code'], 'AuthorizationError')
else:
- assert_equal(err.response['Code'], 'AccessDenied')
+ assert_equal(err.response['Code'], 'AuthorizationError')
except Exception as err:
print('unexpected error type: '+type(err).__name__)
@@ -4390,12 +4419,12 @@ def test_ps_s3_topic_permissions():
try:
# 2nd user tries to delete the topic
status = topic_conf2.del_config(topic_arn=topic_arn)
- assert False, "'AccessDenied' error is expected"
+ assert False, "'AuthorizationError' error is expected"
except ClientError as err:
if 'Error' in err.response:
- assert_equal(err.response['Error']['Code'], 'AccessDenied')
+ assert_equal(err.response['Error']['Code'], 'AuthorizationError')
else:
- assert_equal(err.response['Code'], 'AccessDenied')
+ assert_equal(err.response['Code'], 'AuthorizationError')
except Exception as err:
print('unexpected error type: '+type(err).__name__)
@@ -4427,7 +4456,7 @@ def test_ps_s3_topic_permissions():
def test_ps_s3_topic_no_permissions():
""" test s3 topic set/get/delete permissions """
conn1 = connection()
- conn2 = another_user()
+ conn2, _ = another_user()
zonegroup = 'default'
bucket_name = gen_bucket_name()
topic_name = bucket_name + TOPIC_SUFFIX
@@ -4442,12 +4471,12 @@ def test_ps_s3_topic_no_permissions():
try:
# 2nd user tries to override the topic
topic_arn = topic_conf2.set_config()
- assert False, "'AccessDenied' error is expected"
+ assert False, "'AuthorizationError' error is expected"
except ClientError as err:
if 'Error' in err.response:
- assert_equal(err.response['Error']['Code'], 'AccessDenied')
+ assert_equal(err.response['Error']['Code'], 'AuthorizationError')
else:
- assert_equal(err.response['Code'], 'AccessDenied')
+ assert_equal(err.response['Code'], 'AuthorizationError')
except Exception as err:
print('unexpected error type: '+type(err).__name__)
@@ -4458,12 +4487,12 @@ def test_ps_s3_topic_no_permissions():
try:
# 2nd user tries to set the attribute
status = topic_conf2.set_attributes(attribute_name="persistent", attribute_val="false", topic_arn=topic_arn)
- assert False, "'AccessDenied' error is expected"
+ assert False, "'AuthorizationError' error is expected"
except ClientError as err:
if 'Error' in err.response:
- assert_equal(err.response['Error']['Code'], 'AccessDenied')
+ assert_equal(err.response['Error']['Code'], 'AuthorizationError')
else:
- assert_equal(err.response['Code'], 'AccessDenied')
+ assert_equal(err.response['Code'], 'AuthorizationError')
except Exception as err:
print('unexpected error type: '+type(err).__name__)
@@ -4481,12 +4510,12 @@ def test_ps_s3_topic_no_permissions():
try:
# 2nd user tries to delete the topic
status = topic_conf2.del_config(topic_arn=topic_arn)
- assert False, "'AccessDenied' error is expected"
+ assert False, "'AuthorizationError' error is expected"
except ClientError as err:
if 'Error' in err.response:
- assert_equal(err.response['Error']['Code'], 'AccessDenied')
+ assert_equal(err.response['Error']['Code'], 'AuthorizationError')
else:
- assert_equal(err.response['Code'], 'AccessDenied')
+ assert_equal(err.response['Code'], 'AuthorizationError')
except Exception as err:
print('unexpected error type: '+type(err).__name__)
diff --git a/src/test/rgw/rgw_multi/conn.py b/src/test/rgw/rgw_multi/conn.py
index 0ef66b0c4ca..3edb7511125 100644
--- a/src/test/rgw/rgw_multi/conn.py
+++ b/src/test/rgw/rgw_multi/conn.py
@@ -30,15 +30,18 @@ def get_gateway_secure_connection(gateway, credentials):
calling_format = boto.s3.connection.OrdinaryCallingFormat())
return gateway.secure_connection
-def get_gateway_iam_connection(gateway, credentials):
+def get_gateway_iam_connection(gateway, credentials, region):
""" connect to iam api of the given gateway """
if gateway.iam_connection is None:
- gateway.iam_connection = boto.connect_iam(
+        endpoint = f'http://{gateway.host}:{gateway.port}'
+ gateway.iam_connection = boto3.client(
+ service_name = 'iam',
aws_access_key_id = credentials.access_key,
aws_secret_access_key = credentials.secret,
- host = gateway.host,
- port = gateway.port,
- is_secure = False)
+ endpoint_url = endpoint,
+ region_name=region,
+ use_ssl = False)
return gateway.iam_connection
diff --git a/src/test/rgw/rgw_multi/multisite.py b/src/test/rgw/rgw_multi/multisite.py
index 8642ea3a57c..4f61425684f 100644
--- a/src/test/rgw/rgw_multi/multisite.py
+++ b/src/test/rgw/rgw_multi/multisite.py
@@ -191,17 +191,16 @@ class ZoneConn(object):
self.conn = get_gateway_connection(self.zone.gateways[0], self.credentials)
self.secure_conn = get_gateway_secure_connection(self.zone.gateways[0], self.credentials)
- self.iam_conn = get_gateway_iam_connection(self.zone.gateways[0], self.credentials)
region = "" if self.zone.zonegroup is None else self.zone.zonegroup.name
+ self.iam_conn = get_gateway_iam_connection(self.zone.gateways[0], self.credentials, region)
self.s3_client = get_gateway_s3_client(self.zone.gateways[0], self.credentials, region)
- self.sns_client = get_gateway_sns_client(self.zone.gateways[0], self.credentials,region)
+ self.sns_client = get_gateway_sns_client(self.zone.gateways[0], self.credentials, region)
# create connections for the rest of the gateways (if exist)
for gw in list(self.zone.gateways):
get_gateway_connection(gw, self.credentials)
get_gateway_secure_connection(gw, self.credentials)
-
- get_gateway_iam_connection(gw, self.credentials)
+ get_gateway_iam_connection(gw, self.credentials, region)
def get_connection(self):
@@ -369,10 +368,11 @@ class Credentials:
return ['--access-key', self.access_key, '--secret', self.secret]
class User(SystemObject):
- def __init__(self, uid, data = None, name = None, credentials = None, tenant = None):
+ def __init__(self, uid, data = None, name = None, credentials = None, tenant = None, account = None):
self.name = name
self.credentials = credentials or []
self.tenant = tenant
+ self.account = account
super(User, self).__init__(data, uid)
def user_arg(self):
@@ -380,6 +380,8 @@ class User(SystemObject):
args = ['--uid', self.id]
if self.tenant:
args += ['--tenant', self.tenant]
+ if self.account:
+ args += ['--account-id', self.account, '--account-root']
return args
def build_command(self, command):
diff --git a/src/test/rgw/rgw_multi/tests.py b/src/test/rgw/rgw_multi/tests.py
index 70e003c050c..f0b36865ed1 100644
--- a/src/test/rgw/rgw_multi/tests.py
+++ b/src/test/rgw/rgw_multi/tests.py
@@ -545,10 +545,10 @@ def create_role_per_zone(zonegroup_conns, roles_per_zone = 1):
for i in range(roles_per_zone):
role_name = gen_role_name()
log.info('create role zone=%s name=%s', zone.name, role_name)
- policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/testuser\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
- role = zone.create_role("", role_name, policy_document, "")
+ policy_document = json.dumps({'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow', 'Principal': {'AWS': 'arn:aws:iam:::user/testuser'}, 'Action': ['sts:AssumeRole']}]})
+ zone.iam_conn.create_role(RoleName=role_name, AssumeRolePolicyDocument=policy_document)
roles.append(role_name)
- zone_role.append((zone, role))
+ zone_role.append((zone, role_name))
return roles, zone_role
@@ -639,9 +639,131 @@ def check_bucket_eq(zone_conn1, zone_conn2, bucket):
if zone_conn2.zone.has_buckets():
zone_conn2.check_bucket_eq(zone_conn1, bucket.name)
-def check_role_eq(zone_conn1, zone_conn2, role):
- if zone_conn2.zone.has_roles():
- zone_conn2.check_role_eq(zone_conn1, role['create_role_response']['create_role_result']['role']['role_name'])
+def check_role_eq(zone_conn1, zone_conn2, role_name):
+ iam1 = zone_conn1.iam_conn
+ iam2 = zone_conn2.iam_conn
+
+ r1 = iam1.get_role(RoleName=role_name)
+ r2 = iam2.get_role(RoleName=role_name)
+ eq(r1['Role'], r2['Role'])
+
+ # compare inline policies
+ policies1 = iam1.get_paginator('list_role_policies').paginate(RoleName=role_name)
+ policies2 = iam2.get_paginator('list_role_policies').paginate(RoleName=role_name)
+ for p1, p2 in zip(policies1, policies2):
+ eq(p1['PolicyNames'], p2['PolicyNames'])
+
+ # compare managed policies
+ policies1 = iam1.get_paginator('list_attached_role_policies').paginate(RoleName=role_name)
+ policies2 = iam2.get_paginator('list_attached_role_policies').paginate(RoleName=role_name)
+ for p1, p2 in zip(policies1, policies2):
+ eq(p1['AttachedPolicies'], p2['AttachedPolicies'])
+
+def check_roles_eq(zone_conn1, zone_conn2):
+ iam1 = zone_conn1.iam_conn
+ iam2 = zone_conn2.iam_conn
+
+ roles1 = iam1.get_paginator('list_roles').paginate()
+ roles2 = iam2.get_paginator('list_roles').paginate()
+ for r1, r2 in zip(roles1, roles2):
+ eq(r1['Roles'], r2['Roles'])
+
+ for role in r1['Roles']:
+ check_role_eq(zone_conn1, zone_conn2, role['RoleName'])
+
+def check_user_eq(zone_conn1, zone_conn2, user_name):
+ iam1 = zone_conn1.iam_conn
+ iam2 = zone_conn2.iam_conn
+
+ r1 = iam1.get_user(UserName=user_name)
+ r2 = iam2.get_user(UserName=user_name)
+ eq(r1['User'], r2['User'])
+
+ # compare access keys
+ keys1 = iam1.get_paginator('list_access_keys').paginate(UserName=user_name)
+ keys2 = iam2.get_paginator('list_access_keys').paginate(UserName=user_name)
+ for k1, k2 in zip(keys1, keys2):
+ eq(k1['AccessKeyMetadata'], k2['AccessKeyMetadata'])
+
+ # compare group memberships
+ groups1 = iam1.get_paginator('list_groups_for_user').paginate(UserName=user_name)
+ groups2 = iam2.get_paginator('list_groups_for_user').paginate(UserName=user_name)
+ for g1, g2 in zip(groups1, groups2):
+ eq(g1['Groups'], g2['Groups'])
+
+ # compare inline policies
+ policies1 = iam1.get_paginator('list_user_policies').paginate(UserName=user_name)
+ policies2 = iam2.get_paginator('list_user_policies').paginate(UserName=user_name)
+ for p1, p2 in zip(policies1, policies2):
+ eq(p1['PolicyNames'], p2['PolicyNames'])
+
+ # compare managed policies
+ policies1 = iam1.get_paginator('list_attached_user_policies').paginate(UserName=user_name)
+ policies2 = iam2.get_paginator('list_attached_user_policies').paginate(UserName=user_name)
+ for p1, p2 in zip(policies1, policies2):
+ eq(p1['AttachedPolicies'], p2['AttachedPolicies'])
+
+def check_users_eq(zone_conn1, zone_conn2):
+ iam1 = zone_conn1.iam_conn
+ iam2 = zone_conn2.iam_conn
+
+ users1 = iam1.get_paginator('list_users').paginate()
+ users2 = iam2.get_paginator('list_users').paginate()
+ for u1, u2 in zip(users1, users2):
+ eq(u1['Users'], u2['Users'])
+
+ for user in u1['Users']:
+ check_user_eq(zone_conn1, zone_conn2, user['UserName'])
+
+def check_group_eq(zone_conn1, zone_conn2, group_name):
+ iam1 = zone_conn1.iam_conn
+ iam2 = zone_conn2.iam_conn
+
+ r1 = iam1.get_group(GroupName=group_name)
+ r2 = iam2.get_group(GroupName=group_name)
+ eq(r1['Group'], r2['Group'])
+
+ # compare inline policies
+ policies1 = iam1.get_paginator('list_group_policies').paginate(GroupName=group_name)
+ policies2 = iam2.get_paginator('list_group_policies').paginate(GroupName=group_name)
+ for p1, p2 in zip(policies1, policies2):
+ eq(p1['PolicyNames'], p2['PolicyNames'])
+
+ # compare managed policies
+ policies1 = iam1.get_paginator('list_attached_group_policies').paginate(GroupName=group_name)
+ policies2 = iam2.get_paginator('list_attached_group_policies').paginate(GroupName=group_name)
+ for p1, p2 in zip(policies1, policies2):
+ eq(p1['AttachedPolicies'], p2['AttachedPolicies'])
+
+def check_groups_eq(zone_conn1, zone_conn2):
+ iam1 = zone_conn1.iam_conn
+ iam2 = zone_conn2.iam_conn
+
+ groups1 = iam1.get_paginator('list_groups').paginate()
+ groups2 = iam2.get_paginator('list_groups').paginate()
+ for g1, g2 in zip(groups1, groups2):
+ eq(g1['Groups'], g2['Groups'])
+
+ for group in g1['Groups']:
+ check_group_eq(zone_conn1, zone_conn2, group['GroupName'])
+
+def check_oidc_provider_eq(zone_conn1, zone_conn2, arn):
+ iam1 = zone_conn1.iam_conn
+ iam2 = zone_conn2.iam_conn
+
+ p1 = iam1.get_open_id_connect_provider(OpenIDConnectProviderArn=arn)
+ p2 = iam2.get_open_id_connect_provider(OpenIDConnectProviderArn=arn)
+ eq(p1, p2)
+
+def check_oidc_providers_eq(zone_conn1, zone_conn2):
+ iam1 = zone_conn1.iam_conn
+ iam2 = zone_conn2.iam_conn
+
+ providers1 = iam1.list_open_id_connect_providers()['OpenIDConnectProviderList']
+ providers2 = iam2.list_open_id_connect_providers()['OpenIDConnectProviderList']
+ for p1, p2 in zip(providers1, providers2):
+ eq(p1, p2)
+ check_oidc_provider_eq(zone_conn1, zone_conn2, p1['Arn'])
def test_object_sync():
zonegroup = realm.master_zonegroup()
@@ -1770,34 +1892,33 @@ def test_role_sync():
zonegroup_meta_checkpoint(zonegroup)
- for source_conn, role in zone_role:
- for target_conn in zonegroup_conns.zones:
- if source_conn.zone == target_conn.zone:
- continue
-
- check_role_eq(source_conn, target_conn, role)
+ for source_conn, target_conn in combinations(zonegroup_conns.zones, 2):
+ if target_conn.zone.has_roles():
+ check_roles_eq(source_conn, target_conn)
def test_role_delete_sync():
zonegroup = realm.master_zonegroup()
zonegroup_conns = ZonegroupConns(zonegroup)
role_name = gen_role_name()
log.info('create role zone=%s name=%s', zonegroup_conns.master_zone.name, role_name)
- zonegroup_conns.master_zone.create_role("", role_name, None, "")
+ policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/testuser\"]},\"Action\":[\"sts:AssumeRole\"]}]}"
+ zonegroup_conns.master_zone.iam_conn.create_role(RoleName=role_name, AssumeRolePolicyDocument=policy_document)
zonegroup_meta_checkpoint(zonegroup)
for zone in zonegroup_conns.zones:
log.info(f'checking if zone: {zone.name} has role: {role_name}')
- assert(zone.has_role(role_name))
+ zone.iam_conn.get_role(RoleName=role_name)
log.info(f'success, zone: {zone.name} has role: {role_name}')
log.info(f"deleting role: {role_name}")
- zonegroup_conns.master_zone.delete_role(role_name)
+ zonegroup_conns.master_zone.iam_conn.delete_role(RoleName=role_name)
zonegroup_meta_checkpoint(zonegroup)
for zone in zonegroup_conns.zones:
log.info(f'checking if zone: {zone.name} does not have role: {role_name}')
- assert(not zone.has_role(role_name))
+ assert_raises(zone.iam_conn.exceptions.NoSuchEntityException,
+ zone.iam_conn.get_role, RoleName=role_name)
log.info(f'success, zone: {zone.name} does not have role: {role_name}')
@@ -3248,3 +3369,71 @@ def test_topic_notification_sync():
for conn in zonegroup_conns.zones:
topic_list = conn.list_topics()
assert_equal(len(topic_list), 0)
+
+def test_account_metadata_sync():
+ zonegroup = realm.master_zonegroup()
+ zonegroup_conns = ZonegroupConns(zonegroup)
+
+ inline_policy = json.dumps({'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow', 'Action': 's3:*', 'Resource': '*'}]})
+ managed_policy_arn = 'arn:aws:iam::aws:policy/AmazonS3FullAccess'
+
+ for source_conn in zonegroup_conns.rw_zones:
+ iam = source_conn.iam_conn
+ name = source_conn.name
+ # create user, add access key, user policy, managed policy
+ iam.create_user(UserName=name)
+ iam.create_access_key(UserName=name)
+ iam.put_user_policy(UserName=name, PolicyName='Allow', PolicyDocument=inline_policy)
+ iam.attach_user_policy(UserName=name, PolicyArn=managed_policy_arn)
+ # create group, group policy, managed policy, add user to group
+ iam.create_group(GroupName=name)
+ iam.put_group_policy(GroupName=name, PolicyName='Allow', PolicyDocument=inline_policy)
+ iam.attach_group_policy(GroupName=name, PolicyArn=managed_policy_arn)
+ iam.add_user_to_group(GroupName=name, UserName=name)
+ # create role, role policy, managed policy
+ iam.create_role(RoleName=name, AssumeRolePolicyDocument=json.dumps({'Version': '2012-10-17', 'Statement': [{'Effect': 'Allow', 'Principal': {'AWS': 'arn:aws:iam:::user/testuser'}, 'Action': ['sts:AssumeRole']}]}))
+ iam.put_role_policy(RoleName=name, PolicyName='Allow', PolicyDocument=inline_policy)
+ iam.attach_role_policy(RoleName=name, PolicyArn=managed_policy_arn)
+ # TODO: test oidc provider
+ #iam.create_open_id_connect_provider(ClientIDList=['clientid'], ThumbprintList=['3768084dfb3d2b68b7897bf5f565da8efEXAMPLE'], Url=f'http://{name}.example.com')
+
+ realm_meta_checkpoint(realm)
+
+ # check that all users/groups/roles are equal across all zones
+ for source_conn, target_conn in combinations(zonegroup_conns.zones, 2):
+ if target_conn.zone.has_roles():
+ check_roles_eq(source_conn, target_conn)
+ check_users_eq(source_conn, target_conn)
+ check_groups_eq(source_conn, target_conn)
+ check_oidc_providers_eq(source_conn, target_conn)
+
+ for source_conn in zonegroup_conns.rw_zones:
+ iam = source_conn.iam_conn
+ name = source_conn.name
+
+ #iam.delete_open_id_connect_provider(OpenIDConnectProviderArn=f'arn:aws:iam::RGW11111111111111111:oidc-provider/{name}.example.com')
+
+ iam.detach_role_policy(RoleName=name, PolicyArn=managed_policy_arn)
+ iam.delete_role_policy(RoleName=name, PolicyName='Allow')
+ iam.delete_role(RoleName=name)
+
+ iam.remove_user_from_group(GroupName=name, UserName=name)
+ iam.detach_group_policy(GroupName=name, PolicyArn=managed_policy_arn)
+ iam.delete_group_policy(GroupName=name, PolicyName='Allow')
+ iam.delete_group(GroupName=name)
+
+ iam.detach_user_policy(UserName=name, PolicyArn=managed_policy_arn)
+ iam.delete_user_policy(UserName=name, PolicyName='Allow')
+ key_id = iam.list_access_keys(UserName=name)['AccessKeyMetadata'][0]['AccessKeyId']
+ iam.delete_access_key(UserName=name, AccessKeyId=key_id)
+ iam.delete_user(UserName=name)
+
+ realm_meta_checkpoint(realm)
+
+ # check that all users/groups/roles are equal across all zones
+ for source_conn, target_conn in combinations(zonegroup_conns.zones, 2):
+ if target_conn.zone.has_roles():
+ check_roles_eq(source_conn, target_conn)
+ check_users_eq(source_conn, target_conn)
+ check_groups_eq(source_conn, target_conn)
+ check_oidc_providers_eq(source_conn, target_conn)
diff --git a/src/test/rgw/test_multi.py b/src/test/rgw/test_multi.py
index 57d27343efc..cef6850c88a 100644
--- a/src/test/rgw/test_multi.py
+++ b/src/test/rgw/test_multi.py
@@ -246,7 +246,7 @@ def init(parse_args):
admin_user = multisite.User('zone.user')
user_creds = gen_credentials()
- user = multisite.User('tester', tenant=args.tenant)
+ user = multisite.User('tester', tenant=args.tenant, account='RGW11111111111111111')
realm = multisite.Realm('r')
if bootstrap:
@@ -381,8 +381,9 @@ def init(parse_args):
arg = ['--display-name', '"Zone User"', '--system']
arg += admin_creds.credential_args()
admin_user.create(zone, arg)
- # create test user
- arg = ['--display-name', '"Test User"', '--caps', 'roles=*']
+ # create test account/user
+ cluster.admin(['account', 'create', '--account-id', user.account])
+ arg = ['--display-name', 'TestUser']
arg += user_creds.credential_args()
user.create(zone, arg)
else:
diff --git a/src/test/rgw/test_rgw_crypto.cc b/src/test/rgw/test_rgw_crypto.cc
index 88104ebefd3..1c1762ff868 100644
--- a/src/test/rgw/test_rgw_crypto.cc
+++ b/src/test/rgw/test_rgw_crypto.cc
@@ -806,6 +806,7 @@ TEST(TestRGWCrypto, verify_Encrypt_Decrypt)
decrypt.flush();
ASSERT_EQ(get_sink.get_sink().length(), test_size);
ASSERT_EQ(get_sink.get_sink(), std::string_view((char*)test_in,test_size));
+ delete[] test_in;
}
while (test_size < 20000);
}
diff --git a/src/test/rgw/test_rgw_iam_policy.cc b/src/test/rgw/test_rgw_iam_policy.cc
index 67c79508a49..d7aa3244a22 100644
--- a/src/test/rgw/test_rgw_iam_policy.cc
+++ b/src/test/rgw/test_rgw_iam_policy.cc
@@ -12,6 +12,8 @@
*
*/
+#include "rgw_iam_policy.h"
+
#include <string>
#include <boost/intrusive_ptr.hpp>
@@ -26,7 +28,7 @@
#include "global/global_init.h"
#include "rgw_auth.h"
#include "rgw_auth_registry.h"
-#include "rgw_iam_policy.h"
+#include "rgw_iam_managed_policy.h"
#include "rgw_op.h"
#include "rgw_process_env.h"
#include "rgw_sal_rados.h"
@@ -48,9 +50,10 @@ using rgw::IAM::Environment;
using rgw::Partition;
using rgw::IAM::Policy;
using rgw::IAM::s3All;
-using rgw::IAM::s3Count;
+using rgw::IAM::s3objectlambdaAll;
using rgw::IAM::s3GetAccelerateConfiguration;
using rgw::IAM::s3GetBucketAcl;
+using rgw::IAM::s3GetBucketOwnershipControls;
using rgw::IAM::s3GetBucketCORS;
using rgw::IAM::s3GetBucketLocation;
using rgw::IAM::s3GetBucketLogging;
@@ -85,6 +88,35 @@ using rgw::IAM::s3PutBucketPolicy;
using rgw::IAM::s3GetBucketObjectLockConfiguration;
using rgw::IAM::s3GetObjectRetention;
using rgw::IAM::s3GetObjectLegalHold;
+using rgw::IAM::s3DescribeJob;
+using rgw::IAM::s3objectlambdaGetObject;
+using rgw::IAM::s3objectlambdaListBucket;
+using rgw::IAM::iamGenerateCredentialReport;
+using rgw::IAM::iamGenerateServiceLastAccessedDetails;
+using rgw::IAM::iamGetUserPolicy;
+using rgw::IAM::iamGetRole;
+using rgw::IAM::iamGetRolePolicy;
+using rgw::IAM::iamGetOIDCProvider;
+using rgw::IAM::iamGetUser;
+using rgw::IAM::iamListUserPolicies;
+using rgw::IAM::iamListAttachedUserPolicies;
+using rgw::IAM::iamListRoles;
+using rgw::IAM::iamListRolePolicies;
+using rgw::IAM::iamListAttachedRolePolicies;
+using rgw::IAM::iamListOIDCProviders;
+using rgw::IAM::iamListRoleTags;
+using rgw::IAM::iamListUsers;
+using rgw::IAM::iamListAccessKeys;
+using rgw::IAM::iamGetGroup;
+using rgw::IAM::iamListGroups;
+using rgw::IAM::iamListGroupsForUser;
+using rgw::IAM::iamGetGroupPolicy;
+using rgw::IAM::iamListGroupPolicies;
+using rgw::IAM::iamListAttachedGroupPolicies;
+using rgw::IAM::iamSimulateCustomPolicy;
+using rgw::IAM::iamSimulatePrincipalPolicy;
+using rgw::IAM::snsGetTopicAttributes;
+using rgw::IAM::snsListTopics;
using rgw::Service;
using rgw::IAM::TokenID;
using rgw::IAM::Version;
@@ -95,24 +127,41 @@ using rgw::IAM::iamDeleteRole;
using rgw::IAM::iamAll;
using rgw::IAM::stsAll;
using rgw::IAM::snsAll;
+using rgw::IAM::organizationsAll;
using rgw::IAM::allCount;
+using rgw::IAM::s3AllValue;
+using rgw::IAM::s3objectlambdaAllValue;
+using rgw::IAM::iamAllValue;
+using rgw::IAM::stsAllValue;
+using rgw::IAM::snsAllValue;
+using rgw::IAM::organizationsAllValue;
+using rgw::IAM::allValue;
+
+using rgw::IAM::get_managed_policy;
+
class FakeIdentity : public Identity {
const Principal id;
public:
explicit FakeIdentity(Principal&& id) : id(std::move(id)) {}
+
+ ACLOwner get_aclowner() const override {
+ ceph_abort();
+ return {};
+ }
+
uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override {
ceph_abort();
return 0;
};
- bool is_admin_of(const rgw_user& uid) const override {
+ bool is_admin_of(const rgw_owner& o) const override {
ceph_abort();
return false;
}
- bool is_owner_of(const rgw_user& uid) const override {
+ bool is_owner_of(const rgw_owner& owner) const override {
ceph_abort();
return false;
}
@@ -132,15 +181,24 @@ public:
return 0;
}
+ const std::string& get_tenant() const override {
+ ceph_abort();
+ static std::string empty;
+ return empty;
+ }
+
+ const std::optional<RGWAccountInfo>& get_account() const override {
+ ceph_abort();
+ static std::optional<RGWAccountInfo> empty;
+ return empty;
+ }
+
void to_str(std::ostream& out) const override {
out << id;
}
- bool is_identity(const flat_set<Principal>& ids) const override {
- if (id.is_wildcard() && (!ids.empty())) {
- return true;
- }
- return ids.find(id) != ids.end() || ids.find(Principal::wildcard()) != ids.end();
+ bool is_identity(const Principal& p) const override {
+ return id.is_wildcard() || p.is_wildcard() || p == id;
}
uint32_t get_identity_type() const override {
@@ -168,9 +226,7 @@ public:
TEST_F(PolicyTest, Parse1) {
boost::optional<Policy> p;
- ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(example1),
- true));
+ ASSERT_NO_THROW(p = Policy(cct.get(), &arbitrary_tenant, example1, true));
ASSERT_TRUE(p);
EXPECT_EQ(p->text, example1);
@@ -198,8 +254,7 @@ TEST_F(PolicyTest, Parse1) {
}
TEST_F(PolicyTest, Eval1) {
- auto p = Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(example1), true);
+ auto p = Policy(cct.get(), &arbitrary_tenant, example1, true);
Environment e;
ARN arn1(Partition::aws, Service::s3,
@@ -222,9 +277,7 @@ TEST_F(PolicyTest, Eval1) {
TEST_F(PolicyTest, Parse2) {
boost::optional<Policy> p;
- ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(example2),
- true));
+ ASSERT_NO_THROW(p = Policy(cct.get(), &arbitrary_tenant, example2, true));
ASSERT_TRUE(p);
EXPECT_EQ(p->text, example2);
@@ -236,11 +289,11 @@ TEST_F(PolicyTest, Parse2) {
EXPECT_FALSE(p->statements[0].princ.empty());
EXPECT_EQ(p->statements[0].princ.size(), 1U);
EXPECT_EQ(*p->statements[0].princ.begin(),
- Principal::tenant("ACCOUNT-ID-WITHOUT-HYPHENS"));
+ Principal::account("ACCOUNT-ID-WITHOUT-HYPHENS"));
EXPECT_TRUE(p->statements[0].noprinc.empty());
EXPECT_EQ(p->statements[0].effect, Effect::Allow);
Action_t act;
- for (auto i = 0ULL; i < s3Count; i++)
+ for (auto i = 0ULL; i < s3All; i++)
act[i] = 1;
act[s3All] = 1;
EXPECT_EQ(p->statements[0].action, act);
@@ -265,16 +318,15 @@ TEST_F(PolicyTest, Parse2) {
}
TEST_F(PolicyTest, Eval2) {
- auto p = Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(example2), true);
+ auto p = Policy(cct.get(), &arbitrary_tenant, example2, true);
Environment e;
auto trueacct = FakeIdentity(
- Principal::tenant("ACCOUNT-ID-WITHOUT-HYPHENS"));
+ Principal::account("ACCOUNT-ID-WITHOUT-HYPHENS"));
auto notacct = FakeIdentity(
- Principal::tenant("some-other-account"));
- for (auto i = 0ULL; i < s3Count; ++i) {
+ Principal::account("some-other-account"));
+ for (auto i = 0ULL; i < s3All; ++i) {
ARN arn1(Partition::aws, Service::s3,
"", arbitrary_tenant, "mybucket");
EXPECT_EQ(p.eval(e, trueacct, i, arn1),
@@ -306,8 +358,7 @@ TEST_F(PolicyTest, Eval2) {
TEST_F(PolicyTest, Parse3) {
boost::optional<Policy> p;
- ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(example3), true));
+ ASSERT_NO_THROW(p = Policy(cct.get(), &arbitrary_tenant, example3, true));
ASSERT_TRUE(p);
EXPECT_EQ(p->text, example3);
@@ -370,6 +421,7 @@ TEST_F(PolicyTest, Parse3) {
act2[s3GetObjectVersionTorrent] = 1;
act2[s3GetAccelerateConfiguration] = 1;
act2[s3GetBucketAcl] = 1;
+ act2[s3GetBucketOwnershipControls] = 1;
act2[s3GetBucketCORS] = 1;
act2[s3GetBucketVersioning] = 1;
act2[s3GetBucketRequestPayment] = 1;
@@ -420,8 +472,7 @@ TEST_F(PolicyTest, Parse3) {
}
TEST_F(PolicyTest, Eval3) {
- auto p = Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(example3), true);
+ auto p = Policy(cct.get(), &arbitrary_tenant, example3, true);
Environment em;
Environment tr = { { "aws:MultiFactorAuthPresent", "true" } };
Environment fa = { { "aws:MultiFactorAuthPresent", "false" } };
@@ -440,6 +491,7 @@ TEST_F(PolicyTest, Eval3) {
s3allow[s3GetObjectVersionTorrent] = 1;
s3allow[s3GetAccelerateConfiguration] = 1;
s3allow[s3GetBucketAcl] = 1;
+ s3allow[s3GetBucketOwnershipControls] = 1;
s3allow[s3GetBucketCORS] = 1;
s3allow[s3GetBucketVersioning] = 1;
s3allow[s3GetBucketRequestPayment] = 1;
@@ -472,7 +524,7 @@ TEST_F(PolicyTest, Eval3) {
Effect::Allow);
- for (auto op = 0ULL; op < s3Count; ++op) {
+ for (auto op = 0ULL; op < s3All; ++op) {
if ((op == s3ListAllMyBuckets) || (op == s3PutBucketPolicy)) {
continue;
}
@@ -531,8 +583,7 @@ TEST_F(PolicyTest, Eval3) {
TEST_F(PolicyTest, Parse4) {
boost::optional<Policy> p;
- ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(example4), true));
+ ASSERT_NO_THROW(p = Policy(cct.get(), &arbitrary_tenant, example4, true));
ASSERT_TRUE(p);
EXPECT_EQ(p->text, example4);
@@ -560,8 +611,7 @@ TEST_F(PolicyTest, Parse4) {
}
TEST_F(PolicyTest, Eval4) {
- auto p = Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(example4), true);
+ auto p = Policy(cct.get(), &arbitrary_tenant, example4, true);
Environment e;
ARN arn1(Partition::aws, Service::iam,
@@ -578,8 +628,7 @@ TEST_F(PolicyTest, Eval4) {
TEST_F(PolicyTest, Parse5) {
boost::optional<Policy> p;
- ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(example5), true));
+ ASSERT_NO_THROW(p = Policy(cct.get(), &arbitrary_tenant, example5, true));
ASSERT_TRUE(p);
EXPECT_EQ(p->text, example5);
EXPECT_EQ(p->version, Version::v2012_10_17);
@@ -591,7 +640,7 @@ TEST_F(PolicyTest, Parse5) {
EXPECT_TRUE(p->statements[0].noprinc.empty());
EXPECT_EQ(p->statements[0].effect, Effect::Allow);
Action_t act;
- for (auto i = s3All+1; i <= iamAll; i++)
+ for (auto i = s3objectlambdaAll+1; i <= iamAll; i++)
act[i] = 1;
EXPECT_EQ(p->statements[0].action, act);
EXPECT_EQ(p->statements[0].notaction, None);
@@ -607,8 +656,7 @@ TEST_F(PolicyTest, Parse5) {
}
TEST_F(PolicyTest, Eval5) {
- auto p = Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(example5), true);
+ auto p = Policy(cct.get(), &arbitrary_tenant, example5, true);
Environment e;
ARN arn1(Partition::aws, Service::iam,
@@ -630,8 +678,7 @@ TEST_F(PolicyTest, Eval5) {
TEST_F(PolicyTest, Parse6) {
boost::optional<Policy> p;
- ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(example6), true));
+ ASSERT_NO_THROW(p = Policy(cct.get(), &arbitrary_tenant, example6, true));
ASSERT_TRUE(p);
EXPECT_EQ(p->text, example6);
EXPECT_EQ(p->version, Version::v2012_10_17);
@@ -643,7 +690,7 @@ TEST_F(PolicyTest, Parse6) {
EXPECT_TRUE(p->statements[0].noprinc.empty());
EXPECT_EQ(p->statements[0].effect, Effect::Allow);
Action_t act;
- for (auto i = 0U; i <= snsAll; i++)
+ for (auto i = 0U; i <= organizationsAll; i++)
act[i] = 1;
EXPECT_EQ(p->statements[0].action, act);
EXPECT_EQ(p->statements[0].notaction, None);
@@ -659,8 +706,7 @@ TEST_F(PolicyTest, Parse6) {
}
TEST_F(PolicyTest, Eval6) {
- auto p = Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(example6), true);
+ auto p = Policy(cct.get(), &arbitrary_tenant, example6, true);
Environment e;
ARN arn1(Partition::aws, Service::iam,
@@ -677,8 +723,7 @@ TEST_F(PolicyTest, Eval6) {
TEST_F(PolicyTest, Parse7) {
boost::optional<Policy> p;
- ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(example7), true));
+ ASSERT_NO_THROW(p = Policy(cct.get(), &arbitrary_tenant, example7, true));
ASSERT_TRUE(p);
EXPECT_EQ(p->text, example7);
@@ -702,15 +747,14 @@ TEST_F(PolicyTest, Parse7) {
EXPECT_EQ(p->statements[0].resource.begin()->resource, "mybucket/*");
EXPECT_TRUE(p->statements[0].princ.begin()->is_user());
EXPECT_FALSE(p->statements[0].princ.begin()->is_wildcard());
- EXPECT_EQ(p->statements[0].princ.begin()->get_tenant(), "");
+ EXPECT_EQ(p->statements[0].princ.begin()->get_account(), "");
EXPECT_EQ(p->statements[0].princ.begin()->get_id(), "A:subA");
EXPECT_TRUE(p->statements[0].notresource.empty());
EXPECT_TRUE(p->statements[0].conditions.empty());
}
TEST_F(PolicyTest, Eval7) {
- auto p = Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(example7), true);
+ auto p = Policy(cct.get(), &arbitrary_tenant, example7, true);
Environment e;
auto subacct = FakeIdentity(
@@ -736,6 +780,147 @@ TEST_F(PolicyTest, Eval7) {
Effect::Pass);
}
+
+class ManagedPolicyTest : public ::testing::Test {
+protected:
+ intrusive_ptr<CephContext> cct;
+public:
+ ManagedPolicyTest() : cct(new CephContext(CEPH_ENTITY_TYPE_CLIENT)) {}
+};
+
+TEST_F(ManagedPolicyTest, IAMFullAccess)
+{
+ auto p = get_managed_policy(cct.get(), "arn:aws:iam::aws:policy/IAMFullAccess");
+ ASSERT_TRUE(p);
+
+ Action_t act = iamAllValue | organizationsAllValue;
+ act[iamAll] = 1;
+ act[organizationsAll] = 1;
+ EXPECT_EQ(act, p->statements[0].action);
+}
+
+TEST_F(ManagedPolicyTest, IAMReadOnlyAccess)
+{
+ auto p = get_managed_policy(cct.get(), "arn:aws:iam::aws:policy/IAMReadOnlyAccess");
+ ASSERT_TRUE(p);
+
+ Action_t act;
+ act[iamGenerateCredentialReport] = 1;
+ act[iamGenerateServiceLastAccessedDetails] = 1;
+ act[iamGetUserPolicy] = 1;
+ act[iamGetRole] = 1;
+ act[iamGetRolePolicy] = 1;
+ act[iamGetOIDCProvider] = 1;
+ act[iamGetUser] = 1;
+ act[iamListUserPolicies] = 1;
+ act[iamListAttachedUserPolicies] = 1;
+ act[iamListRoles] = 1;
+ act[iamListRolePolicies] = 1;
+ act[iamListAttachedRolePolicies] = 1;
+ act[iamListOIDCProviders] = 1;
+ act[iamListRoleTags] = 1;
+ act[iamListUsers] = 1;
+ act[iamListAccessKeys] = 1;
+ act[iamGetGroup] = 1;
+ act[iamListGroups] = 1;
+ act[iamListGroupsForUser] = 1;
+ act[iamGetGroupPolicy] = 1;
+ act[iamListGroupPolicies] = 1;
+ act[iamListAttachedGroupPolicies] = 1;
+ act[iamSimulateCustomPolicy] = 1;
+ act[iamSimulatePrincipalPolicy] = 1;
+
+ EXPECT_EQ(act, p->statements[0].action);
+}
+
+TEST_F(ManagedPolicyTest, AmazonSNSFullAccess)
+{
+ auto p = get_managed_policy(cct.get(), "arn:aws:iam::aws:policy/AmazonSNSFullAccess");
+ ASSERT_TRUE(p);
+
+ Action_t act = snsAllValue;
+ act[snsAll] = 1;
+ EXPECT_EQ(act, p->statements[0].action);
+}
+
+TEST_F(ManagedPolicyTest, AmazonSNSReadOnlyAccess)
+{
+ auto p = get_managed_policy(cct.get(), "arn:aws:iam::aws:policy/AmazonSNSReadOnlyAccess");
+ ASSERT_TRUE(p);
+
+ Action_t act;
+ // sns:GetTopicAttributes
+ act[snsGetTopicAttributes] = 1;
+ // sns:List*
+ act[snsListTopics] = 1;
+
+ EXPECT_EQ(act, p->statements[0].action);
+}
+
+TEST_F(ManagedPolicyTest, AmazonS3FullAccess)
+{
+ auto p = get_managed_policy(cct.get(), "arn:aws:iam::aws:policy/AmazonS3FullAccess");
+ ASSERT_TRUE(p);
+
+ Action_t act = s3AllValue | s3objectlambdaAllValue;
+ act[s3All] = 1;
+ act[s3objectlambdaAll] = 1;
+ EXPECT_EQ(act, p->statements[0].action);
+}
+
+TEST_F(ManagedPolicyTest, AmazonS3ReadOnlyAccess)
+{
+ auto p = get_managed_policy(cct.get(), "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess");
+ ASSERT_TRUE(p);
+
+ Action_t act;
+ // s3:Get*
+ act[s3GetObject] = 1;
+ act[s3GetObjectVersion] = 1;
+ act[s3GetObjectAcl] = 1;
+ act[s3GetObjectVersionAcl] = 1;
+ act[s3GetObjectTorrent] = 1;
+ act[s3GetObjectVersionTorrent] = 1;
+ act[s3GetAccelerateConfiguration] = 1;
+ act[s3GetBucketAcl] = 1;
+ act[s3GetBucketOwnershipControls] = 1;
+ act[s3GetBucketCORS] = 1;
+ act[s3GetBucketVersioning] = 1;
+ act[s3GetBucketRequestPayment] = 1;
+ act[s3GetBucketLocation] = 1;
+ act[s3GetBucketPolicy] = 1;
+ act[s3GetBucketNotification] = 1;
+ act[s3GetBucketLogging] = 1;
+ act[s3GetBucketTagging] = 1;
+ act[s3GetBucketWebsite] = 1;
+ act[s3GetLifecycleConfiguration] = 1;
+ act[s3GetReplicationConfiguration] = 1;
+ act[s3GetObjectTagging] = 1;
+ act[s3GetObjectVersionTagging] = 1;
+ act[s3GetBucketObjectLockConfiguration] = 1;
+ act[s3GetObjectRetention] = 1;
+ act[s3GetObjectLegalHold] = 1;
+ act[s3GetBucketPolicyStatus] = 1;
+ act[s3GetPublicAccessBlock] = 1;
+ act[s3GetBucketPublicAccessBlock] = 1;
+ act[s3GetBucketEncryption] = 1;
+ // s3:List*
+ act[s3ListMultipartUploadParts] = 1;
+ act[s3ListBucket] = 1;
+ act[s3ListBucketVersions] = 1;
+ act[s3ListAllMyBuckets] = 1;
+ act[s3ListBucketMultipartUploads] = 1;
+ // s3:Describe*
+ act[s3DescribeJob] = 1;
+ // s3-object-lambda:Get*
+ act[s3objectlambdaGetObject] = 1;
+ // s3-object-lambda:List*
+ act[s3objectlambdaListBucket] = 1;
+ act[s3objectlambdaAll] = 1;
+
+ EXPECT_EQ(act, p->statements[0].action);
+}
+
const string PolicyTest::arbitrary_tenant = "arbitrary_tenant";
string PolicyTest::example1 = R"(
{
@@ -958,8 +1143,7 @@ TEST_F(IPPolicyTest, ParseIPAddress) {
boost::optional<Policy> p;
ASSERT_NO_THROW(
- p = Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(ip_address_full_example), true));
+ p = Policy(cct.get(), &arbitrary_tenant, ip_address_full_example, true));
ASSERT_TRUE(p);
EXPECT_EQ(p->text, ip_address_full_example);
@@ -1016,14 +1200,11 @@ TEST_F(IPPolicyTest, ParseIPAddress) {
TEST_F(IPPolicyTest, EvalIPAddress) {
auto allowp =
- Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(ip_address_allow_example), true);
+ Policy(cct.get(), &arbitrary_tenant, ip_address_allow_example, true);
auto denyp =
- Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(ip_address_deny_example), true);
+ Policy(cct.get(), &arbitrary_tenant, ip_address_deny_example, true);
auto fullp =
- Policy(cct.get(), arbitrary_tenant,
- bufferlist::static_from_string(ip_address_full_example), true);
+ Policy(cct.get(), &arbitrary_tenant, ip_address_full_example, true);
Environment e;
Environment allowedIP, blocklistedIP, allowedIPv6, blocklistedIPv6;
allowedIP.emplace("aws:SourceIp","192.168.1.2");
@@ -1032,7 +1213,7 @@ TEST_F(IPPolicyTest, EvalIPAddress) {
blocklistedIPv6.emplace("aws:SourceIp", "2001:0db8:85a3:0000:0000:8a2e:0370:7334");
auto trueacct = FakeIdentity(
- Principal::tenant("ACCOUNT-ID-WITHOUT-HYPHENS"));
+ Principal::account("ACCOUNT-ID-WITHOUT-HYPHENS"));
  // Without an IP address in the environment, evaluation will always pass
ARN arn1(Partition::aws, Service::s3,
"", arbitrary_tenant, "example_bucket");
@@ -1309,14 +1490,13 @@ Action_t set_range_bits(std::uint64_t start, std::uint64_t end)
return result;
}
-using rgw::IAM::s3AllValue;
-using rgw::IAM::stsAllValue;
-using rgw::IAM::allValue;
-using rgw::IAM::iamAllValue;
TEST(set_cont_bits, iamconsts)
{
EXPECT_EQ(s3AllValue, set_range_bits(0, s3All));
- EXPECT_EQ(iamAllValue, set_range_bits(s3All+1, iamAll));
+ EXPECT_EQ(s3objectlambdaAllValue, set_range_bits(s3All+1, s3objectlambdaAll));
+ EXPECT_EQ(iamAllValue, set_range_bits(s3objectlambdaAll+1, iamAll));
EXPECT_EQ(stsAllValue, set_range_bits(iamAll+1, stsAll));
+ EXPECT_EQ(snsAllValue, set_range_bits(stsAll+1, snsAll));
+ EXPECT_EQ(organizationsAllValue, set_range_bits(snsAll+1, organizationsAll));
  EXPECT_EQ(allValue, set_range_bits(0, allCount));
}
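
[editor's note] The updated iamconsts assertions encode the invariant that each service's *AllValue mask is one contiguous run of bits, ordered s3, s3-object-lambda, iam, sts, sns, organizations, with each service's *All marker bit set separately. For reference, a minimal sketch of a set_range_bits helper consistent with those half-open-range expectations, assuming Action_t is a std::bitset as the indexing above suggests; the width constant below is a stand-in, not Ceph's real allCount:

// Illustrative sketch only: the real Action_t and allCount live in
// rgw/rgw_iam_policy.h; the width here is an assumption for the example.
#include <bitset>
#include <cstdint>

constexpr std::size_t kMaxActionBits = 512;   // assumed width, not Ceph's
using Action_t = std::bitset<kMaxActionBits>;

// Set every bit in the half-open range [start, end); the per-service
// *All marker bits are set separately, matching the EXPECT_EQ calls above.
Action_t set_range_bits(std::uint64_t start, std::uint64_t end)
{
  Action_t result;
  for (auto i = start; i < end; ++i) {
    result.set(i);
  }
  return result;
}
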
diff --git a/src/test/rgw/test_rgw_lua.cc b/src/test/rgw/test_rgw_lua.cc
index e6014513ba3..b2e11e442a2 100644
--- a/src/test/rgw/test_rgw_lua.cc
+++ b/src/test/rgw/test_rgw_lua.cc
@@ -33,15 +33,19 @@ class FakeIdentity : public Identity {
public:
FakeIdentity() = default;
+ ACLOwner get_aclowner() const override {
+ return {};
+ }
+
uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override {
return 0;
};
- bool is_admin_of(const rgw_user& uid) const override {
+ bool is_admin_of(const rgw_owner& o) const override {
return false;
}
- bool is_owner_of(const rgw_user& uid) const override {
+ bool is_owner_of(const rgw_owner& uid) const override {
return false;
}
@@ -61,11 +65,21 @@ public:
return "";
}
+ const std::string& get_tenant() const override {
+ static std::string empty;
+ return empty;
+ }
+
+ const std::optional<RGWAccountInfo>& get_account() const override {
+ static const std::optional<RGWAccountInfo> empty;
+ return empty;
+ }
+
void to_str(std::ostream& out) const override {
return;
}
- bool is_identity(const flat_set<Principal>& ids) const override {
+ bool is_identity(const Principal& p) const override {
return false;
}
};
@@ -84,18 +98,6 @@ public:
return 0;
}
- virtual int read_stats(const DoutPrefixProvider *dpp, optional_yield y, RGWStorageStats* stats, ceph::real_time *last_stats_sync, ceph::real_time *last_stats_update) override {
- return 0;
- }
-
- virtual int read_stats_async(const DoutPrefixProvider *dpp, boost::intrusive_ptr<sal::ReadStatsCB> cb) override {
- return 0;
- }
-
- virtual int complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) override {
- return 0;
- }
-
virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, map<rgw_user_bucket, rgw_usage_log_entry>& usage) override {
return 0;
}
@@ -121,6 +123,11 @@ public:
virtual int verify_mfa(const std::string& mfa_str, bool* verified, const DoutPrefixProvider* dpp, optional_yield y) override {
return 0;
}
+ int list_groups(const DoutPrefixProvider* dpp, optional_yield y,
+ std::string_view marker, uint32_t max_items,
+ rgw::sal::GroupList& listing) override {
+ return 0;
+ }
virtual ~TestUser() = default;
};
@@ -358,8 +365,7 @@ TEST(TestRGWLua, Bucket)
info.bucket.name = "myname";
info.bucket.marker = "mymarker";
info.bucket.bucket_id = "myid";
- info.owner.id = "myuser";
- info.owner.tenant = "mytenant";
+ info.owner = rgw_user{"mytenant", "myuser"};
s.bucket.reset(new sal::RadosBucket(nullptr, info));
const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
@@ -663,8 +669,7 @@ TEST(TestRGWLua, Acl)
end
assert(Request.UserAcl.Owner.DisplayName == "jack black", Request.UserAcl.Owner.DisplayName)
- assert(Request.UserAcl.Owner.User.Id == "black", Request.UserAcl.Owner.User.Id)
- assert(Request.UserAcl.Owner.User.Tenant == "jack", Request.UserAcl.Owner.User.Tenant)
+ assert(Request.UserAcl.Owner.User == "jack$black", Request.UserAcl.Owner.User)
assert(#Request.UserAcl.Grants == 7)
print_grant("", Request.UserAcl.Grants[""])
for k, v in pairs(Request.UserAcl.Grants) do
@@ -726,8 +731,7 @@ TEST(TestRGWLua, UseFunction)
const std::string script = R"(
function print_owner(owner)
print("Owner Display Name: " .. owner.DisplayName)
- print("Owner Id: " .. owner.User.Id)
- print("Owner Tenanet: " .. owner.User.Tenant)
+ print("Owner Id: " .. owner.User)
end
print_owner(Request.ObjectOwner)
@@ -1577,8 +1581,7 @@ TEST(TestRGWLua, DifferentContextUser)
s.user.reset(new sal::RadosUser(nullptr, rgw_user("tenant1", "user1")));
RGWBucketInfo info;
info.bucket.name = "bucket1";
- info.owner.id = "user2";
- info.owner.tenant = "tenant2";
+ info.owner = rgw_user{"tenant2", "user2"};
s.bucket.reset(new sal::RadosBucket(nullptr, info));
const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script);
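
[editor's note] These Lua-test changes follow from the bucket owner switching from an embedded rgw_user to the broader rgw_owner, which can also name an account — hence the single whole-value assignment and the "jack$black" tenant$id string form asserted above. A rough, self-contained sketch of the shape those assignments imply; the type and field names are simplified stand-ins, not copied from rgw_user_types.h:

// Simplified sketch of the owner model these tests assume.
#include <iostream>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>

struct rgw_user {
  std::string tenant;
  std::string id;
  rgw_user(std::string t, std::string i)
    : tenant(std::move(t)), id(std::move(i)) {}
};

using rgw_account_id = std::string;  // e.g. "RGW11111111111111111"
using rgw_owner = std::variant<rgw_user, rgw_account_id>;

int main() {
  // One assignment replaces the old per-field owner.id/owner.tenant writes:
  rgw_owner owner = rgw_user{"tenant2", "user2"};
  // Visiting distinguishes user owners from account owners:
  std::visit([](const auto& o) {
    if constexpr (std::is_same_v<std::decay_t<decltype(o)>, rgw_user>) {
      std::cout << o.tenant << "$" << o.id << "\n";  // tenant$id form
    } else {
      std::cout << o << "\n";                        // account id
    }
  }, owner);
}
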
diff --git a/src/tools/ceph-dencoder/rgw_types.h b/src/tools/ceph-dencoder/rgw_types.h
index 05484c78fc1..8cd0acfc624 100644
--- a/src/tools/ceph-dencoder/rgw_types.h
+++ b/src/tools/ceph-dencoder/rgw_types.h
@@ -126,6 +126,8 @@ TYPE(cls_user_bucket)
TYPE(cls_user_bucket_entry)
TYPE(cls_user_stats)
TYPE(cls_user_header)
+TYPE(cls_user_account_header)
+TYPE(cls_user_account_resource)
#include "cls/user/cls_user_ops.h"
TYPE(cls_user_set_buckets_op)
@@ -135,6 +137,12 @@ TYPE(cls_user_list_buckets_ret)
TYPE(cls_user_get_header_op)
TYPE(cls_user_get_header_ret)
TYPE(cls_user_complete_stats_sync_op)
+TYPE(cls_user_account_resource_add_op)
+TYPE(cls_user_account_resource_get_op)
+TYPE(cls_user_account_resource_get_ret)
+TYPE(cls_user_account_resource_rm_op)
+TYPE(cls_user_account_resource_list_op)
+TYPE(cls_user_account_resource_list_ret)
#include "cls/journal/cls_journal_types.h"
TYPE(cls::journal::ObjectPosition)
@@ -154,6 +162,8 @@ TYPE(obj_version)
TYPE(RGWAccessKey)
TYPE(RGWSubUser)
TYPE(RGWUserInfo)
+TYPE(RGWAccountInfo)
+TYPE(RGWGroupInfo)
TYPE(rgw_bucket)
TYPE(RGWBucketInfo)
TYPE(RGWBucketEnt)
@@ -237,4 +247,16 @@ TYPE(RGWUID)
#include "rgw_user_types.h"
TYPE(rgw_user)
+#include "rgw_oidc_provider.h"
+TYPE(RGWOIDCProviderInfo)
+
+#include "driver/rados/groups.h"
+TYPE(rgwrados::groups::resource_metadata)
+
+#include "driver/rados/roles.h"
+TYPE(rgwrados::roles::resource_metadata)
+
+#include "driver/rados/users.h"
+TYPE(rgwrados::users::resource_metadata)
+
#endif
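
[editor's note] The new TYPE() entries register the account, group, role, and OIDC-provider structures with ceph-dencoder so their encodings can be round-tripped and dumped like the existing RGW types. Conceptually, each entry maps a type name to encode/decode helpers for that type; the following is a toy illustration of that registration idea only — ceph-dencoder's actual TYPE() macro and registry are more involved:

// Toy sketch of name -> codec registration; not ceph-dencoder's real code.
#include <functional>
#include <map>
#include <string>
#include <vector>

struct Codec {
  std::function<std::vector<char>()> encode_sample;      // encode a test value
  std::function<bool(const std::vector<char>&)> decode;  // round-trip check
};

std::map<std::string, Codec>& registry() {
  static std::map<std::string, Codec> r;
  return r;
}

template <typename T>
void register_type(const std::string& name) {
  registry()[name] = Codec{
    [] { T t{}; (void)t; /* t.encode(bl) in the real tool */
         return std::vector<char>{}; },
    [](const std::vector<char>&) { /* decode + compare in the real tool */
         return true; }
  };
}

#define TYPE(t) register_type<t>(#t);

// Usage, mirroring the rgw_types.h entries:
struct RGWAccountInfo {};  // stand-in for the real type
void register_rgw_types() {
  TYPE(RGWAccountInfo)
}
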
diff --git a/src/vstart.sh b/src/vstart.sh
index a077ca2185e..a4065addd65 100755
--- a/src/vstart.sh
+++ b/src/vstart.sh
@@ -901,6 +901,8 @@ $CCLIENTDEBUG
rgw crypt s3 kms backend = testing
rgw crypt s3 kms encryption keys = testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
rgw crypt require ssl = false
+ rgw sts key = abcdefghijklmnop
+ rgw s3 auth use sts = true
; uncomment the following to set LC days as the value in seconds;
; needed for passing lc time based s3-tests (can be verbose)
; rgw lc debug interval = 10
@@ -1814,12 +1816,13 @@ do_rgw_create_users()
# Create S3-test users
# See: https://github.com/ceph/s3-tests
debug echo "setting up s3-test users"
+
$CEPH_BIN/radosgw-admin user create \
--uid 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef \
--access-key ABCDEFGHIJKLMNOPQRST \
--secret abcdefghijklmnopqrstuvwxyzabcdefghijklmn \
--display-name youruseridhere \
- --email s3@example.com --caps="user-policy=*" -c $conf_fn > /dev/null
+ --email s3@example.com --caps="roles=*;user-policy=*" -c $conf_fn > /dev/null
$CEPH_BIN/radosgw-admin user create \
--uid 56789abcdef0123456789abcdef0123456789abcdef0123456789abcdef01234 \
--access-key NOPQRSTUVWXYZABCDEFG \
@@ -1834,6 +1837,28 @@ do_rgw_create_users()
--display-name tenanteduser \
--email tenanteduser@example.com -c $conf_fn > /dev/null
+ if [ "$rgw_store" == "rados" ] ; then
+ # create accounts/users for iam s3tests
+ a1_akey='AAAAAAAAAAAAAAAAAAaa'
+ a1_skey='aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
+ $CEPH_BIN/radosgw-admin account create --account-id RGW11111111111111111 --account-name Account1 --email account1@ceph.com -c $conf_fn > /dev/null
+ $CEPH_BIN/radosgw-admin user create --account-id RGW11111111111111111 --uid testacct1root --account-root \
+ --display-name 'Account1Root' --access-key $a1_akey --secret $a1_skey -c $conf_fn > /dev/null
+
+ a2_akey='BBBBBBBBBBBBBBBBBBbb'
+ a2_skey='bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'
+ $CEPH_BIN/radosgw-admin account create --account-id RGW22222222222222222 --account-name Account2 --email account2@ceph.com -c $conf_fn > /dev/null
+ $CEPH_BIN/radosgw-admin user create --account-id RGW22222222222222222 --uid testacct2root --account-root \
+ --display-name 'Account2Root' --access-key $a2_akey --secret $a2_skey -c $conf_fn > /dev/null
+
+ a1u_akey='CCCCCCCCCCCCCCCCCCcc'
+ a1u_skey='cccccccccccccccccccccccccccccccccccccccc'
+ $CEPH_BIN/radosgw-admin user create --account-id RGW11111111111111111 --uid testacct1user \
+ --display-name 'Account1User' --access-key $a1u_akey --secret $a1u_skey -c $conf_fn > /dev/null
+ $CEPH_BIN/radosgw-admin user policy attach --uid testacct1user \
+ --policy-arn arn:aws:iam::aws:policy/AmazonS3FullAccess -c $conf_fn > /dev/null
+ fi
+
# Create Swift user
debug echo "setting up user tester"
$CEPH_BIN/radosgw-admin user create -c $conf_fn --subuser=test:tester --display-name=Tester-Subuser --key-type=swift --secret=testing --access=full > /dev/null