diff options
Diffstat (limited to 'src/test/rgw')
-rw-r--r--  src/test/rgw/bucket_notification/api.py            |   4
-rw-r--r--  src/test/rgw/bucket_notification/requirements.txt  |   2
-rw-r--r--  src/test/rgw/bucket_notification/test_bn.py        | 308
-rw-r--r--  src/test/rgw/rgw_multi/tests.py                    |  26
-rw-r--r--  src/test/rgw/test-rgw-common.sh                    |   4
-rwxr-xr-x  src/test/rgw/test-rgw-multisite.sh                 |  58
-rw-r--r--  src/test/rgw/test_log_backing.cc                   |   1
-rw-r--r--  src/test/rgw/test_rgw_iam_policy.cc                |   8
8 files changed, 382 insertions, 29 deletions
diff --git a/src/test/rgw/bucket_notification/api.py b/src/test/rgw/bucket_notification/api.py index e7ec31f1711..e84aa16edc7 100644 --- a/src/test/rgw/bucket_notification/api.py +++ b/src/test/rgw/bucket_notification/api.py @@ -247,12 +247,16 @@ def delete_all_topics(conn, tenant, cluster): if tenant == '': topics_result = admin(['topic', 'list'], cluster) topics_json = json.loads(topics_result[0]) + if 'topics' not in topics_json: + topics_json = topics_json.get('result',{}) for topic in topics_json['topics']: rm_result = admin(['topic', 'rm', '--topic', topic['name']], cluster) print(rm_result) else: topics_result = admin(['topic', 'list', '--tenant', tenant], cluster) topics_json = json.loads(topics_result[0]) + if 'topics' not in topics_json: + topics_json = topics_json.get('result',{}) for topic in topics_json['topics']: rm_result = admin(['topic', 'rm', '--tenant', tenant, '--topic', topic['name']], cluster) print(rm_result) diff --git a/src/test/rgw/bucket_notification/requirements.txt b/src/test/rgw/bucket_notification/requirements.txt index a3cff2bedab..bb74eceedc3 100644 --- a/src/test/rgw/bucket_notification/requirements.txt +++ b/src/test/rgw/bucket_notification/requirements.txt @@ -1,4 +1,4 @@ -nose >=1.0.0 +nose-py3 >=1.0.0 boto >=2.6.0 boto3 >=1.0.0 configparser >=5.0.0 diff --git a/src/test/rgw/bucket_notification/test_bn.py b/src/test/rgw/bucket_notification/test_bn.py index 359990b3531..665fbca7494 100644 --- a/src/test/rgw/bucket_notification/test_bn.py +++ b/src/test/rgw/bucket_notification/test_bn.py @@ -410,17 +410,25 @@ kafka_server = 'localhost' class KafkaReceiver(object): """class for receiving and storing messages on a topic from the kafka broker""" - def __init__(self, topic, security_type): + def __init__(self, topic, security_type, kafka_server='localhost'): from kafka import KafkaConsumer remaining_retries = 10 port = 9092 if security_type != 'PLAINTEXT': security_type = 'SSL' port = 9093 + + if kafka_server is None: + endpoint = 
"localhost" + ":" + str(port) + elif ":" not in kafka_server: + endpoint = kafka_server + ":" + str(port) + else: + endpoint = kafka_server + while remaining_retries > 0: try: self.consumer = KafkaConsumer(topic, - bootstrap_servers = kafka_server+':'+str(port), + bootstrap_servers=endpoint, security_protocol=security_type, consumer_timeout_ms=16000, auto_offset_reset='earliest') @@ -468,9 +476,9 @@ def kafka_receiver_thread_runner(receiver): print('Kafka receiver ended unexpectedly: ' + str(error)) -def create_kafka_receiver_thread(topic, security_type='PLAINTEXT'): +def create_kafka_receiver_thread(topic, security_type='PLAINTEXT', kafka_brokers=None): """create kafka receiver and thread""" - receiver = KafkaReceiver(topic, security_type) + receiver = KafkaReceiver(topic, security_type, kafka_server=kafka_brokers) task = threading.Thread(target=kafka_receiver_thread_runner, args=(receiver,)) task.daemon = True return task, receiver @@ -1304,7 +1312,7 @@ def test_ps_s3_notification_errors_on_master(): conn.delete_bucket(bucket_name) -def notification_push(endpoint_type, conn, account=None, cloudevents=False): +def notification_push(endpoint_type, conn, account=None, cloudevents=False, kafka_brokers=None): """ test pushinging notification """ zonegroup = get_config_zonegroup() # create bucket @@ -1359,11 +1367,13 @@ def notification_push(endpoint_type, conn, account=None, cloudevents=False): assert_equal(status/100, 2) elif endpoint_type == 'kafka': # start amqp receiver - task, receiver = create_kafka_receiver_thread(topic_name) + task, receiver = create_kafka_receiver_thread(topic_name, kafka_brokers=kafka_brokers) task.start() endpoint_address = 'kafka://' + kafka_server # without acks from broker endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker' + if kafka_brokers is not None: + endpoint_args += '&kafka-brokers=' + kafka_brokers # create s3 topic topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) topic_arn 
= topic_conf.set_config() @@ -1581,6 +1591,20 @@ def test_notification_push_kafka(): notification_push('kafka', conn) +@attr('kafka_failover') +def test_notification_push_kafka_multiple_brokers_override(): + """ test pushing kafka s3 notification on master """ + conn = connection() + notification_push('kafka', conn, kafka_brokers='localhost:9092,localhost:19092') + + +@attr('kafka_failover') +def test_notification_push_kafka_multiple_brokers_append(): + """ test pushing kafka s3 notification on master """ + conn = connection() + notification_push('kafka', conn, kafka_brokers='localhost:19092') + + @attr('http_test') def test_ps_s3_notification_multi_delete_on_master(): """ test deletion of multiple keys on master """ @@ -2981,7 +3005,6 @@ def wait_for_queue_to_drain(topic_name, tenant=None, account=None, http_port=Non log.info('waited for %ds for queue %s to drain', time_diff, topic_name) -@attr('kafka_test') def persistent_topic_stats(conn, endpoint_type): zonegroup = get_config_zonegroup() @@ -2993,12 +3016,13 @@ def persistent_topic_stats(conn, endpoint_type): host = get_ip() task = None port = None + wrong_port = 1234 + endpoint_address = endpoint_type+'://'+host+':'+str(wrong_port) if endpoint_type == 'http': # create random port for the http server port = random.randint(10000, 20000) # start an http server in a separate thread receiver = HTTPServerWithEvents((host, port)) - endpoint_address = 'http://'+host+':'+str(port) endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'+ \ '&retry_sleep_duration=1' elif endpoint_type == 'amqp': @@ -3006,23 +3030,18 @@ def persistent_topic_stats(conn, endpoint_type): exchange = 'ex1' task, receiver = create_amqp_receiver_thread(exchange, topic_name) task.start() - endpoint_address = 'amqp://' + host endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange='+exchange+'&amqp-ack-level=broker&persistent=true'+ \ '&retry_sleep_duration=1' elif endpoint_type == 'kafka': # start kafka receiver task, 
receiver = create_kafka_receiver_thread(topic_name) task.start() - endpoint_address = 'kafka://' + host endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker&persistent=true'+ \ '&retry_sleep_duration=1' else: return SkipTest('Unknown endpoint type: ' + endpoint_type) # create s3 topic - endpoint_address = 'kafka://' + host + ':1234' # wrong port - endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker&persistent=true'+ \ - '&retry_sleep_duration=1' topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) topic_arn = topic_conf.set_config() # create s3 notification @@ -3070,9 +3089,19 @@ def persistent_topic_stats(conn, endpoint_type): get_stats_persistent_topic(topic_name, 2 * number_of_objects) # change the endpoint port - endpoint_address = 'kafka://' + host - endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker&persistent=true'+ \ - '&retry_sleep_duration=1' + if endpoint_type == 'http': + endpoint_address = endpoint_type+'://'+host+':'+str(port) + endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true'+ \ + '&retry_sleep_duration=1' + elif endpoint_type == 'amqp': + endpoint_address = endpoint_type+'://'+host + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange='+exchange+'&amqp-ack-level=broker&persistent=true'+ \ + '&retry_sleep_duration=1' + elif endpoint_type == 'kafka': + endpoint_address = endpoint_type+'://'+host + endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker&persistent=true'+ \ + '&retry_sleep_duration=1' + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) topic_arn = topic_conf.set_config() @@ -3087,19 +3116,26 @@ def persistent_topic_stats(conn, endpoint_type): @attr('http_test') -def persistent_topic_stats_http(): +def test_persistent_topic_stats_http(): """ test persistent topic stats, http endpoint """ conn = connection() persistent_topic_stats(conn, 'http') 
@attr('kafka_test') -def persistent_topic_stats_kafka(): +def test_persistent_topic_stats_kafka(): """ test persistent topic stats, kafka endpoint """ conn = connection() persistent_topic_stats(conn, 'kafka') +@attr('amqp_test') +def test_persistent_topic_stats_amqp(): + """ test persistent topic stats, amqp endpoint """ + conn = connection() + persistent_topic_stats(conn, 'amqp') + + @attr('kafka_test') def test_persistent_topic_dump(): """ test persistent topic dump """ @@ -4359,6 +4395,242 @@ def test_ps_s3_multiple_topics_notification(): http_server.close() +@attr('data_path_v2_test') +def test_ps_s3_list_topics_migration(): + """ test list topics on migration""" + if get_config_cluster() == 'noname': + return SkipTest('realm is needed for migration test') + + # Initialize connections and configurations + conn1 = connection() + tenant = 'kaboom1' + conn2 = connect_random_user(tenant) + bucket_name = gen_bucket_name() + topics = [f"{bucket_name}{TOPIC_SUFFIX}{i}" for i in range(1, 7)] + tenant_topics = [f"{tenant}_{topic}" for topic in topics] + + # Define topic names with version + topic_versions = { + "topic1_v2": f"{topics[0]}_v2", + "topic2_v2": f"{topics[1]}_v2", + "topic3_v1": f"{topics[2]}_v1", + "topic4_v1": f"{topics[3]}_v1", + "topic5_v1": f"{topics[4]}_v1", + "topic6_v1": f"{topics[5]}_v1", + "tenant_topic1_v2": f"{tenant_topics[0]}_v2", + "tenant_topic2_v1": f"{tenant_topics[1]}_v1", + "tenant_topic3_v1": f"{tenant_topics[2]}_v1" + } + + # Get necessary configurations + host = get_ip() + http_port = random.randint(10000, 20000) + endpoint_address = 'http://' + host + ':' + str(http_port) + endpoint_args = 'push-endpoint=' + endpoint_address + '&persistent=true' + zonegroup = get_config_zonegroup() + conf_cluster = get_config_cluster() + + # Make sure there are no leftover topics on v2 + zonegroup_modify_feature(enable=True, feature_name=zonegroup_feature_notification_v2) + delete_all_topics(conn1, '', conf_cluster) + delete_all_topics(conn2, tenant, 
conf_cluster) + + # Start v1 notification + # Make sure there are no leftover topics on v1 + zonegroup_modify_feature(enable=False, feature_name=zonegroup_feature_notification_v2) + delete_all_topics(conn1, '', conf_cluster) + delete_all_topics(conn2, tenant, conf_cluster) + + # Create s3 - v1 topics + topic_conf = PSTopicS3(conn1, topic_versions['topic3_v1'], zonegroup, endpoint_args=endpoint_args) + topic_arn3 = topic_conf.set_config() + topic_conf = PSTopicS3(conn1, topic_versions['topic4_v1'], zonegroup, endpoint_args=endpoint_args) + topic_arn4 = topic_conf.set_config() + topic_conf = PSTopicS3(conn1, topic_versions['topic5_v1'], zonegroup, endpoint_args=endpoint_args) + topic_arn5 = topic_conf.set_config() + topic_conf = PSTopicS3(conn1, topic_versions['topic6_v1'], zonegroup, endpoint_args=endpoint_args) + topic_arn6 = topic_conf.set_config() + tenant_topic_conf = PSTopicS3(conn2, topic_versions['tenant_topic2_v1'], zonegroup, endpoint_args=endpoint_args) + tenant_topic_arn2 = tenant_topic_conf.set_config() + tenant_topic_conf = PSTopicS3(conn2, topic_versions['tenant_topic3_v1'], zonegroup, endpoint_args=endpoint_args) + tenant_topic_arn3 = tenant_topic_conf.set_config() + + # Start v2 notification + zonegroup_modify_feature(enable=True, feature_name=zonegroup_feature_notification_v2) + + # Create s3 - v2 topics + topic_conf = PSTopicS3(conn1, topic_versions['topic1_v2'], zonegroup, endpoint_args=endpoint_args) + topic_arn1 = topic_conf.set_config() + topic_conf = PSTopicS3(conn1, topic_versions['topic2_v2'], zonegroup, endpoint_args=endpoint_args) + topic_arn2 = topic_conf.set_config() + tenant_topic_conf = PSTopicS3(conn2, topic_versions['tenant_topic1_v2'], zonegroup, endpoint_args=endpoint_args) + tenant_topic_arn1 = tenant_topic_conf.set_config() + + # Verify topics list + try: + # Verify no tenant topics + res, status = topic_conf.get_list() + assert_equal(status // 100, 2) + listTopicsResponse = res.get('ListTopicsResponse', {}) + listTopicsResult = 
listTopicsResponse.get('ListTopicsResult', {}) + topics = listTopicsResult.get('Topics', {}) + member = topics['member'] if topics else [] + assert_equal(len(member), 6) + + # Verify tenant topics + res, status = tenant_topic_conf.get_list() + assert_equal(status // 100, 2) + listTopicsResponse = res.get('ListTopicsResponse', {}) + listTopicsResult = listTopicsResponse.get('ListTopicsResult', {}) + topics = listTopicsResult.get('Topics', {}) + member = topics['member'] if topics else [] + assert_equal(len(member), 3) + finally: + # Cleanup created topics + topic_conf.del_config(topic_arn1) + topic_conf.del_config(topic_arn2) + topic_conf.del_config(topic_arn3) + topic_conf.del_config(topic_arn4) + topic_conf.del_config(topic_arn5) + topic_conf.del_config(topic_arn6) + tenant_topic_conf.del_config(tenant_topic_arn1) + tenant_topic_conf.del_config(tenant_topic_arn2) + tenant_topic_conf.del_config(tenant_topic_arn3) + + +@attr('basic_test') +def test_ps_s3_list_topics(): + """ test list topics""" + + # Initialize connections, topic names and configurations + conn1 = connection() + tenant = 'kaboom1' + conn2 = connect_random_user(tenant) + bucket_name = gen_bucket_name() + topic_name1 = bucket_name + TOPIC_SUFFIX + '1' + topic_name2 = bucket_name + TOPIC_SUFFIX + '2' + topic_name3 = bucket_name + TOPIC_SUFFIX + '3' + tenant_topic_name1 = tenant + "_" + topic_name1 + tenant_topic_name2 = tenant + "_" + topic_name2 + host = get_ip() + http_port = random.randint(10000, 20000) + endpoint_address = 'http://' + host + ':' + str(http_port) + endpoint_args = 'push-endpoint=' + endpoint_address + '&persistent=true' + zonegroup = get_config_zonegroup() + + # Make sure there are no leftover topics + delete_all_topics(conn1, '', get_config_cluster()) + delete_all_topics(conn2, tenant, get_config_cluster()) + + # Create s3 - v2 topics + topic_conf = PSTopicS3(conn1, topic_name1, zonegroup, endpoint_args=endpoint_args) + topic_arn1 = topic_conf.set_config() + topic_conf = 
PSTopicS3(conn1, topic_name2, zonegroup, endpoint_args=endpoint_args) + topic_arn2 = topic_conf.set_config() + topic_conf = PSTopicS3(conn1, topic_name3, zonegroup, endpoint_args=endpoint_args) + topic_arn3 = topic_conf.set_config() + tenant_topic_conf = PSTopicS3(conn2, tenant_topic_name1, zonegroup, endpoint_args=endpoint_args) + tenant_topic_arn1 = tenant_topic_conf.set_config() + tenant_topic_conf = PSTopicS3(conn2, tenant_topic_name2, zonegroup, endpoint_args=endpoint_args) + tenant_topic_arn2 = tenant_topic_conf.set_config() + + # Verify topics list + try: + # Verify no tenant topics + res, status = topic_conf.get_list() + assert_equal(status // 100, 2) + listTopicsResponse = res.get('ListTopicsResponse', {}) + listTopicsResult = listTopicsResponse.get('ListTopicsResult', {}) + topics = listTopicsResult.get('Topics', {}) + member = topics['member'] if topics else [] # version 2 + assert_equal(len(member), 3) + + # Verify topics for tenant + res, status = tenant_topic_conf.get_list() + assert_equal(status // 100, 2) + listTopicsResponse = res.get('ListTopicsResponse', {}) + listTopicsResult = listTopicsResponse.get('ListTopicsResult', {}) + topics = listTopicsResult.get('Topics', {}) + member = topics['member'] if topics else [] + assert_equal(len(member), 2) + finally: + # Cleanup created topics + topic_conf.del_config(topic_arn1) + topic_conf.del_config(topic_arn2) + topic_conf.del_config(topic_arn3) + tenant_topic_conf.del_config(tenant_topic_arn1) + tenant_topic_conf.del_config(tenant_topic_arn2) + +@attr('data_path_v2_test') +def test_ps_s3_list_topics_v1(): + """ test list topics on v1""" + if get_config_cluster() == 'noname': + return SkipTest('realm is needed') + + # Initialize connections and configurations + conn1 = connection() + tenant = 'kaboom1' + conn2 = connect_random_user(tenant) + bucket_name = gen_bucket_name() + topic_name1 = bucket_name + TOPIC_SUFFIX + '1' + topic_name2 = bucket_name + TOPIC_SUFFIX + '2' + topic_name3 = bucket_name + 
TOPIC_SUFFIX + '3' + tenant_topic_name1 = tenant + "_" + topic_name1 + tenant_topic_name2 = tenant + "_" + topic_name2 + host = get_ip() + http_port = random.randint(10000, 20000) + endpoint_address = 'http://' + host + ':' + str(http_port) + endpoint_args = 'push-endpoint=' + endpoint_address + '&persistent=true' + zonegroup = get_config_zonegroup() + conf_cluster = get_config_cluster() + + # Make sure there are no leftover topics + delete_all_topics(conn1, '', conf_cluster) + delete_all_topics(conn2, tenant, conf_cluster) + + # Make sure that we disable v2 + zonegroup_modify_feature(enable=False, feature_name=zonegroup_feature_notification_v2) + + # Create s3 - v1 topics + topic_conf = PSTopicS3(conn1, topic_name1, zonegroup, endpoint_args=endpoint_args) + topic_arn1 = topic_conf.set_config() + topic_conf = PSTopicS3(conn1, topic_name2, zonegroup, endpoint_args=endpoint_args) + topic_arn2 = topic_conf.set_config() + topic_conf = PSTopicS3(conn1, topic_name3, zonegroup, endpoint_args=endpoint_args) + topic_arn3 = topic_conf.set_config() + tenant_topic_conf = PSTopicS3(conn2, tenant_topic_name1, zonegroup, endpoint_args=endpoint_args) + tenant_topic_arn1 = tenant_topic_conf.set_config() + tenant_topic_conf = PSTopicS3(conn2, tenant_topic_name2, zonegroup, endpoint_args=endpoint_args) + tenant_topic_arn2 = tenant_topic_conf.set_config() + + # Verify topics list + try: + # Verify no tenant topics + res, status = topic_conf.get_list() + assert_equal(status // 100, 2) + listTopicsResponse = res.get('ListTopicsResponse', {}) + listTopicsResult = listTopicsResponse.get('ListTopicsResult', {}) + topics = listTopicsResult.get('Topics', {}) + member = topics['member'] if topics else [] + assert_equal(len(member), 3) + + # Verify tenant topics + res, status = tenant_topic_conf.get_list() + assert_equal(status // 100, 2) + listTopicsResponse = res.get('ListTopicsResponse', {}) + listTopicsResult = listTopicsResponse.get('ListTopicsResult', {}) + topics = 
listTopicsResult.get('Topics', {}) + member = topics['member'] if topics else [] + assert_equal(len(member), 2) + finally: + # Cleanup created topics + topic_conf.del_config(topic_arn1) + topic_conf.del_config(topic_arn2) + topic_conf.del_config(topic_arn3) + tenant_topic_conf.del_config(tenant_topic_arn1) + tenant_topic_conf.del_config(tenant_topic_arn2) + + @attr('basic_test') def test_ps_s3_topic_permissions(): """ test s3 topic set/get/delete permissions """ diff --git a/src/test/rgw/rgw_multi/tests.py b/src/test/rgw/rgw_multi/tests.py index 2d49c7a0ce0..433cd034fe0 100644 --- a/src/test/rgw/rgw_multi/tests.py +++ b/src/test/rgw/rgw_multi/tests.py @@ -15,6 +15,7 @@ import boto import boto.s3.connection from boto.s3.website import WebsiteConfiguration from boto.s3.cors import CORSConfiguration +from botocore.exceptions import ClientError from nose.tools import eq_ as eq from nose.tools import assert_not_equal, assert_equal, assert_true, assert_false @@ -573,6 +574,7 @@ def create_bucket_per_zone_in_realm(): b, z = create_bucket_per_zone(zg_conn) buckets.extend(b) zone_bucket.extend(z) + realm_meta_checkpoint(realm) return buckets, zone_bucket def test_bucket_create(): @@ -1212,6 +1214,9 @@ def test_datalog_autotrim(): # wait for metadata and data sync to catch up zonegroup_meta_checkpoint(zonegroup) zonegroup_data_checkpoint(zonegroup_conns) + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + time.sleep(config.checkpoint_delay) + zonegroup_data_checkpoint(zonegroup_conns) # trim each datalog for zone, _ in zone_bucket: @@ -3634,4 +3639,23 @@ def test_copy_object_different_bucket(): CopySource = source_bucket.name + '/' + objname) zonegroup_bucket_checkpoint(zonegroup_conns, dest_bucket.name) - + +def test_bucket_create_location_constraint(): + for zonegroup in realm.current_period.zonegroups: + zonegroup_conns = ZonegroupConns(zonegroup) + for zg in realm.current_period.zonegroups: + z = zonegroup_conns.rw_zones[0] + bucket_name = gen_bucket_name() + 
if zg.name == zonegroup.name: + # my zonegroup should pass + z.s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration={'LocationConstraint': zg.name}) + # check bucket location + response = z.s3_client.get_bucket_location(Bucket=bucket_name) + assert_equal(response['LocationConstraint'], zg.name) + else: + # other zonegroup should fail with 400 + e = assert_raises(ClientError, + z.s3_client.create_bucket, + Bucket=bucket_name, + CreateBucketConfiguration={'LocationConstraint': zg.name}) + assert e.response['ResponseMetadata']['HTTPStatusCode'] == 400 diff --git a/src/test/rgw/test-rgw-common.sh b/src/test/rgw/test-rgw-common.sh index 9129092898e..6798a15ba31 100644 --- a/src/test/rgw/test-rgw-common.sh +++ b/src/test/rgw/test-rgw-common.sh @@ -103,7 +103,7 @@ function init_first_zone { # create zonegroup, zone x $(rgw_admin $cid) zonegroup create --rgw-zonegroup=$zg --master --default - x $(rgw_admin $cid) zone create --rgw-zonegroup=$zg --rgw-zone=$zone --access-key=${access_key} --secret=${secret} --endpoints=$endpoints --default + x $(rgw_admin $cid) zone create --rgw-zonegroup=$zg --rgw-zone=$zone --access-key=${access_key} --secret=${secret} --endpoints=$endpoints --master --default x $(rgw_admin $cid) user create --uid=zone.user --display-name=ZoneUser --access-key=${access_key} --secret=${secret} --system x $(rgw_admin $cid) period update --commit @@ -128,7 +128,7 @@ function init_zone_in_existing_zg { x $(rgw_admin $cid) period update --commit } -function init_first_zone_in_slave_zg { +function init_first_zone_in_peer_zg { [ $# -ne 8 ] && echo "init_first_zone_in_slave_zg() needs 8 params" && exit 1 cid=$1 diff --git a/src/test/rgw/test-rgw-multisite.sh b/src/test/rgw/test-rgw-multisite.sh index a005b19e3da..d3a1b265ca6 100755 --- a/src/test/rgw/test-rgw-multisite.sh +++ b/src/test/rgw/test-rgw-multisite.sh @@ -1,11 +1,12 @@ #!/usr/bin/env bash -[ $# -lt 1 ] && echo "usage: $0 <num-clusters> [rgw parameters...]" && exit 1 +[ $# -lt 1 ] && 
echo "usage: $0 <num-zones> <num-zonegroups>[rgw parameters...]" && exit 1 -num_clusters=$1 +num_zones=$1 +num_zonegroups=$2 shift -[ $num_clusters -lt 1 ] && echo "clusters num must be at least 1" && exit 1 +[ $num_zones -lt 1 ] && echo "clusters num must be at least 1" && exit 1 . "`dirname $0`/test-rgw-common.sh" . "`dirname $0`/test-rgw-meta-sync.sh" @@ -53,7 +54,7 @@ echo realm_status=$output endpoints="" i=2 -while [ $i -le $num_clusters ]; do +while [ $i -le $num_zones ]; do x $(start_ceph_cluster c$i) -n $(get_mstart_parameters $i) j=1 endpoints="" @@ -74,10 +75,53 @@ while [ $i -le $num_clusters ]; do i=$((i+1)) done -i=2 -while [ $i -le $num_clusters ]; do - wait_for_meta_sync c1 c$i $realm_name +endpoints="" +k=2 +while [ $k -le $num_zonegroups ]; do + x $(start_ceph_cluster c$i) -n $(get_mstart_parameters $i) + j=1 + endpoints="" + while [ $j -le $rgws ]; do + port=$((8000+i*100+j)) + endpoints="$endpoints""$url:$port," + j=$((j+1)) + done + # create new zone, start rgw + init_first_zone_in_peer_zg c$i $realm_name zg${k} zg${k}-${i} 8101 $endpoints $system_access_key $system_secret + j=1 + while [ $j -le $rgws ]; do + port=$((8000+i*100+j)) + x $(rgw c$i "$port" "$@") + j="$((j+1))" + done +# bring up next clusters in zonegroup k i=$((i+1)) + + endpoints="" + l=2 + while [ $l -le $num_zones ]; do + x $(start_ceph_cluster c$i) -n $(get_mstart_parameters $i) + j=1 + endpoints="" + while [ $j -le $rgws ]; do + port=$((8000+i*100+j)) + endpoints="$endpoints""$url:$port," + j=$((j+1)) + done + + # create new zone, start rgw + init_zone_in_existing_zg c$i $realm_name zg${k} zg${k}-${i} 8101 $endpoints $zone_port $system_access_key $system_secret + j=1 + while [ $j -le $rgws ]; do + port=$((8000+i*100+j)) + x $(rgw c$i "$port" "$@") + j="$((j+1))" + done + l=$((l+1)) + i=$((i+1)) + done + + k=$((k+1)) done diff --git a/src/test/rgw/test_log_backing.cc b/src/test/rgw/test_log_backing.cc index e4109d535d1..a6de690af0f 100644 --- 
a/src/test/rgw/test_log_backing.cc +++ b/src/test/rgw/test_log_backing.cc @@ -20,6 +20,7 @@ #include <fmt/format.h> +#include "common/Clock.h" // for ceph_clock_now() #include "include/types.h" #include "include/rados/librados.hpp" diff --git a/src/test/rgw/test_rgw_iam_policy.cc b/src/test/rgw/test_rgw_iam_policy.cc index 7dadb7812ff..1d13c2aa013 100644 --- a/src/test/rgw/test_rgw_iam_policy.cc +++ b/src/test/rgw/test_rgw_iam_policy.cc @@ -75,6 +75,8 @@ using rgw::IAM::s3GetObjectTagging; using rgw::IAM::s3GetObjectVersion; using rgw::IAM::s3GetObjectVersionTagging; using rgw::IAM::s3GetObjectVersionTorrent; +using rgw::IAM::s3GetObjectAttributes; +using rgw::IAM::s3GetObjectVersionAttributes; using rgw::IAM::s3GetPublicAccessBlock; using rgw::IAM::s3GetReplicationConfiguration; using rgw::IAM::s3ListAllMyBuckets; @@ -419,6 +421,8 @@ TEST_F(PolicyTest, Parse3) { act2[s3GetObjectVersionAcl] = 1; act2[s3GetObjectTorrent] = 1; act2[s3GetObjectVersionTorrent] = 1; + act2[s3GetObjectAttributes] = 1; + act2[s3GetObjectVersionAttributes] = 1; act2[s3GetAccelerateConfiguration] = 1; act2[s3GetBucketAcl] = 1; act2[s3GetBucketOwnershipControls] = 1; @@ -487,6 +491,8 @@ TEST_F(PolicyTest, Eval3) { s3allow[s3GetObjectVersion] = 1; s3allow[s3GetObjectAcl] = 1; s3allow[s3GetObjectVersionAcl] = 1; + s3allow[s3GetObjectAttributes] = 1; + s3allow[s3GetObjectVersionAttributes] = 1; s3allow[s3GetObjectTorrent] = 1; s3allow[s3GetObjectVersionTorrent] = 1; s3allow[s3GetAccelerateConfiguration] = 1; @@ -883,6 +889,8 @@ TEST_F(ManagedPolicyTest, AmazonS3ReadOnlyAccess) act[s3GetObjectVersionAcl] = 1; act[s3GetObjectTorrent] = 1; act[s3GetObjectVersionTorrent] = 1; + act[s3GetObjectAttributes] = 1; + act[s3GetObjectVersionAttributes] = 1; act[s3GetAccelerateConfiguration] = 1; act[s3GetBucketAcl] = 1; act[s3GetBucketOwnershipControls] = 1; |