summaryrefslogtreecommitdiffstats
path: root/examples
diff options
context:
space:
mode:
authorYuval Lifshitz <ylifshit@redhat.com>2023-12-06 19:51:59 +0100
committerYuval Lifshitz <ylifshit@ibm.com>2024-12-08 17:20:50 +0100
commit790c38eacc52cc4c14beb48fca8b204235632793 (patch)
treeea374a6e768e99d544cdcd2954b92a3208f463d2 /examples
parentMerge pull request #60668 from ronen-fr/wip-rf-mconf (diff)
downloadceph-790c38eacc52cc4c14beb48fca8b204235632793.tar.xz
ceph-790c38eacc52cc4c14beb48fca8b204235632793.zip
rgw/logging: add support for GetBucketLogging and PutBucketLogging
this is based on AWS server access logs: - https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html - https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html - https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html - https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html however, a new mode was added called "journal" where: - logs of PUT, COPY and MPU are guaranteed - we have logs of DELETE and multi-DELETE operations (not guaranteed) - log records hold only minimal amount of information Fixes: https://tracker.ceph.com/issues/984 Signed-off-by: Yuval Lifshitz <ylifshit@redhat.com>
Diffstat (limited to 'examples')
-rw-r--r--examples/rgw/boto3/bucket_logging.py50
-rw-r--r--examples/rgw/boto3/service-2.sdk-extras.json110
2 files changed, 149 insertions, 11 deletions
diff --git a/examples/rgw/boto3/bucket_logging.py b/examples/rgw/boto3/bucket_logging.py
new file mode 100644
index 00000000000..fdc219c5765
--- /dev/null
+++ b/examples/rgw/boto3/bucket_logging.py
@@ -0,0 +1,50 @@
+#!/usr/bin/python
+
+import boto3
+import sys
+
+if len(sys.argv) != 3:
+ print('Usage: ' + sys.argv[0] + ' <bucket> <target bucket>')
+ sys.exit(1)
+
+# bucket name as first argument
+bucket = sys.argv[1]
+# target bucket name as the 2nd argument
+target_bucket = sys.argv[2]
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==' # notsecret
+
+client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+
+# create the source bucket
+response = client.create_bucket(Bucket=bucket)
+print(response)
+
+# create the target bucket
+response = client.create_bucket(Bucket=target_bucket)
+print(response)
+
+bucket_logging_conf = {'LoggingEnabled': {
+ 'TargetBucket': target_bucket,
+ 'TargetPrefix': 'log/',
+ 'TargetObjectKeyFormat': {
+ 'SimplePrefix': {}
+ },
+ 'ObjectRollTime': 60,
+ 'LoggingType': 'Journal',
+ }
+}
+
+response = client.put_bucket_logging(Bucket=bucket, BucketLoggingStatus=bucket_logging_conf)
+print(response)
+
+response = client.get_bucket_logging(Bucket=bucket)
+print(response)
+
diff --git a/examples/rgw/boto3/service-2.sdk-extras.json b/examples/rgw/boto3/service-2.sdk-extras.json
index 46fef1abdbb..5c22ee9f248 100644
--- a/examples/rgw/boto3/service-2.sdk-extras.json
+++ b/examples/rgw/boto3/service-2.sdk-extras.json
@@ -235,24 +235,112 @@
"UsageStatsSummary": {
"type": "structure",
"members": {
- "QuotaMaxBytes":{"shape":"QuotaMaxBytes"},
- "QuotaMaxBuckets":{"shape": "QuotaMaxBuckets"},
- "QuotaMaxObjCount":{"shape":"QuotaMaxObjCount"},
- "QuotaMaxBytesPerBucket":{"shape":"QuotaMaxBytesPerBucket"},
+ "QuotaMaxBytes":{"shape":"QuotaMaxBytes"},
+ "QuotaMaxBuckets":{"shape": "QuotaMaxBuckets"},
+ "QuotaMaxObjCount":{"shape":"QuotaMaxObjCount"},
+ "QuotaMaxBytesPerBucket":{"shape":"QuotaMaxBytesPerBucket"},
"QuotaMaxObjCountPerBucket":{"shape":"QuotaMaxObjCountPerBucket"},
- "TotalBytes":{"shape":"TotalBytes"},
+ "TotalBytes":{"shape":"TotalBytes"},
"TotalBytesRounded":{"shape":"TotalBytesRounded"},
"TotalEntries":{"shape":"TotalEntries"}
}
},
"QuotaMaxBytes":{"type":"integer"},
- "QuotaMaxBuckets":{"type": "integer"},
- "QuotaMaxObjCount":{"type":"integer"},
- "QuotaMaxBytesPerBucket":{"type":"integer"},
- "QuotaMaxObjCountPerBucket":{"type":"integer"},
- "TotalBytesRounded":{"type":"integer"},
+ "QuotaMaxBuckets":{"type": "integer"},
+ "QuotaMaxObjCount":{"type":"integer"},
+ "QuotaMaxBytesPerBucket":{"type":"integer"},
+ "QuotaMaxObjCountPerBucket":{"type":"integer"},
+ "TotalBytesRounded":{"type":"integer"},
"TotalBytes":{"type":"integer"},
- "TotalEntries":{"type":"integer"}
+ "TotalEntries":{"type":"integer"},
+ "LoggingEnabled":{
+ "type":"structure",
+ "required":[
+ "TargetBucket",
+ "TargetPrefix"
+ ],
+ "members":{
+ "TargetBucket":{
+ "shape":"TargetBucket",
+ "documentation":"<p>Specifies the bucket where you want to store server access logs. You can have your logs delivered to any bucket that you own. You can also configure multiple buckets to deliver their logs to the same target bucket. In this case, you should choose a different <code>TargetPrefix</code> for each source bucket so that the delivered log files can be distinguished by key.</p>"
+ },
+ "TargetGrants":{
+ "shape":"TargetGrants",
+          "documentation":"<p>Container for granting information.</p> <p>Should be used when the write permissions to the target bucket should be different than the permissions of the user performing the operation that needs to be logged. This is usually used in case of batched logging. see: <code>RecordsBatchSize</code>.</p>"
+ },
+ "TargetPrefix":{
+ "shape":"TargetPrefix",
+ "documentation":"<p>A prefix for all log object keys. If you store log files from multiple buckets in a single bucket, you can use a prefix to distinguish which log files came from which bucket.</p>"
+ },
+ "TargetObjectKeyFormat":{
+ "shape":"TargetObjectKeyFormat",
+ "documentation":"<p>key format for log objects.</p>"
+ },
+ "ObjectRollTime":{
+ "shape":"ObjectRollTime",
+ "documentation":"<p>time in seconds to move the log object to the target bucket and start another log object.</p>"
+ },
+ "LoggingType":{
+ "shape":"LoggingType",
+          "documentation":"<p>Use Standard log type to log all bucket operations in the standard format. Use Journal log type to log only creations and deletions of objects in a more compact format.</p>"
+ },
+ "RecordsBatchSize":{
+ "shape":"RecordsBatchSize",
+          "documentation":"<p>Indicates how many records to batch in memory before writing to the object. If set to zero, records are written synchronously to the object. If <code>ObjectRollTime</code> is reached, the batch of records will be written to the object regardless of the number of records.</p>"
+ }
+ },
+        "documentation":"<p>Describes where logs are stored, the prefix assigned to all log object keys for a bucket, and their format. Also describes the delivery guarantee level of the records.</p>"
+ },
+ "TargetObjectKeyFormat":{
+ "type":"structure",
+ "members":{
+ "SimplePrefix":{
+ "shape":"SimplePrefix",
+ "documentation":"<p>To use the simple format for S3 keys for log objects. To specify SimplePrefix format, set SimplePrefix to {}.</p>",
+ "locationName":"SimplePrefix"
+ },
+ "PartitionedPrefix":{
+ "shape":"PartitionedPrefix",
+ "documentation":"<p>Partitioned S3 key for log objects.</p>",
+ "locationName":"PartitionedPrefix"
+ }
+ },
+ "documentation":"<p>Key format for log objects. Only one format, PartitionedPrefix or SimplePrefix, is allowed.</p>"
+ },
+ "SimplePrefix":{
+ "type":"structure",
+ "members":{
+ },
+ "documentation":"<p>To use simple format for S3 keys for log objects, set SimplePrefix to an empty object.</p> <p> <code>[DestinationPrefix][YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]</code> </p>",
+ "locationName":"SimplePrefix"
+ },
+ "PartitionDateSource":{
+ "type":"string",
+ "enum":[
+ "EventTime",
+ "DeliveryTime"
+ ]
+ },
+ "PartitionedPrefix":{
+ "type":"structure",
+ "members":{
+ "PartitionDateSource":{
+ "shape":"PartitionDateSource",
+ "documentation":"<p>Specifies the partition date source for the partitioned prefix. PartitionDateSource can be EventTime or DeliveryTime.</p>"
+ }
+ },
+ "documentation":"<p>Amazon S3 keys for log objects are partitioned in the following format:</p> <p> <code>[DestinationPrefix][SourceAccountId]/[SourceRegion]/[SourceBucket]/[YYYY]/[MM]/[DD]/[YYYY]-[MM]-[DD]-[hh]-[mm]-[ss]-[UniqueString]</code> </p> <p>PartitionedPrefix defaults to EventTime delivery when server access logs are delivered.</p>",
+ "locationName":"PartitionedPrefix"
+ },
+ "ObjectRollTime":{"type":"integer"},
+ "RecordsBatchSize":{"type":"integer"},
+ "LoggingType":{
+ "type":"string",
+ "enum": [
+ "Standard",
+ "Journal"
+ ]
+ }
},
"documentation":"<p/>"
}