Diffstat (limited to 'examples/rgw/lua')
-rw-r--r--  examples/rgw/lua/config/prometheus.yml      |  19
-rw-r--r--  examples/rgw/lua/elasticsearch_adapter.lua  | 114
-rw-r--r--  examples/rgw/lua/elasticsearch_adapter.md   |  59
-rw-r--r--  examples/rgw/lua/img/prometheus.png         | bin 0 -> 414931 bytes
-rw-r--r--  examples/rgw/lua/nats_adapter.lua           |  93
-rw-r--r--  examples/rgw/lua/nats_adapter.md            | 101
-rw-r--r--  examples/rgw/lua/prometheus_adapter.lua     |  23
-rw-r--r--  examples/rgw/lua/prometheus_adapter.md      |  59
-rw-r--r--  examples/rgw/lua/storage_class.lua          |  19
-rw-r--r--  examples/rgw/lua/storage_class.md           |  49
10 files changed, 536 insertions, 0 deletions
diff --git a/examples/rgw/lua/config/prometheus.yml b/examples/rgw/lua/config/prometheus.yml
new file mode 100644
index 00000000000..37deee67cfc
--- /dev/null
+++ b/examples/rgw/lua/config/prometheus.yml
@@ -0,0 +1,19 @@
+global:
+  scrape_interval: 2s # Scrape targets every 2 seconds (the upstream default is 15 seconds).
+
+  # Attach these labels to any time series or alerts when communicating with
+  # external systems (federation, remote storage, Alertmanager).
+  external_labels:
+    monitor: 'codelab-monitor'
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# here it is the Pushgateway that RGW pushes to.
+scrape_configs:
+  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+  - job_name: 'rgw'
+
+    # Override the global default and scrape targets from this job every second.
+    scrape_interval: 1s
+
+    static_configs:
+      - targets: ['127.0.0.1:9091']
\ No newline at end of file
diff --git a/examples/rgw/lua/elasticsearch_adapter.lua b/examples/rgw/lua/elasticsearch_adapter.lua
new file mode 100644
index 00000000000..a0c542fed88
--- /dev/null
+++ b/examples/rgw/lua/elasticsearch_adapter.lua
@@ -0,0 +1,114 @@
+local elasticsearch = require ("elasticsearch")
+local json = require ("lunajson")
+
+local client = elasticsearch.client{
+  hosts = {
+    {
+      host = "localhost",
+      port = "9200"
+    }
+  }
+}
+
+local copyfrom = {}
+if (Request.CopyFrom ~= nil) then
+  copyfrom = {
+    Tenant = Request.CopyFrom.Tenant,
+    Bucket = Request.CopyFrom.Bucket,
+    Object = {
+      Name = Request.CopyFrom.Object.Name,
+      Instance = Request.CopyFrom.Object.Instance,
+      Id = Request.CopyFrom.Object.Id,
+      Size = Request.CopyFrom.Object.Size,
+      MTime = Request.CopyFrom.Object.MTime
+    }
+  }
+end
+
+local res, status = client:index{
+  index = "rgw",
+  type = "Request",
+  id = Request.Id,
+  body =
+  {
+    RGWOp = Request.RGWOp,
+    DecodedURI = Request.DecodedURI,
+    ContentLength = Request.ContentLength,
+    GenericAttributes = json.encode(Request.GenericAttributes),
+    Response = {
+      HTTPStatusCode = Request.Response.HTTPStatusCode,
+      HTTPStatus = Request.Response.HTTPStatus,
+      RGWCode = Request.Response.RGWCode,
+      Message = Request.Response.Message
+    },
+    SwiftAccountName = Request.SwiftAccountName,
+    Bucket = {
+      Tenant = Request.Bucket.Tenant,
+      Name = Request.Bucket.Name,
+      Marker = Request.Bucket.Marker,
+      Id = Request.Bucket.Id,
+      Count = Request.Bucket.Count,
+      Size = Request.Bucket.Size,
+      ZoneGroupId = Request.Bucket.ZoneGroupId,
+      CreationTime = Request.Bucket.CreationTime,
+      MTime = Request.Bucket.MTime,
+      Quota = {
+        MaxSize = Request.Bucket.Quota.MaxSize,
+        MaxObjects = Request.Bucket.Quota.MaxObjects,
+        Enabled = Request.Bucket.Quota.Enabled,
+        Rounded = Request.Bucket.Quota.Rounded
+      },
+      PlacementRule = {
+        Name = Request.Bucket.PlacementRule.Name,
+        StorageClass = Request.Bucket.PlacementRule.StorageClass
+      },
+      User = {
+        Tenant = Request.Bucket.User.Tenant,
+        Id = Request.Bucket.User.Id
+      }
+    },
+    Object = {
+      Name = Request.Object.Name,
+      Instance = Request.Object.Instance,
+      Id = Request.Object.Id,
+      Size = Request.Object.Size,
+      MTime = Request.Object.MTime
+    },
+    CopyFrom = copyfrom,
+    ObjectOwner = {
+      DisplayName = Request.ObjectOwner.DisplayName,
+      User = {
+        Tenant = Request.ObjectOwner.User.Tenant,
+        Id = Request.ObjectOwner.User.Id
+      }
+    },
+    ZoneGroup = {
+      Name = Request.ZoneGroup.Name,
+      Endpoint = Request.ZoneGroup.Endpoint
+    },
+    Environment = json.encode(Request.Environment),
+    Policy = json.encode(Request.Policy),
+    UserPolicies = json.encode(Request.UserPolicies),
+    RGWId = Request.RGWId,
+    HTTP = {
+      Parameters = json.encode(Request.HTTP.Parameters),
+      Resources = json.encode(Request.HTTP.Resources),
+      Metadata = json.encode(Request.HTTP.Metadata),
+      Host = Request.HTTP.Host,
+      Method = Request.HTTP.Method,
+      URI = Request.HTTP.URI,
+      QueryString = Request.HTTP.QueryString,
+      Domain = Request.HTTP.Domain
+    },
+    Time = Request.Time,
+    Dialect = Request.Dialect,
+    Id = Request.Id,
+    TransactionId = Request.TransactionId,
+    Tags = json.encode(Request.Tags),
+    User = {
+      Tenant = Request.User.Tenant,
+      Id = Request.User.Id
+    }
+  }
+}
+
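The script above ignores the `res, status` pair returned by `client:index`. A minimal sketch of an extension that surfaces indexing failures in the RGW debug log, assuming the same post-request context (where `RGWDebugLog` is available):

```lua
-- sketch: log a failed indexing attempt instead of dropping it silently;
-- `res` and `status` are the values returned by client:index above
if not res then
  RGWDebugLog("failed to index request " .. Request.Id ..
    " in elasticsearch, status: " .. tostring(status))
end
```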
diff --git a/examples/rgw/lua/elasticsearch_adapter.md b/examples/rgw/lua/elasticsearch_adapter.md
new file mode 100644
index 00000000000..a32b5d36f64
--- /dev/null
+++ b/examples/rgw/lua/elasticsearch_adapter.md
@@ -0,0 +1,59 @@
+# Introduction
+
+This directory contains `elasticsearch_adapter.lua`, an example of how
+to use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/)
+to push fields of RGW requests
+to [Elasticsearch](https://www.elastic.co/elasticsearch/).
+
+## Elasticsearch
+
+Install and run Elasticsearch using docker:
+```bash
+docker network create elastic
+docker pull elasticsearch:2.4.6
+docker run --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" elasticsearch:2.4.6
+```
+
+[Full documentation for Elasticsearch installation](https://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html)
+
+## Usage
+
+* Upload the script:
+
+```bash
+radosgw-admin script put --infile=elasticsearch_adapter.lua --context=postRequest
+```
+
+* Add the packages used in the script:
+
+```bash
+radosgw-admin script-package add --package='elasticsearch 1.0.0-1' --allow-compilation
+radosgw-admin script-package add --package='lunajson' --allow-compilation
+radosgw-admin script-package add --package='lua-cjson 2.1.0-1' --allow-compilation
+```
+
+* Restart radosgw.
+
+* Send a request:
+```bash
+s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" --access_key=0555b35654ad1656d804 --secret_key=h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== mb s3://mybucket
+s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" --access_key=0555b35654ad1656d804 --secret_key=h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== put -P /etc/hosts s3://mybucket
+curl http://localhost:8000/mybucket/hosts
+```
+
+* Search Elasticsearch by bucket id:
+```bash
+curl -X GET "localhost:9200/rgw/_search?pretty" -H 'Content-Type: application/json' -d'
+{
+  "query": {
+    "match": {
+      "Bucket.Id": "05382336-b2db-409f-82dc-f28ab5fef978.4471.4471"
+    }
+  }
+}
+'
+```
+
+## Requirements
+* Lua 5.3
+
diff --git a/examples/rgw/lua/img/prometheus.png b/examples/rgw/lua/img/prometheus.png
new file mode 100644
index 00000000000..7a3b63f647a
--- /dev/null
+++ b/examples/rgw/lua/img/prometheus.png
Binary files differ
diff --git a/examples/rgw/lua/nats_adapter.lua b/examples/rgw/lua/nats_adapter.lua
new file mode 100644
index 00000000000..38264dd4685
--- /dev/null
+++ b/examples/rgw/lua/nats_adapter.lua
@@ -0,0 +1,93 @@
+local json = require ("lunajson")
+local nats = require ("nats")
+
+function nats_connect(nats_host, nats_port)
+  local nats_params = {
+    host = nats_host,
+    port = nats_port,
+  }
+  client = nats.connect(nats_params)
+  client:connect()
+end
+
+function toJson(request, eventName, opaqueData, configure)
+  supported_event = true -- mark the op as one we publish (checked below)
+  local notification = {
+    ["Records"] = {
+      ["eventVersion"] = "2.1",
+      ["eventSource"] = "ceph:s3",
+      ["awsRegion"] = request.ZoneGroup.Name,
+      ["eventTime"] = request.Time,
+      ["eventName"] = eventName,
+      ["userIdentity"] = {
+        ["principalId"] = request.User.Id
+      },
+      ["requestParameters"] = {
+        ["sourceIPAddress"] = ""
+      },
+      ["responseElements"] = {
+        ["x-amz-request-id"] = request.Id,
+        ["x-amz-id-2"] = request.RGWId
+      },
+      ["s3"] = {
+        ["s3SchemaVersion"] = "1.0",
+        ["configurationId"] = configure,
+        ["bucket"] = {
+          ["name"] = request.Bucket.Name,
+          ["ownerIdentity"] = {
+            ["principalId"] = request.Bucket.User.Id
+          },
+          ["arn"] = "arn:aws:s3:" .. request.ZoneGroup.Name .. "::" .. request.Bucket.Name,
+          ["id"] = request.Bucket.Id
+        },
+        ["object"] = {
+          ["key"] = request.Object.Name,
+          ["size"] = request.Object.Size,
+          ["eTag"] = "", -- eTag is not supported yet
+          ["versionId"] = request.Object.Instance,
+          ["sequencer"] = string.format("%x", os.time()),
+          ["metadata"] = {
+            json.encode(request.HTTP.Metadata)
+          },
+          ["tags"] = {
+            json.encode(request.Tags)
+          }
+        }
+      },
+      ["eventId"] = "",
+      ["opaqueData"] = opaqueData
+    }
+  }
+  return notification
+end
+
+supported_event = false
+configure = "mynotif1"
+opaqueData = "me@example.com"
+topic = "Bucket_Notification"
+bucket_name = "mybucket"
+nats_host = '0.0.0.0'
+nats_port = 4222
+
+if bucket_name == Request.Bucket.Name then
+  -- Object Created
+  if Request.RGWOp == "put_obj" then
+    notification = toJson(Request, 'ObjectCreated:Put', opaqueData, configure)
+  elseif Request.RGWOp == "post_obj" then
+    notification = toJson(Request, 'ObjectCreated:Post', opaqueData, configure)
+  elseif Request.RGWOp == "copy_obj" then
+    notification = toJson(Request, 'ObjectCreated:Copy', opaqueData, configure)
+  -- Object Removed
+  elseif Request.RGWOp == "delete_obj" then
+    notification = toJson(Request, 'ObjectRemoved:Delete', opaqueData, configure)
+  end
+
+  if supported_event == true then
+    -- pass host and port explicitly: the function parameters would
+    -- otherwise shadow the globals with nil
+    nats_connect(nats_host, nats_port)
+    local payload = json.encode(notification)
+    client:publish(topic, payload)
+    RGWDebugLog("bucket notification sent to nats://" .. nats_host .. ":" .. nats_port .. "/" .. topic)
+  end
+end
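As a variant, the if/elseif dispatch at the end of the script can be expressed as a lookup table, which makes mapping a new RGW op to its S3 event name a one-line change. This is a sketch, not part of the committed file, using only names defined in the script above:

```lua
-- table-driven equivalent of the dispatch above
local event_names = {
  put_obj    = 'ObjectCreated:Put',
  post_obj   = 'ObjectCreated:Post',
  copy_obj   = 'ObjectCreated:Copy',
  delete_obj = 'ObjectRemoved:Delete',
}

if bucket_name == Request.Bucket.Name then
  local event_name = event_names[Request.RGWOp]
  if event_name then
    notification = toJson(Request, event_name, opaqueData, configure)
    nats_connect(nats_host, nats_port)
    client:publish(topic, json.encode(notification))
  end
end
```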
diff --git a/examples/rgw/lua/nats_adapter.md b/examples/rgw/lua/nats_adapter.md
new file mode 100644
index 00000000000..35c1780c718
--- /dev/null
+++ b/examples/rgw/lua/nats_adapter.md
@@ -0,0 +1,101 @@
+# Introduction
+
+This directory contains examples of how to use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/) together with a [NATS Lua client](https://github.com/dawnangel/lua-nats) to add NATS to the list of bucket notification endpoints.
+
+## NATS
+To test your setup:
+* Install [NATS](https://docs.nats.io/nats-server/installation) and start a nats-server.
+
+* Subscribe to the NATS server using a [nats subscriber](https://github.com/nats-io/go-nats-examples/tree/master/patterns/publish-subscribe), choosing the topic to be 'Bucket_Notification' (as defined in the [script](nats_adapter.lua)):
+
+```bash
+nats-sub "Bucket_Notification"
+```
+
+[Full documentation for subscribing](https://docs.nats.io/nats-server/clients).
+
+Alternatively, point the script at an existing NATS server by editing the following part of the script to match that server's parameters:
+
+```
+nats_host = '{host}'
+nats_port = {port}
+```
+
+## Usage
+
+* Upload the [script](nats_adapter.lua):
+
+```bash
+radosgw-admin script put --infile=nats_adapter.lua --context=postRequest
+```
+* Add the packages used in the script:
+
+```bash
+radosgw-admin script-package add --package=nats --allow-compilation
+radosgw-admin script-package add --package=lunajson --allow-compilation
+radosgw-admin script-package add --package='lua-cjson 2.1.0-1' --allow-compilation
+```
+* Restart radosgw.
+
+* Create a bucket:
+```bash
+s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" mb s3://mybucket
+```
+* Upload a file to the bucket and make sure that the NATS server received the notification:
+
+```bash
+s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" put hello.txt s3://mybucket
+```
+
+Expected output:
+```
+Received on [Bucket_Notification]:
+ {"Records":[
+    {
+        "eventVersion":"2.1",
+        "eventSource":"ceph:s3",
+        "awsRegion":"default",
+        "eventTime":"2019-11-22T13:47:35.124724Z",
+        "eventName":"ObjectCreated:Put",
+        "userIdentity":{
+            "principalId":"tester"
+        },
+        "requestParameters":{
+            "sourceIPAddress":""
+        },
+        "responseElements":{
+            "x-amz-request-id":"503a4c37-85eb-47cd-8681-2817e80b4281.5330.903595",
+            "x-amz-id-2":"14d2-zone1-zonegroup1"
+        },
+        "s3":{
+            "s3SchemaVersion":"1.0",
+            "configurationId":"mynotif1",
+            "bucket":{
+                "name":"mybucket",
+                "ownerIdentity":{
+                    "principalId":"tester"
+                },
+                "arn":"arn:aws:s3:us-east-1::mybucket1",
+                "id":"503a4c37-85eb-47cd-8681-2817e80b4281.5332.38"
+            },
+            "object":{
+                "key":"hello.txt",
+                "size":"1024",
+                "eTag":"",
+                "versionId":"",
+                "sequencer": "F7E6D75DC742D108",
+                "metadata":[],
+                "tags":[]
+            }
+        },
+        "eventId":"",
+        "opaqueData":"me@example.com"
+    }
+]}
+```
+
+## Requirements
+* Lua 5.3 (or higher)
+* Luarocks
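For reference, a consumer can decode the payload with the same lunajson package the adapter uses. A minimal sketch, assuming `payload` holds one received message and the `Records` array shape shown in the expected output above:

```lua
local json = require("lunajson")

-- `payload` stands in for one received notification (a JSON string)
local notification = json.decode(payload)
for _, record in ipairs(notification.Records) do
  print(record.eventName .. ": s3://" ..
    record.s3.bucket.name .. "/" .. record.s3.object.key)
end
```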
diff --git a/examples/rgw/lua/prometheus_adapter.lua b/examples/rgw/lua/prometheus_adapter.lua
new file mode 100644
index 00000000000..4f0af9a3b91
--- /dev/null
+++ b/examples/rgw/lua/prometheus_adapter.lua
@@ -0,0 +1,23 @@
+local http = require("socket.http")
+local ltn12 = require("ltn12")
+
+local respbody = {}
+local op = "rgw_other_request_content_length"
+if (Request.RGWOp == "put_obj") then
+  op = "rgw_put_request_content_length"
+elseif (Request.RGWOp == "get_obj") then
+  op = "rgw_get_request_content_length"
+end
+local field = op .. " " .. tostring(Request.ContentLength) .. "\n"
+
+local body, code, headers, status = http.request{
+  url = "http://127.0.0.1:9091/metrics/job/rgw",
+  method = "POST",
+  headers = {
+    ["Content-Type"] = "application/x-www-form-urlencoded",
+    ["Content-Length"] = string.len(field)
+  },
+  source = ltn12.source.string(field),
+  sink = ltn12.sink.table(respbody),
+}
+
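One caveat with the generic (table) form of `http.request` used above: the first return value is 1 on success rather than the response body, which is collected into `respbody` by the sink. A hedged sketch of checking the push result, assuming the Pushgateway answers 200 on success:

```lua
-- sketch: surface failed pushes in the RGW debug log; `code` and
-- `respbody` come from the http.request call above
if code ~= 200 then
  RGWDebugLog("pushgateway push failed, HTTP code: " .. tostring(code) ..
    ", response: " .. table.concat(respbody))
end
```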
diff --git a/examples/rgw/lua/prometheus_adapter.md b/examples/rgw/lua/prometheus_adapter.md
new file mode 100644
index 00000000000..eae1d81515e
--- /dev/null
+++ b/examples/rgw/lua/prometheus_adapter.md
@@ -0,0 +1,59 @@
+# Introduction
+
+This directory contains an example `prometheus_adapter.lua` of how to
+use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/)
+to push metrics from RGW requests to [Prometheus](https://prometheus.io/),
+specifically to collect information on object sizes.
+
+## Prometheus
+
+Because every run of a Lua script is short-lived,
+[Pushgateway](https://github.com/prometheus/pushgateway)
+should be used as an intermediate service that holds the pushed data
+for Prometheus to scrape.
+
+* Install and run Pushgateway using docker:
+
+```bash
+docker pull prom/pushgateway
+docker run -p 9091:9091 -it prom/pushgateway
+```
+
+* Install and run Prometheus using docker:
+
+```bash
+docker pull prom/prometheus
+docker run --network host -v ${CEPH_DIR}/examples/rgw/lua/config/prometheus.yml:/etc/prometheus/prometheus.yml prom/prometheus
+```
+
+[Full documentation for Prometheus installation](https://prometheus.io/docs/prometheus/latest/installation/)
+
+## Usage
+
+* Upload the script:
+
+```bash
+radosgw-admin script put --infile=prometheus_adapter.lua --context=postRequest
+```
+
+* Add the packages used in the script:
+
+```bash
+radosgw-admin script-package add --package='luasocket' --allow-compilation
+```
+
+* Restart radosgw.
+
+* Send a request:
+```bash
+s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" --access_key=0555b35654ad1656d804 --secret_key=h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== mb s3://mybucket
+s3cmd --host=localhost:8000 --host-bucket="localhost:8000/%(bucket)" --access_key=0555b35654ad1656d804 --secret_key=h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== put -P /etc/hosts s3://mybucket
+curl http://localhost:8000/mybucket/hosts
+```
+
+* Open `http://localhost:9090` in a browser and search for the pushed metrics, e.g. `rgw_put_request_content_length`:
+![](img/prometheus.png)
+
+## Requirements
+* Lua 5.3 or higher
+
diff --git a/examples/rgw/lua/storage_class.lua b/examples/rgw/lua/storage_class.lua
new file mode 100644
index 00000000000..08d41094c8c
--- /dev/null
+++ b/examples/rgw/lua/storage_class.lua
@@ -0,0 +1,19 @@
+local function isempty(input)
+  return input == nil or input == ''
+end
+
+if Request.RGWOp == 'put_obj' then
+  RGWDebugLog("Put_Obj with StorageClass: " .. Request.HTTP.StorageClass)
+  if (isempty(Request.HTTP.StorageClass)) then
+    if (Request.ContentLength >= 65536) then
+      RGWDebugLog("No StorageClass for Object and size >= threshold: " .. Request.Object.Name .. " adding QLC StorageClass")
+      Request.HTTP.StorageClass = "QLC_CLASS"
+    else
+      RGWDebugLog("No StorageClass for Object and size < threshold: " .. Request.Object.Name .. " adding STANDARD StorageClass")
+      Request.HTTP.StorageClass = "STANDARD"
+    end
+  else
+    RGWDebugLog("Storage Class Header Present on Object: " .. Request.Object.Name .. " with StorageClass: " .. Request.HTTP.StorageClass)
+  end
+end
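A condensed sketch of the same policy with the 64 KiB threshold pulled out as a named value, which can be handy when tuning the cutoff (illustrative only; the committed script above is the reference):

```lua
local function isempty(input)
  return input == nil or input == ''
end

-- objects at or above this size go to the QLC class
local THRESHOLD_BYTES = 64 * 1024

if Request.RGWOp == 'put_obj' and isempty(Request.HTTP.StorageClass) then
  if Request.ContentLength >= THRESHOLD_BYTES then
    Request.HTTP.StorageClass = "QLC_CLASS"
  else
    Request.HTTP.StorageClass = "STANDARD"
  end
end
```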
diff --git a/examples/rgw/lua/storage_class.md b/examples/rgw/lua/storage_class.md
new file mode 100644
index 00000000000..8da92ccc38b
--- /dev/null
+++ b/examples/rgw/lua/storage_class.md
@@ -0,0 +1,49 @@
+# Introduction
+
+This directory contains an example `storage_class.lua` of how to
+use [Lua Scripting](https://docs.ceph.com/en/latest/radosgw/lua-scripting/)
+to read and write the Storage Class field of a put request.
+
+## Usage
+
+The following examples assume a vstart environment built in `ceph/build`, with all commands invoked from `ceph/build`.
+
+* Create zonegroup placement info for a Storage Class (QLC_CLASS in this example) and point the class to a data pool (qlc_pool in this example).
+NOTE: RGW will need to be restarted after the zonegroup placement info change.
+See https://docs.ceph.com/en/latest/radosgw/placement/#zonegroup-zone-configuration for more information.
+
+```bash
+# Create Storage Class
+./bin/radosgw-admin zonegroup placement add --rgw-zonegroup default --placement-id default-placement --storage-class QLC_CLASS
+# Steer objects in QLC_CLASS to the qlc_pool data pool
+./bin/radosgw-admin zone placement add --rgw-zone default --placement-id default-placement --storage-class QLC_CLASS --data-pool qlc_pool
+```
+* Restart radosgw for zone/zonegroup placement changes to take effect.
+
+* Upload the script:
+
+```bash
+./bin/radosgw-admin script put --infile=storage_class.lua --context=preRequest
+```
+
+* Create a bucket and put an object with a Storage Class header (no modification will occur):
+```bash
+aws --profile=ceph --endpoint=http://localhost:8000 s3api create-bucket --bucket test-bucket
+aws --profile=ceph --endpoint=http://localhost:8000 s3api put-object --bucket test-bucket --key truv-0 --body ./64KiB_object.bin --storage-class STANDARD
+```
+
+* Send a request without a Storage Class header (the Storage Class will be changed to QLC_CLASS by the Lua script):
+```bash
+aws --profile=ceph --endpoint=http://localhost:8000 s3api put-object --bucket test-bucket --key truv-0 --body ./64KiB_object.bin
+```
+NOTE: If you use s3cmd instead of the aws command line, be aware that s3cmd adds a "STANDARD" StorageClass header to every put request, so the example Lua script will not modify it.
+
+* Verify that the S3 object had its StorageClass header added:
+```bash
+grep Lua ceph/build/out/radosgw.8000.log
+
+2021-11-01T17:10:14.048-0400 7f9c7f697640 20 Lua INFO: Put_Obj with StorageClass:
+2021-11-01T17:10:14.048-0400 7f9c7f697640 20 Lua INFO: No StorageClass for Object and size >= threshold: truv-0 adding QLC StorageClass
+```
+
+## Requirements
+* Lua 5.3