author     Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@cern.ch>  2022-05-17 09:42:29 +0200
committer  Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@cern.ch>  2022-05-17 09:42:29 +0200
commit     c8f086c182b87f1a813cb37fd58ad1e753a6b0bf (patch)
tree       78bb2793f306e24c0fb02cd04d773ea990b292e2 /monitoring/ceph-mixin
parent     ceph-mixin: don't add cluster matcher if showcluster is disabled (diff)
ceph-mixin: fix tests for rate and label changes
Signed-off-by: Arthur Outhenin-Chalandre <arthur.outhenin-chalandre@cern.ch>
Diffstat (limited to 'monitoring/ceph-mixin')
-rw-r--r--  monitoring/ceph-mixin/tests_dashboards/features/host-details.feature       | 110
-rw-r--r--  monitoring/ceph-mixin/tests_dashboards/features/hosts_overview.feature      |   4
-rw-r--r--  monitoring/ceph-mixin/tests_dashboards/features/osd-device-details.feature  |  28
-rw-r--r--  monitoring/ceph-mixin/tests_dashboards/features/radosgw-detail.feature      |  18
-rw-r--r--  monitoring/ceph-mixin/tests_dashboards/features/radosgw_overview.feature    |  76
-rw-r--r--  monitoring/ceph-mixin/tests_dashboards/util.py                              |   5
6 files changed, 123 insertions, 118 deletions
diff --git a/monitoring/ceph-mixin/tests_dashboards/features/host-details.feature b/monitoring/ceph-mixin/tests_dashboards/features/host-details.feature
index 5784ecbb270..51e3c5819ba 100644
--- a/monitoring/ceph-mixin/tests_dashboards/features/host-details.feature
+++ b/monitoring/ceph-mixin/tests_dashboards/features/host-details.feature
@@ -3,9 +3,9 @@ Feature: Host Details Dashboard
Scenario: "Test OSD"
Given the following series:
| metrics | values |
- | ceph_osd_metadata{back_iface="",ceph_daemon="osd.0",cluster_addr="192.168.1.12",device_class="hdd",front_iface="",hostname="127.0.0.1",objectstore="bluestore",public_addr="192.168.1.12",ceph_version="ceph version 17.0.0-8967-g6932a4f702a (6932a4f702a0d557fc36df3ca7a3bca70de42667) quincy (dev)"} | 1.0 |
- | ceph_osd_metadata{back_iface="",ceph_daemon="osd.1",cluster_addr="192.168.1.12",device_class="hdd",front_iface="",hostname="127.0.0.1",objectstore="bluestore",public_addr="192.168.1.12",ceph_version="ceph version 17.0.0-8967-g6932a4f702a (6932a4f702a0d557fc36df3ca7a3bca70de42667) quincy (dev)"} | 1.0 |
- | ceph_osd_metadata{back_iface="",ceph_daemon="osd.2",cluster_addr="192.168.1.12",device_class="hdd",front_iface="",hostname="127.0.0.1",objectstore="bluestore",public_addr="192.168.1.12",ceph_version="ceph version 17.0.0-8967-g6932a4f702a (6932a4f702a0d557fc36df3ca7a3bca70de42667) quincy (dev)"} | 1.0 |
+ | ceph_osd_metadata{job="ceph",back_iface="",ceph_daemon="osd.0",cluster_addr="192.168.1.12",device_class="hdd",front_iface="",hostname="127.0.0.1",objectstore="bluestore",public_addr="192.168.1.12",ceph_version="ceph version 17.0.0-8967-g6932a4f702a (6932a4f702a0d557fc36df3ca7a3bca70de42667) quincy (dev)"} | 1.0 |
+ | ceph_osd_metadata{job="ceph",back_iface="",ceph_daemon="osd.1",cluster_addr="192.168.1.12",device_class="hdd",front_iface="",hostname="127.0.0.1",objectstore="bluestore",public_addr="192.168.1.12",ceph_version="ceph version 17.0.0-8967-g6932a4f702a (6932a4f702a0d557fc36df3ca7a3bca70de42667) quincy (dev)"} | 1.0 |
+ | ceph_osd_metadata{job="ceph",back_iface="",ceph_daemon="osd.2",cluster_addr="192.168.1.12",device_class="hdd",front_iface="",hostname="127.0.0.1",objectstore="bluestore",public_addr="192.168.1.12",ceph_version="ceph version 17.0.0-8967-g6932a4f702a (6932a4f702a0d557fc36df3ca7a3bca70de42667) quincy (dev)"} | 1.0 |
When variable `ceph_hosts` is `127.0.0.1`
Then Grafana panel `OSDs` with legend `EMPTY` shows:
| metrics | values |
@@ -16,54 +16,54 @@ Scenario: "Test OSD"
Scenario: "Test Disk IOPS - Writes - Several OSDs per device"
Given the following series:
| metrics | values |
- | node_disk_writes_completed_total{device="sda",instance="localhost:9100"} | 10+60x1 |
- | node_disk_writes_completed_total{device="sdb",instance="localhost:9100"} | 10+60x1 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0 osd.1 osd.2",device="/dev/sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.3 osd.4 osd.5",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | node_disk_writes_completed_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 |
+ | node_disk_writes_completed_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0 osd.1 osd.2",device="/dev/sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.3 osd.4 osd.5",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `ceph_hosts` is `localhost`
Then Grafana panel `$ceph_hosts Disk IOPS` with legend `{{device}}({{ceph_daemon}}) writes` shows:
| metrics | values |
- | {ceph_daemon="osd.0 osd.1 osd.2", device="sda", instance="localhost"} | 1 |
- | {ceph_daemon="osd.3 osd.4 osd.5", device="sdb", instance="localhost"} | 1 |
+ | {job="ceph",ceph_daemon="osd.0 osd.1 osd.2", device="sda", instance="localhost"} | 1 |
+ | {job="ceph",ceph_daemon="osd.3 osd.4 osd.5", device="sdb", instance="localhost"} | 1 |
Scenario: "Test Disk IOPS - Writes - Single OSD per device"
Given the following series:
| metrics | values |
- | node_disk_writes_completed_total{device="sda",instance="localhost:9100"} | 10+60x1 |
- | node_disk_writes_completed_total{device="sdb",instance="localhost:9100"} | 10+60x1 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | node_disk_writes_completed_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 |
+ | node_disk_writes_completed_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `ceph_hosts` is `localhost`
Then Grafana panel `$ceph_hosts Disk IOPS` with legend `{{device}}({{ceph_daemon}}) writes` shows:
| metrics | values |
- | {ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 |
- | {ceph_daemon="osd.1", device="sdb", instance="localhost"} | 1 |
+ | {job="ceph",ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 |
+ | {job="ceph",ceph_daemon="osd.1", device="sdb", instance="localhost"} | 1 |
Scenario: "Test Disk IOPS - Reads - Several OSDs per device"
Given the following series:
| metrics | values |
- | node_disk_reads_completed_total{device="sda",instance="localhost:9100"} | 10+60x1 |
- | node_disk_reads_completed_total{device="sdb",instance="localhost:9100"} | 10+60x1 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0 osd.1 osd.2",device="/dev/sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.3 osd.4 osd.5",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | node_disk_reads_completed_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 |
+ | node_disk_reads_completed_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0 osd.1 osd.2",device="/dev/sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.3 osd.4 osd.5",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `ceph_hosts` is `localhost`
Then Grafana panel `$ceph_hosts Disk IOPS` with legend `{{device}}({{ceph_daemon}}) reads` shows:
| metrics | values |
- | {ceph_daemon="osd.0 osd.1 osd.2", device="sda", instance="localhost"} | 1 |
- | {ceph_daemon="osd.3 osd.4 osd.5", device="sdb", instance="localhost"} | 1 |
+ | {job="ceph",ceph_daemon="osd.0 osd.1 osd.2", device="sda", instance="localhost"} | 1 |
+ | {job="ceph",ceph_daemon="osd.3 osd.4 osd.5", device="sdb", instance="localhost"} | 1 |
Scenario: "Test Disk IOPS - Reads - Single OSD per device"
Given the following series:
| metrics | values |
- | node_disk_reads_completed_total{device="sda",instance="localhost:9100"} | 10+60x1 |
- | node_disk_reads_completed_total{device="sdb",instance="localhost:9100"} | 10+60x1 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | node_disk_reads_completed_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 |
+ | node_disk_reads_completed_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `ceph_hosts` is `localhost`
Then Grafana panel `$ceph_hosts Disk IOPS` with legend `{{device}}({{ceph_daemon}}) reads` shows:
| metrics | values |
- | {ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 |
- | {ceph_daemon="osd.1", device="sdb", instance="localhost"} | 1 |
+ | {job="ceph",ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 |
+ | {job="ceph",ceph_daemon="osd.1", device="sdb", instance="localhost"} | 1 |
# IOPS Panel - end
@@ -72,44 +72,44 @@ Scenario: "Test Disk IOPS - Reads - Single OSD per device"
Scenario: "Test disk throughput - read"
Given the following series:
| metrics | values |
- | node_disk_read_bytes_total{device="sda",instance="localhost:9100"} | 10+60x1 |
- | node_disk_read_bytes_total{device="sdb",instance="localhost:9100"} | 100+600x1 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | node_disk_read_bytes_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 |
+ | node_disk_read_bytes_total{job="ceph",device="sdb",instance="localhost:9100"} | 100+600x1 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `ceph_hosts` is `localhost`
Then Grafana panel `$ceph_hosts Throughput by Disk` with legend `{{device}}({{ceph_daemon}}) read` shows:
| metrics | values |
- | {ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 |
- | {ceph_daemon="osd.1", device="sdb", instance="localhost"} | 10 |
+ | {job="ceph",ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 |
+ | {job="ceph",ceph_daemon="osd.1", device="sdb", instance="localhost"} | 10 |
Scenario: "Test disk throughput - write"
Given the following series:
| metrics | values |
- | node_disk_written_bytes_total{device="sda",instance="localhost:9100"} | 10+60x1 |
- | node_disk_written_bytes_total{device="sdb",instance="localhost:9100"} | 100+600x1 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | node_disk_written_bytes_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 |
+ | node_disk_written_bytes_total{job="ceph",device="sdb",instance="localhost:9100"} | 100+600x1 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `ceph_hosts` is `localhost`
Then Grafana panel `$ceph_hosts Throughput by Disk` with legend `{{device}}({{ceph_daemon}}) write` shows:
| metrics | values |
- | {ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 |
- | {ceph_daemon="osd.1", device="sdb", instance="localhost"} | 10 |
+ | {job="ceph",ceph_daemon="osd.0", device="sda", instance="localhost"} | 1 |
+ | {job="ceph",ceph_daemon="osd.1", device="sdb", instance="localhost"} | 10 |
# Node disk bytes written/read panel - end
Scenario: "Test $ceph_hosts Disk Latency panel"
Given the following series:
| metrics | values |
- | node_disk_write_time_seconds_total{device="sda",instance="localhost:9100"} | 10+60x1 |
- | node_disk_write_time_seconds_total{device="sdb",instance="localhost:9100"} | 10+60x1 |
- | node_disk_writes_completed_total{device="sda",instance="localhost:9100"} | 10+60x1 |
- | node_disk_writes_completed_total{device="sdb",instance="localhost:9100"} | 10+60x1 |
- | node_disk_read_time_seconds_total{device="sda",instance="localhost:9100"} | 10+60x1 |
- | node_disk_read_time_seconds_total{device="sdb",instance="localhost:9100"} | 10+60x1 |
- | node_disk_reads_completed_total{device="sda",instance="localhost:9100"} | 10+60x1 |
- | node_disk_reads_completed_total{device="sdb",instance="localhost:9100"} | 10+60x1 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | node_disk_write_time_seconds_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 |
+ | node_disk_write_time_seconds_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 |
+ | node_disk_writes_completed_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 |
+ | node_disk_writes_completed_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 |
+ | node_disk_read_time_seconds_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 |
+ | node_disk_read_time_seconds_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 |
+ | node_disk_reads_completed_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 |
+ | node_disk_reads_completed_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `ceph_hosts` is `localhost`
Then Grafana panel `$ceph_hosts Disk Latency` with legend `{{device}}({{ceph_daemon}})` shows:
| metrics | values |
@@ -119,13 +119,13 @@ Scenario: "Test $ceph_hosts Disk Latency panel"
Scenario: "Test $ceph_hosts Disk utilization"
Given the following series:
| metrics | values |
- | node_disk_io_time_seconds_total{device="sda",instance="localhost:9100"} | 10+60x1 |
- | node_disk_io_time_seconds_total{device="sdb",instance="localhost:9100"} | 10+60x1 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | node_disk_io_time_seconds_total{job="ceph",device="sda",instance="localhost:9100"} | 10+60x1 |
+ | node_disk_io_time_seconds_total{job="ceph",device="sdb",instance="localhost:9100"} | 10+60x1 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `ceph_hosts` is `localhost`
Then Grafana panel `$ceph_hosts Disk utilization` with legend `{{device}}({{ceph_daemon}})` shows:
| metrics | values |
- | {ceph_daemon="osd.0", device="sda", instance="localhost"} | 100 |
- | {ceph_daemon="osd.1", device="sdb", instance="localhost"} | 100 |
+ | {job="ceph",ceph_daemon="osd.0", device="sda", instance="localhost"} | 100 |
+ | {job="ceph",ceph_daemon="osd.1", device="sdb", instance="localhost"} | 100 |
diff --git a/monitoring/ceph-mixin/tests_dashboards/features/hosts_overview.feature b/monitoring/ceph-mixin/tests_dashboards/features/hosts_overview.feature
index 595f2330964..6c5eceaed3f 100644
--- a/monitoring/ceph-mixin/tests_dashboards/features/hosts_overview.feature
+++ b/monitoring/ceph-mixin/tests_dashboards/features/hosts_overview.feature
@@ -33,8 +33,8 @@ Scenario: "Test AVG Disk Utilization"
| node_disk_io_time_seconds_total{device="sda",instance="localhost:9100"} | 10+60x1 |
| node_disk_io_time_seconds_total{device="sdb",instance="localhost:9100"} | 10+60x1 |
| node_disk_io_time_seconds_total{device="sdc",instance="localhost:9100"} | 10 2000 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0",device="sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `osd_hosts` is `localhost`
Then Grafana panel `AVG Disk Utilization` with legend `EMPTY` shows:
| metrics | values |
diff --git a/monitoring/ceph-mixin/tests_dashboards/features/osd-device-details.feature b/monitoring/ceph-mixin/tests_dashboards/features/osd-device-details.feature
index 07af8692c8c..0d6ca8b1715 100644
--- a/monitoring/ceph-mixin/tests_dashboards/features/osd-device-details.feature
+++ b/monitoring/ceph-mixin/tests_dashboards/features/osd-device-details.feature
@@ -7,8 +7,8 @@ Scenario: "Test Physical Device Latency for $osd - Reads"
| node_disk_reads_completed_total{device="sdb",instance="localhost"} | 10 60 |
| node_disk_read_time_seconds_total{device="sda",instance="localhost"} | 100 600 |
| node_disk_read_time_seconds_total{device="sdb",instance="localhost"} | 100 600 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `osd` is `osd.0`
Then Grafana panel `Physical Device Latency for $osd` with legend `{{instance}}/{{device}} Reads` shows:
| metrics | values |
@@ -21,8 +21,8 @@ Scenario: "Test Physical Device Latency for $osd - Writes"
| node_disk_writes_completed_total{device="sdb",instance="localhost"} | 10 60 |
| node_disk_write_time_seconds_total{device="sda",instance="localhost"} | 100 600 |
| node_disk_write_time_seconds_total{device="sdb",instance="localhost"} | 100 600 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `osd` is `osd.0`
Then Grafana panel `Physical Device Latency for $osd` with legend `{{instance}}/{{device}} Writes` shows:
| metrics | values |
@@ -33,8 +33,8 @@ Scenario: "Test Physical Device R/W IOPS for $osd - Writes"
| metrics | values |
| node_disk_writes_completed_total{device="sda",instance="localhost"} | 10 100 |
| node_disk_writes_completed_total{device="sdb",instance="localhost"} | 10 100 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `osd` is `osd.0`
Then Grafana panel `Physical Device R/W IOPS for $osd` with legend `{{device}} on {{instance}} Writes` shows:
| metrics | values |
@@ -45,8 +45,8 @@ Scenario: "Test Physical Device R/W IOPS for $osd - Reads"
| metrics | values |
| node_disk_reads_completed_total{device="sda",instance="localhost"} | 10 100 |
| node_disk_reads_completed_total{device="sdb",instance="localhost"} | 10 100 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `osd` is `osd.0`
Then Grafana panel `Physical Device R/W IOPS for $osd` with legend `{{device}} on {{instance}} Reads` shows:
| metrics | values |
@@ -57,8 +57,8 @@ Scenario: "Test Physical Device R/W Bytes for $osd - Reads"
| metrics | values |
| node_disk_reads_completed_total{device="sda",instance="localhost"} | 10 100 |
| node_disk_reads_completed_total{device="sdb",instance="localhost"} | 10 100 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `osd` is `osd.0`
Then Grafana panel `Physical Device R/W IOPS for $osd` with legend `{{device}} on {{instance}} Reads` shows:
| metrics | values |
@@ -69,8 +69,8 @@ Scenario: "Test Physical Device R/W Bytes for $osd - Writes"
| metrics | values |
| node_disk_writes_completed_total{device="sda",instance="localhost"} | 10 100 |
| node_disk_writes_completed_total{device="sdb",instance="localhost"} | 10 100 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `osd` is `osd.0`
Then Grafana panel `Physical Device R/W IOPS for $osd` with legend `{{device}} on {{instance}} Writes` shows:
| metrics | values |
@@ -80,8 +80,8 @@ Scenario: "Test Physical Device Util% for $osd"
Given the following series:
| metrics | values |
| node_disk_io_time_seconds_total{device="sda",instance="localhost:9100"} | 10 100 |
- | ceph_disk_occupation_human{ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
- | ceph_disk_occupation_human{ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.0",device="/dev/sda",instance="localhost:9283"} | 1.0 |
+ | ceph_disk_occupation_human{job="ceph",ceph_daemon="osd.1",device="/dev/sdb",instance="localhost:9283"} | 1.0 |
When variable `osd` is `osd.0`
Then Grafana panel `Physical Device Util% for $osd` with legend `{{device}} on {{instance}}` shows:
| metrics | values |
diff --git a/monitoring/ceph-mixin/tests_dashboards/features/radosgw-detail.feature b/monitoring/ceph-mixin/tests_dashboards/features/radosgw-detail.feature
index bcc793a21a5..e0016c5077d 100644
--- a/monitoring/ceph-mixin/tests_dashboards/features/radosgw-detail.feature
+++ b/monitoring/ceph-mixin/tests_dashboards/features/radosgw-detail.feature
@@ -10,7 +10,7 @@ Scenario: "Test $rgw_servers GET/PUT Latencies - GET"
And variable `rgw_servers` is `rgw.foo`
Then Grafana panel `$rgw_servers GET/PUT Latencies` with legend `GET {{ceph_daemon}}` shows:
| metrics | values |
- | {ceph_daemon="rgw.foo", instance_id="58892247"} | 2.5000000000000004 |
+ | {ceph_daemon="rgw.foo", instance_id="58892247"} | 1.5 |
Scenario: "Test $rgw_servers GET/PUT Latencies - PUT"
Given the following series:
@@ -33,7 +33,7 @@ Scenario: "Test Bandwidth by HTTP Operation - GET"
And variable `rgw_servers` is `rgw.1`
Then Grafana panel `Bandwidth by HTTP Operation` with legend `GETs {{ceph_daemon}}` shows:
| metrics | values |
- | {ceph_daemon="rgw.1", instance="127.0.0.1", instance_id="92806566", job="ceph"} | 1.6666666666666667 |
+ | {ceph_daemon="rgw.1", instance="127.0.0.1", instance_id="92806566", job="ceph"} | 1.5 |
Scenario: "Test Bandwidth by HTTP Operation - PUT"
Given the following series:
@@ -44,7 +44,7 @@ Scenario: "Test Bandwidth by HTTP Operation - PUT"
And variable `rgw_servers` is `rgw.1`
Then Grafana panel `Bandwidth by HTTP Operation` with legend `PUTs {{ceph_daemon}}` shows:
| metrics | values |
- | {ceph_daemon="rgw.1", instance="127.0.0.1", instance_id="92806566", job="ceph"} | 1 |
+ | {ceph_daemon="rgw.1", instance="127.0.0.1", instance_id="92806566", job="ceph"} | 7.5E-01 |
Scenario: "Test HTTP Request Breakdown - Requests Failed"
Given the following series:
@@ -55,7 +55,7 @@ Scenario: "Test HTTP Request Breakdown - Requests Failed"
And variable `rgw_servers` is `rgw.foo`
Then Grafana panel `HTTP Request Breakdown` with legend `Requests Failed {{ceph_daemon}}` shows:
| metrics | values |
- | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 6.666666666666667e-02 |
+ | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 1E-01 |
Scenario: "Test HTTP Request Breakdown - GET"
Given the following series:
@@ -66,7 +66,7 @@ Scenario: "Test HTTP Request Breakdown - GET"
And variable `rgw_servers` is `rgw.foo`
Then Grafana panel `HTTP Request Breakdown` with legend `GETs {{ceph_daemon}}` shows:
| metrics | values |
- | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | .6666666666666666 |
+ | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 1.1666666666666667 |
Scenario: "Test HTTP Request Breakdown - PUT"
Given the following series:
@@ -77,7 +77,7 @@ Scenario: "Test HTTP Request Breakdown - PUT"
And variable `rgw_servers` is `rgw.foo`
Then Grafana panel `HTTP Request Breakdown` with legend `PUTs {{ceph_daemon}}` shows:
| metrics | values |
- | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 2.3333333333333335 |
+ | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 1.5 |
Scenario: "Test HTTP Request Breakdown - Other"
Given the following series:
@@ -101,7 +101,7 @@ Scenario: "Test Workload Breakdown - Failures"
And variable `rgw_servers` is `rgw.foo`
Then Grafana panel `Workload Breakdown` with legend `Failures {{ceph_daemon}}` shows:
| metrics | values |
- | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 6.666666666666667e-02 |
+ | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 1E-01 |
Scenario: "Test Workload Breakdown - GETs"
Given the following series:
@@ -112,7 +112,7 @@ Scenario: "Test Workload Breakdown - GETs"
And variable `rgw_servers` is `rgw.foo`
Then Grafana panel `Workload Breakdown` with legend `GETs {{ceph_daemon}}` shows:
| metrics | values |
- | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | .6666666666666666 |
+ | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 1.1666666666666667 |
Scenario: "Test Workload Breakdown - PUTs"
Given the following series:
@@ -123,7 +123,7 @@ Scenario: "Test Workload Breakdown - PUTs"
And variable `rgw_servers` is `rgw.foo`
Then Grafana panel `Workload Breakdown` with legend `PUTs {{ceph_daemon}}` shows:
| metrics | values |
- | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 2.3333333333333335 |
+ | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph"} | 1.5 |
Scenario: "Test Workload Breakdown - Other"
Given the following series:
diff --git a/monitoring/ceph-mixin/tests_dashboards/features/radosgw_overview.feature b/monitoring/ceph-mixin/tests_dashboards/features/radosgw_overview.feature
index 69e46b1d511..b095392a21b 100644
--- a/monitoring/ceph-mixin/tests_dashboards/features/radosgw_overview.feature
+++ b/monitoring/ceph-mixin/tests_dashboards/features/radosgw_overview.feature
@@ -9,7 +9,7 @@ Scenario: "Test Average GET Latencies"
When interval is `30s`
Then Grafana panel `Average GET/PUT Latencies` with legend `GET AVG` shows:
| metrics | values |
- | {ceph_daemon="rgw.foo",instance="127.0.0.1", instance_id="58892247", job="ceph"} | 2.5000000000000004 |
+ | {ceph_daemon="rgw.foo",instance="127.0.0.1", instance_id="58892247", job="ceph"} | 1.5 |
Scenario: "Test Average PUT Latencies"
Given the following series:
@@ -30,7 +30,7 @@ Scenario: "Test Total Requests/sec by RGW Instance"
When interval is `30s`
Then Grafana panel `Total Requests/sec by RGW Instance` with legend `{{rgw_host}}` shows:
| metrics | values |
- | {rgw_host="1"} | 1.6666666666666667 |
+ | {rgw_host="1"} | 1.5 |
Scenario: "Test GET Latencies by RGW Instance"
Given the following series:
@@ -41,7 +41,7 @@ Scenario: "Test GET Latencies by RGW Instance"
When interval is `30s`
Then Grafana panel `GET Latencies by RGW Instance` with legend `{{rgw_host}}` shows:
| metrics | values |
- | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph", rgw_host="foo"} | 2.5000000000000004 |
+ | {ceph_daemon="rgw.foo", instance="127.0.0.1", instance_id="58892247", job="ceph", rgw_host="foo"} | 1.5 |
Scenario: "Test Bandwidth Consumed by Type- GET"
Given the following series:
@@ -51,7 +51,7 @@ Scenario: "Test Bandwidth Consumed by Type- GET"
And interval is `30s`
Then Grafana panel `Bandwidth Consumed by Type` with legend `GETs` shows:
| metrics | values |
- | {} | 1.6666666666666667 |
+ | {} | 1.5 |
Scenario: "Test Bandwidth Consumed by Type- PUT"
Given the following series:
@@ -61,7 +61,7 @@ Scenario: "Test Bandwidth Consumed by Type- PUT"
And interval is `30s`
Then Grafana panel `Bandwidth Consumed by Type` with legend `PUTs` shows:
| metrics | values |
- | {} | 1 |
+ | {} | 7.5E-01 |
Scenario: "Test Bandwidth by RGW Instance"
Given the following series:
@@ -73,7 +73,7 @@ Scenario: "Test Bandwidth by RGW Instance"
And interval is `30s`
Then Grafana panel `Bandwidth by RGW Instance` with legend `{{rgw_host}}` shows:
| metrics | values |
- | {ceph_daemon="rgw.1", instance_id="92806566", rgw_host="1"} | 2.666666666666667 |
+ | {ceph_daemon="rgw.1", instance_id="92806566", rgw_host="1"} | 2.25 |
Scenario: "Test PUT Latencies by RGW Instance"
Given the following series:
@@ -90,8 +90,8 @@ Scenario: "Test PUT Latencies by RGW Instance"
Scenario: "Test Total backend responses by HTTP code"
Given the following series:
| metrics | values |
- | haproxy_backend_http_responses_total{code="200",instance="ingress.rgw.1",proxy="backend"} | 10 100 |
- | haproxy_backend_http_responses_total{code="404",instance="ingress.rgw.1",proxy="backend"} | 20 200 |
+ | haproxy_backend_http_responses_total{job="haproxy",code="200",instance="ingress.rgw.1",proxy="backend"} | 10 100 |
+ | haproxy_backend_http_responses_total{job="haproxy",code="404",instance="ingress.rgw.1",proxy="backend"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
When variable `code` is `200`
Then Grafana panel `Total responses by HTTP code` with legend `Backend {{ code }}` shows:
@@ -101,8 +101,8 @@ Scenario: "Test Total backend responses by HTTP code"
Scenario: "Test Total frontend responses by HTTP code"
Given the following series:
| metrics | values |
- | haproxy_frontend_http_responses_total{code="200",instance="ingress.rgw.1",proxy="frontend"} | 10 100 |
- | haproxy_frontend_http_responses_total{code="404",instance="ingress.rgw.1",proxy="frontend"} | 20 200 |
+ | haproxy_frontend_http_responses_total{job="haproxy",code="200",instance="ingress.rgw.1",proxy="frontend"} | 10 100 |
+ | haproxy_frontend_http_responses_total{job="haproxy",code="404",instance="ingress.rgw.1",proxy="frontend"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
When variable `code` is `200`
Then Grafana panel `Total responses by HTTP code` with legend `Frontend {{ code }}` shows:
@@ -112,8 +112,8 @@ Scenario: "Test Total frontend responses by HTTP code"
Scenario: "Test Total http frontend requests by instance"
Given the following series:
| metrics | values |
- | haproxy_frontend_http_requests_total{proxy="frontend",instance="ingress.rgw.1"} | 10 100 |
- | haproxy_frontend_http_requests_total{proxy="frontend",instance="ingress.rgw.1"} | 20 200 |
+ | haproxy_frontend_http_requests_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 10 100 |
+ | haproxy_frontend_http_requests_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
Then Grafana panel `Total requests / responses` with legend `Requests` shows:
| metrics | values |
@@ -122,8 +122,8 @@ Scenario: "Test Total http frontend requests by instance"
Scenario: "Test Total backend response errors by instance"
Given the following series:
| metrics | values |
- | haproxy_backend_response_errors_total{proxy="backend",instance="ingress.rgw.1"} | 10 100 |
- | haproxy_backend_response_errors_total{proxy="backend",instance="ingress.rgw.1"} | 20 200 |
+ | haproxy_backend_response_errors_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 |
+ | haproxy_backend_response_errors_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
Then Grafana panel `Total requests / responses` with legend `Response errors` shows:
| metrics | values |
@@ -132,8 +132,8 @@ Scenario: "Test Total backend response errors by instance"
Scenario: "Test Total frontend requests errors by instance"
Given the following series:
| metrics | values |
- | haproxy_frontend_request_errors_total{proxy="frontend",instance="ingress.rgw.1"} | 10 100 |
- | haproxy_frontend_request_errors_total{proxy="frontend",instance="ingress.rgw.1"} | 20 200 |
+ | haproxy_frontend_request_errors_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 10 100 |
+ | haproxy_frontend_request_errors_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
Then Grafana panel `Total requests / responses` with legend `Requests errors` shows:
| metrics | values |
@@ -142,8 +142,8 @@ Scenario: "Test Total frontend requests errors by instance"
Scenario: "Test Total backend redispatch warnings by instance"
Given the following series:
| metrics | values |
- | haproxy_backend_redispatch_warnings_total{proxy="backend",instance="ingress.rgw.1"} | 10 100 |
- | haproxy_backend_redispatch_warnings_total{proxy="backend",instance="ingress.rgw.1"} | 20 200 |
+ | haproxy_backend_redispatch_warnings_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 |
+ | haproxy_backend_redispatch_warnings_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
Then Grafana panel `Total requests / responses` with legend `Backend redispatch` shows:
| metrics | values |
@@ -152,8 +152,8 @@ Scenario: "Test Total backend redispatch warnings by instance"
Scenario: "Test Total backend retry warnings by instance"
Given the following series:
| metrics | values |
- | haproxy_backend_retry_warnings_total{proxy="backend",instance="ingress.rgw.1"} | 10 100 |
- | haproxy_backend_retry_warnings_total{proxy="backend",instance="ingress.rgw.1"} | 20 200 |
+ | haproxy_backend_retry_warnings_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 |
+ | haproxy_backend_retry_warnings_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
Then Grafana panel `Total requests / responses` with legend `Backend retry` shows:
| metrics | values |
@@ -162,8 +162,8 @@ Scenario: "Test Total backend retry warnings by instance"
Scenario: "Test Total frontend requests denied by instance"
Given the following series:
| metrics | values |
- | haproxy_frontend_requests_denied_total{proxy="frontend",instance="ingress.rgw.1"} | 10 100 |
- | haproxy_frontend_requests_denied_total{proxy="frontend",instance="ingress.rgw.1"} | 20 200 |
+ | haproxy_frontend_requests_denied_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 10 100 |
+ | haproxy_frontend_requests_denied_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
Then Grafana panel `Total requests / responses` with legend `Request denied` shows:
| metrics | values |
@@ -172,8 +172,8 @@ Scenario: "Test Total frontend requests denied by instance"
Scenario: "Test Total backend current queue by instance"
Given the following series:
| metrics | values |
- | haproxy_backend_current_queue{proxy="backend",instance="ingress.rgw.1"} | 10 100 |
- | haproxy_backend_current_queue{proxy="backend",instance="ingress.rgw.1"} | 20 200 |
+ | haproxy_backend_current_queue{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 |
+ | haproxy_backend_current_queue{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
Then Grafana panel `Total requests / responses` with legend `Backend Queued` shows:
| metrics | values |
@@ -182,8 +182,8 @@ Scenario: "Test Total backend current queue by instance"
Scenario: "Test Total frontend connections by instance"
Given the following series:
| metrics | values |
- | haproxy_frontend_connections_total{proxy="frontend",instance="ingress.rgw.1"} | 10 100 |
- | haproxy_frontend_connections_total{proxy="frontend",instance="ingress.rgw.1"} | 20 200 |
+ | haproxy_frontend_connections_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 10 100 |
+ | haproxy_frontend_connections_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
Then Grafana panel `Total number of connections` with legend `Front` shows:
| metrics | values |
@@ -192,8 +192,8 @@ Scenario: "Test Total frontend connections by instance"
Scenario: "Test Total backend connections attempts by instance"
Given the following series:
| metrics | values |
- | haproxy_backend_connection_attempts_total{proxy="backend",instance="ingress.rgw.1"} | 10 100 |
- | haproxy_backend_connection_attempts_total{proxy="backend",instance="ingress.rgw.1"} | 20 200 |
+ | haproxy_backend_connection_attempts_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 |
+ | haproxy_backend_connection_attempts_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
Then Grafana panel `Total number of connections` with legend `Back` shows:
| metrics | values |
@@ -202,8 +202,8 @@ Scenario: "Test Total backend connections attempts by instance"
Scenario: "Test Total backend connections error by instance"
Given the following series:
| metrics | values |
- | haproxy_backend_connection_errors_total{proxy="backend",instance="ingress.rgw.1"} | 10 100 |
- | haproxy_backend_connection_errors_total{proxy="backend",instance="ingress.rgw.1"} | 20 200 |
+ | haproxy_backend_connection_errors_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 |
+ | haproxy_backend_connection_errors_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
Then Grafana panel `Total number of connections` with legend `Back errors` shows:
| metrics | values |
@@ -212,8 +212,8 @@ Scenario: "Test Total backend connections error by instance"
Scenario: "Test Total frontend bytes incoming by instance"
Given the following series:
| metrics | values |
- | haproxy_frontend_bytes_in_total{proxy="frontend",instance="ingress.rgw.1"} | 10 100 |
- | haproxy_frontend_bytes_in_total{proxy="frontend",instance="ingress.rgw.1"} | 20 200 |
+ | haproxy_frontend_bytes_in_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 10 100 |
+ | haproxy_frontend_bytes_in_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
Then Grafana panel `Current total of incoming / outgoing bytes` with legend `IN Front` shows:
| metrics | values |
@@ -222,8 +222,8 @@ Scenario: "Test Total frontend bytes incoming by instance"
Scenario: "Test Total frontend bytes outgoing by instance"
Given the following series:
| metrics | values |
- | haproxy_frontend_bytes_out_total{proxy="frontend",instance="ingress.rgw.1"} | 10 100 |
- | haproxy_frontend_bytes_out_total{proxy="frontend",instance="ingress.rgw.1"} | 20 200 |
+ | haproxy_frontend_bytes_out_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 10 100 |
+ | haproxy_frontend_bytes_out_total{job="haproxy",proxy="frontend",instance="ingress.rgw.1"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
Then Grafana panel `Current total of incoming / outgoing bytes` with legend `OUT Front` shows:
| metrics | values |
@@ -232,8 +232,8 @@ Scenario: "Test Total frontend bytes outgoing by instance"
Scenario: "Test Total backend bytes incoming by instance"
Given the following series:
| metrics | values |
- | haproxy_backend_bytes_in_total{proxy="backend",instance="ingress.rgw.1"} | 10 100 |
- | haproxy_backend_bytes_in_total{proxy="backend",instance="ingress.rgw.1"} | 20 200 |
+ | haproxy_backend_bytes_in_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 |
+ | haproxy_backend_bytes_in_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
Then Grafana panel `Current total of incoming / outgoing bytes` with legend `IN Back` shows:
| metrics | values |
@@ -242,8 +242,8 @@ Scenario: "Test Total backend bytes incoming by instance"
Scenario: "Test Total backend bytes outgoing by instance"
Given the following series:
| metrics | values |
- | haproxy_backend_bytes_out_total{proxy="backend",instance="ingress.rgw.1"} | 10 100 |
- | haproxy_backend_bytes_out_total{proxy="backend",instance="ingress.rgw.1"} | 20 200 |
+ | haproxy_backend_bytes_out_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 10 100 |
+ | haproxy_backend_bytes_out_total{job="haproxy",proxy="backend",instance="ingress.rgw.1"} | 20 200 |
When variable `ingress_service` is `ingress.rgw.1`
Then Grafana panel `Current total of incoming / outgoing bytes` with legend `OUT Back` shows:
| metrics | values |
diff --git a/monitoring/ceph-mixin/tests_dashboards/util.py b/monitoring/ceph-mixin/tests_dashboards/util.py
index 4310eb207f0..1fce6559dfb 100644
--- a/monitoring/ceph-mixin/tests_dashboards/util.py
+++ b/monitoring/ceph-mixin/tests_dashboards/util.py
@@ -30,6 +30,7 @@ def get_dashboards_data() -> Dict[str, Any]:
data['stats'][str(file)] = {'total': 0, 'tested': 0}
add_dashboard_queries(data, dashboard_data, str(file))
add_dashboard_variables(data, dashboard_data)
+ add_default_dashboards_variables(data)
return data
@@ -76,6 +77,10 @@ def add_dashboard_variables(data: Dict[str, Any], dashboard_data: Dict[str, Any]
if 'name' in variable:
data['variables'][variable['name']] = 'UNSET VARIABLE'
+def add_default_dashboards_variables(data: Dict[str, Any]) -> None:
+ data['variables']['job'] = 'ceph'
+ data['variables']['job_haproxy'] = 'haproxy'
+ data['variables']['__rate_interval'] = '1m'
def replace_grafana_expr_variables(expr: str, variable: str, value: Any) -> str:
""" Replace grafana variables in expression with a value