author     Vladimír Čunát <vladimir.cunat@nic.cz>  2024-06-14 11:12:16 +0200
committer  Vladimír Čunát <vladimir.cunat@nic.cz>  2024-06-14 11:12:16 +0200
commit     cce4acea89d6ae08dfc79e9c93b66b2a09834abd (patch)
tree       f4aa998704d2347eef2bbd9697fff3d2c0c23956
parent     defer: add new KRU instance and async queues (diff)
treewide: utilize _Alignas, as it's standard C11
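In short: the GNU-specific __attribute__((aligned(N))) wrapper macros are replaced by the standard C11 alignment specifier, dropping the compiler-version fallbacks. One placement detail follows from the standard: _Alignas cannot trail a struct definition the way the GNU attribute could, so the specifier moves onto a member declaration, which in turn raises the alignment of the containing struct. A minimal sketch of the before/after pattern (illustrative only; the struct names here are made up, not from the codebase):

    #include <assert.h>   /* static_assert */
    #include <stdalign.h> /* alignof */

    /* Before: GNU extension; the attribute trails the closing brace
     * and applies to the type. */
    struct old_cl {
            int x;
    } __attribute__((aligned(64)));

    /* After: standard C11; the specifier goes on a member declaration
     * and raises the alignment of the whole struct. */
    struct new_cl {
            _Alignas(64) int x;
    };

    static_assert(alignof(struct new_cl) == 64, "cache-line aligned");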
-rw-r--r--  daemon/defer.c                        6
-rw-r--r--  daemon/ratelimiting.c                 4
-rw-r--r--  daemon/ratelimiting.test/tests.inc.c  2
-rw-r--r--  lib/generic/lru.h                     11
-rw-r--r--  lib/kru.h                             8
-rw-r--r--  lib/kru.inc.c                         5
6 files changed, 14 insertions, 22 deletions
diff --git a/daemon/defer.c b/daemon/defer.c
index 5d0ad222..7c09751c 100644
--- a/daemon/defer.c
+++ b/daemon/defer.c
@@ -24,7 +24,7 @@ struct defer {
size_t capacity;
kru_price_t max_decay;
bool using_avx2;
- uint8_t kru[] ALIGNED(64);
+ _Alignas(64) uint8_t kru[];
};
struct defer *defer = NULL;
struct mmapped defer_mmapped = {0};
@@ -46,7 +46,7 @@ static bool using_avx2(void)
/// Increment KRU counters by given time.
void defer_account(uint64_t nsec, union kr_sockaddr addr) {
- uint8_t key[16] ALIGNED(16) = {0, };
+ _Alignas(16) uint8_t key[16] = {0, };
uint16_t max_load = 0;
if (defer_sample_state.addr.ip.sa_family == AF_INET6) {
struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&defer_sample_state.addr.ip;
@@ -86,7 +86,7 @@ static enum protolayer_iter_cb_result pl_defer_unwrap(
defer_sample_addr((const union kr_sockaddr *)ctx->comm->comm_addr);
- uint8_t key[16] ALIGNED(16) = {0, };
+ _Alignas(16) uint8_t key[16] = {0, };
uint16_t max_load = 0;
if (ctx->comm->comm_addr->sa_family == AF_INET6) {
struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)ctx->comm->comm_addr;
diff --git a/daemon/ratelimiting.c b/daemon/ratelimiting.c
index e6b13430..cd15dc8c 100644
--- a/daemon/ratelimiting.c
+++ b/daemon/ratelimiting.c
@@ -22,7 +22,7 @@ struct ratelimiting {
bool using_avx2;
kru_price_t v4_prices[V4_PREFIXES_CNT];
kru_price_t v6_prices[V6_PREFIXES_CNT];
- uint8_t kru[] ALIGNED(64);
+ _Alignas(64) uint8_t kru[];
};
struct ratelimiting *ratelimiting = NULL;
struct mmapped ratelimiting_mmapped = {0};
@@ -107,7 +107,7 @@ bool ratelimiting_request_begin(struct kr_request *req)
return false; // don't consider internal requests
uint8_t limited = 0; // 0: not limited, 1: truncated, 2: no answer
if (ratelimiting) {
- uint8_t key[16] ALIGNED(16) = {0, };
+ _Alignas(16) uint8_t key[16] = {0, };
uint8_t limited_prefix;
uint16_t max_final_load = 0;
if (req->qsource.addr->sa_family == AF_INET6) {
diff --git a/daemon/ratelimiting.test/tests.inc.c b/daemon/ratelimiting.test/tests.inc.c
index 157da558..467473c0 100644
--- a/daemon/ratelimiting.test/tests.inc.c
+++ b/daemon/ratelimiting.test/tests.inc.c
@@ -68,7 +68,7 @@ struct kru_generic {
// ...
};
struct kru_avx2 {
- char hash_key[48] ALIGNED(32);
+ _Alignas(32) char hash_key[48];
// ...
};
diff --git a/lib/generic/lru.h b/lib/generic/lru.h
index 1c1dd81a..b78888fc 100644
--- a/lib/generic/lru.h
+++ b/lib/generic/lru.h
@@ -172,11 +172,7 @@ enum lru_apply_do {
enum lru_apply_do (*(name))(const char *key, uint len, val_type *val, void *baton)
typedef lru_apply_fun_g(lru_apply_fun, void);
-#if __GNUC__ >= 4
- #define CACHE_ALIGNED __attribute__((aligned(64)))
-#else
- #define CACHE_ALIGNED
-#endif
+#define CACHE_ALIGNED _Alignas(64)
struct lru;
void lru_free_items_impl(struct lru *lru);
@@ -198,10 +194,11 @@ struct lru_item;
#define LRU_TRACKED ((64 - sizeof(size_t) * LRU_ASSOC) / 4 - 1)
struct lru_group {
+ CACHE_ALIGNED
uint16_t counts[LRU_TRACKED+1]; /*!< Occurrence counters; the last one is special. */
uint16_t hashes[LRU_TRACKED+1]; /*!< Top halves of hashes; the last one is unused. */
struct lru_item *items[LRU_ASSOC]; /*!< The full items. */
-} CACHE_ALIGNED;
+};
/* The sizes are chosen so lru_group just fits into a single x86 cache line. */
static_assert(64 == sizeof(struct lru_group)
@@ -213,7 +210,7 @@ struct lru {
*mm_array; /**< Memory context to use for this structure itself. */
uint log_groups; /**< Logarithm of the number of LRU groups. */
uint val_alignment; /**< Alignment for the values. */
- struct lru_group groups[] CACHE_ALIGNED; /**< The groups of items. */
+ CACHE_ALIGNED struct lru_group groups[]; /**< The groups of items. */
};
/** @internal See lru_free. */
diff --git a/lib/kru.h b/lib/kru.h
index a443e419..0a3a8e80 100644
--- a/lib/kru.h
+++ b/lib/kru.h
@@ -20,13 +20,7 @@
#include <stddef.h>
#include <stdint.h>
-#if __GNUC__ >= 4 || __clang_major__ >= 4
- #define ALIGNED_CPU_CACHE __attribute__((aligned(64)))
- #define ALIGNED(_bytes) __attribute__((aligned(_bytes)))
-#else
- #define ALIGNED_CPU_CACHE
- #define ALIGNED(_bytes)
-#endif
+#define ALIGNED_CPU_CACHE _Alignas(64)
// An unsigned integral type used for prices, blocking occurs when sum of prices overflows.
// Greater than 16-bit type enables randomized fractional incrementing as the internal counters are still 16-bit.
diff --git a/lib/kru.inc.c b/lib/kru.inc.c
index 9909cfd7..b67d3237 100644
--- a/lib/kru.inc.c
+++ b/lib/kru.inc.c
@@ -74,11 +74,12 @@ typedef uint64_t hash_t;
/// Block of loads sharing the same time, so that we're more space-efficient.
/// It's exactly a single cache line.
struct load_cl {
+ ALIGNED_CPU_CACHE
_Atomic uint32_t time;
#define LOADS_LEN 15
uint16_t ids[LOADS_LEN];
uint16_t loads[LOADS_LEN];
-} ALIGNED_CPU_CACHE;
+};
static_assert(64 == sizeof(struct load_cl), "bad size of struct load_cl");
/// Parametrization for speed of decay.
@@ -96,7 +97,7 @@ struct kru {
#if USE_AES
/// Hashing secret. Random but shared by all users of the table.
/// Let's not make it too large, so that header fits into 64 Bytes.
- char hash_key[48] ALIGNED(32);
+ _Alignas(32) char hash_key[48];
#else
/// Hashing secret. Random but shared by all users of the table.
SIPHASH_KEY hash_key;
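A subtlety worth noting for the flexible-array members above (kru[]): _Alignas on the member fixes both the member's offset within the struct and the struct's overall alignment, matching what the old trailing attribute provided. Since these structs live in an mmapped region, the page alignment returned by mmap comfortably satisfies the 64-byte requirement at runtime. A hedged sketch of how one might verify the layout, using a made-up stand-in struct (defer_like is hypothetical, loosely mirroring struct defer):

    #include <assert.h>   /* static_assert */
    #include <stdalign.h> /* alignof */
    #include <stddef.h>   /* offsetof, size_t */
    #include <stdint.h>   /* uint8_t */

    struct defer_like {
            size_t capacity;
            _Alignas(64) uint8_t kru[];  /* flexible array member */
    };

    /* The specifier raises the struct's alignment ... */
    static_assert(alignof(struct defer_like) == 64, "struct alignment");
    /* ... and pads the fixed part so the array starts on a 64-byte
     * boundary (offsetof on a flexible array member is accepted by
     * GCC and Clang). */
    static_assert(offsetof(struct defer_like, kru) % 64 == 0, "array offset");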