-rw-r--r--  src/contrib/atomic.h                |  78
-rw-r--r--  src/knot/catalog/catalog_update.c   |   2
-rw-r--r--  src/knot/ctl/commands.c             |   8
-rw-r--r--  src/knot/dnssec/zone-events.c       |   6
-rw-r--r--  src/knot/events/handlers/backup.c   |   4
-rw-r--r--  src/knot/modules/cookies/cookies.c  |  26
-rw-r--r--  src/knot/modules/probe/probe.c      |   6
-rw-r--r--  src/knot/nameserver/query_module.c  |  13
-rw-r--r--  src/knot/server/server.c            |   2
-rw-r--r--  src/knot/updates/zone-update.c      |   2
-rw-r--r--  src/knot/zone/backup.c              |   2
-rw-r--r--  src/knot/zone/contents.c            |   3
-rw-r--r--  src/knot/zone/zone.c                |   4
-rw-r--r--  src/libknot/quic/quic_conn.c        |   7
-rw-r--r--  src/libknot/quic/tls_common.c       |   9
-rw-r--r--  src/utils/knotd/main.c              |   4
-rw-r--r--  src/utils/kxdpgun/main.c            |   8
-rw-r--r--  tests/contrib/test_atomic.c         |  28

18 files changed, 152 insertions(+), 60 deletions(-)
diff --git a/src/contrib/atomic.h b/src/contrib/atomic.h
index f564d2bdb..b8c34dd18 100644
--- a/src/contrib/atomic.h
+++ b/src/contrib/atomic.h
@@ -21,10 +21,10 @@
#pragma once
#ifdef HAVE_C11_ATOMIC /* C11 */
- #define KNOT_HAVE_ATOMIC
-
#include <stdatomic.h>
+ #define ATOMIC_INIT(dst, val) atomic_store_explicit(&(dst), (val), memory_order_relaxed)
+ #define ATOMIC_DEINIT(dst)
#define ATOMIC_SET(dst, val) atomic_store_explicit(&(dst), (val), memory_order_relaxed)
#define ATOMIC_GET(src) atomic_load_explicit(&(src), memory_order_relaxed)
#define ATOMIC_ADD(dst, val) (void)atomic_fetch_add_explicit(&(dst), (val), memory_order_relaxed)
@@ -37,12 +37,12 @@
typedef _Atomic (void *) knot_atomic_ptr_t;
typedef atomic_bool knot_atomic_bool;
#elif defined(HAVE_GCC_ATOMIC) /* GCC __atomic */
- #define KNOT_HAVE_ATOMIC
-
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
+ #define ATOMIC_INIT(dst, val) __atomic_store_n(&(dst), (val), __ATOMIC_RELAXED)
+ #define ATOMIC_DEINIT(dst)
#define ATOMIC_SET(dst, val) __atomic_store_n(&(dst), (val), __ATOMIC_RELAXED)
#define ATOMIC_GET(src) __atomic_load_n(&(src), __ATOMIC_RELAXED)
#define ATOMIC_ADD(dst, val) __atomic_add_fetch(&(dst), (val), __ATOMIC_RELAXED)
@@ -54,22 +54,66 @@
typedef size_t knot_atomic_size_t;
typedef void* knot_atomic_ptr_t;
typedef bool knot_atomic_bool;
-#else /* Fallback, non-atomic. */
- #warning "Atomic operations not availabe, using unreliable replacement."
-
+#else /* Fallback using spinlocks. Much slower. */
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
- #define ATOMIC_SET(dst, val) ((dst) = (val))
- #define ATOMIC_GET(src) (src)
- #define ATOMIC_ADD(dst, val) ((dst) += (val))
- #define ATOMIC_SUB(dst, val) ((dst) -= (val))
- #define ATOMIC_XCHG(dst, val) ({ __typeof__ (dst) _z = (dst); (dst) = (val); _z; })
+ #include "contrib/spinlock.h"
- typedef uint16_t knot_atomic_uint16_t;
- typedef uint64_t knot_atomic_uint64_t;
- typedef size_t knot_atomic_size_t;
- typedef void* knot_atomic_ptr_t;
- typedef bool knot_atomic_bool;
+ #define ATOMIC_SET(dst, val) ({ \
+ knot_spin_lock((knot_spin_t *)&(dst).lock); \
+ (dst).value.vol = (val); \
+ knot_spin_unlock((knot_spin_t *)&(dst).lock); \
+ })
+
+ #define ATOMIC_INIT(dst, val) ({ \
+ knot_spin_init((knot_spin_t *)&(dst).lock); \
+ ATOMIC_SET(dst, val); \
+ })
+
+ #define ATOMIC_DEINIT(dst) ({ \
+ knot_spin_destroy((knot_spin_t *)&(dst).lock); \
+ })
+
+ #define ATOMIC_GET(src) ({ \
+ knot_spin_lock((knot_spin_t *)&(src).lock); \
+ typeof((src).value.non_vol) _z = (typeof((src).value.non_vol))(src).value.vol; \
+ knot_spin_unlock((knot_spin_t *)&(src).lock); \
+ _z; \
+ })
+
+ #define ATOMIC_ADD(dst, val) ({ \
+ knot_spin_lock((knot_spin_t *)&(dst).lock); \
+ (dst).value.vol += (val); \
+ knot_spin_unlock((knot_spin_t *)&(dst).lock); \
+ })
+
+ #define ATOMIC_SUB(dst, val) ({ \
+ knot_spin_lock((knot_spin_t *)&(dst).lock); \
+ (dst).value.vol -= (val); \
+ knot_spin_unlock((knot_spin_t *)&(dst).lock); \
+ })
+
+ #define ATOMIC_XCHG(dst, val) ({ \
+ knot_spin_lock((knot_spin_t *)&(dst).lock); \
+ typeof((dst).value.non_vol) _z = (typeof((dst).value.non_vol))(dst).value.vol; \
+ (dst).value.vol = (val); \
+ knot_spin_unlock((knot_spin_t *)&(dst).lock); \
+ _z; \
+ })
+
+ #define ATOMIC_T(x) struct { \
+ knot_spin_t lock; \
+ union { \
+ volatile x vol; \
+ x non_vol; \
+ } value; \
+ }
+
+ typedef ATOMIC_T(uint16_t) knot_atomic_uint16_t;
+ typedef ATOMIC_T(uint64_t) knot_atomic_uint64_t;
+ typedef ATOMIC_T(size_t) knot_atomic_size_t;
+ typedef ATOMIC_T(void*) knot_atomic_ptr_t;
+ typedef ATOMIC_T(bool) knot_atomic_bool;
#endif
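
The fallback now wraps every value in a small struct that carries its own spinlock, which is why the callers below gain explicit ATOMIC_INIT()/ATOMIC_DEINIT() calls: the lock has to be created before first use and destroyed afterwards. With C11 or GCC atomics, ATOMIC_INIT() is just a relaxed store and ATOMIC_DEINIT() expands to nothing. A minimal usage sketch, not part of the patch (names are illustrative):

    #include "contrib/atomic.h"

    static knot_atomic_uint64_t counter;

    void counter_setup(void)
    {
        ATOMIC_INIT(counter, 0);    /* fallback: knot_spin_init() + locked store */
    }

    void counter_bump(uint64_t by)
    {
        ATOMIC_ADD(counter, by);    /* fallback: lock, += by, unlock */
    }

    uint64_t counter_read(void)
    {
        return ATOMIC_GET(counter); /* fallback: locked read through the union */
    }

    void counter_teardown(void)
    {
        ATOMIC_DEINIT(counter);     /* fallback: knot_spin_destroy(); no-op otherwise */
    }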
diff --git a/src/knot/catalog/catalog_update.c b/src/knot/catalog/catalog_update.c
index 71b7dbe0f..a4470225b 100644
--- a/src/knot/catalog/catalog_update.c
+++ b/src/knot/catalog/catalog_update.c
@@ -405,7 +405,7 @@ int catalog_zone_purge(server_t *server, conf_t *conf, const knot_dname_t *zone)
int ret = catalog_update_del_all(&server->catalog_upd, &server->catalog, zone, &members);
if (ret == KNOT_EOK && members > 0) {
log_zone_info(zone, "catalog zone purged, %zd member zones deconfigured", members);
- server->catalog_upd_signal = true;
+ ATOMIC_SET(server->catalog_upd_signal, true);
if (kill(getpid(), SIGUSR1) != 0) {
ret = knot_map_errno();
}
diff --git a/src/knot/ctl/commands.c b/src/knot/ctl/commands.c
index 2bc271c7f..8879faa77 100644
--- a/src/knot/ctl/commands.c
+++ b/src/knot/ctl/commands.c
@@ -662,7 +662,7 @@ static int zone_backup_cmd(zone_t *zone, ctl_args_t *args)
int ret = KNOT_EOK;
pthread_mutex_lock(&zone->cu_lock);
- if (zone->backup_ctx != NULL) {
+ if (ATOMIC_GET(zone->backup_ctx) != NULL) {
log_zone_warning(zone->name, "backup or restore already in progress, skipping zone");
ctx->failed = true;
ret = KNOT_EPROGRESS;
@@ -675,7 +675,7 @@ static int zone_backup_cmd(zone_t *zone, ctl_args_t *args)
}
if (ret == KNOT_EOK) {
- zone->backup_ctx = ctx;
+ ATOMIC_SET(zone->backup_ctx, ctx);
}
pthread_mutex_unlock(&zone->cu_lock);
@@ -699,7 +699,7 @@ static int zone_backup_cmd(zone_t *zone, ctl_args_t *args)
}
if (ret != KNOT_EOK || finish) {
- zone->backup_ctx = NULL;
+ ATOMIC_SET(zone->backup_ctx, NULL);
return ret;
}
@@ -874,7 +874,7 @@ static int zone_txn_begin_l(zone_t *zone, _unused_ ctl_args_t *args)
return KNOT_TXN_EEXISTS;
}
- struct zone_backup_ctx *backup_ctx = zone->backup_ctx;
+ struct zone_backup_ctx *backup_ctx = ATOMIC_GET(zone->backup_ctx);
if (backup_ctx != NULL && backup_ctx->restore_mode) {
log_zone_warning(zone->name, "zone restore pending, try opening control transaction later");
return KNOT_EAGAIN;
diff --git a/src/knot/dnssec/zone-events.c b/src/knot/dnssec/zone-events.c
index 811b3dc14..87acb32fd 100644
--- a/src/knot/dnssec/zone-events.c
+++ b/src/knot/dnssec/zone-events.c
@@ -317,7 +317,7 @@ done:
if (result == KNOT_EOK) {
reschedule->next_sign = schedule_next(&ctx, &keyset, ctx.offline_next_time, ctx.stats->expire);
reschedule->plan_dnskey_sync = ctx.policy->has_dnskey_sync;
- update->new_cont->dnssec_expire = ctx.stats->expire;
+ ATOMIC_SET(update->new_cont->dnssec_expire, ctx.stats->expire);
update->flags |= UPDATE_SIGNED_FULL;
} else {
reschedule->next_sign = knot_dnssec_failover_delay(&ctx);
@@ -457,7 +457,9 @@ done:
if (ctx.policy->has_dnskey_sync) {
zone_events_schedule_now(update->zone, ZONE_EVENT_DNSKEY_SYNC);
}
- update->new_cont->dnssec_expire = knot_time_min(update->zone->contents->dnssec_expire, ctx.stats->expire);
+ ATOMIC_SET(update->new_cont->dnssec_expire,
+ (uint64_t)knot_time_min(ATOMIC_GET(update->zone->contents->dnssec_expire),
+ ctx.stats->expire));
}
free_zone_keys(&keyset);
diff --git a/src/knot/events/handlers/backup.c b/src/knot/events/handlers/backup.c
index bbafe7e16..f267ea49a 100644
--- a/src/knot/events/handlers/backup.c
+++ b/src/knot/events/handlers/backup.c
@@ -26,7 +26,7 @@ int event_backup(conf_t *conf, zone_t *zone)
{
assert(zone);
- zone_backup_ctx_t *ctx = zone->backup_ctx;
+ zone_backup_ctx_t *ctx = ATOMIC_GET(zone->backup_ctx);
if (ctx == NULL) {
return KNOT_EINVAL;
}
@@ -66,6 +66,6 @@ int event_backup(conf_t *conf, zone_t *zone)
done:
ret_deinit = zone_backup_deinit(ctx);
- zone->backup_ctx = NULL;
+ ATOMIC_SET(zone->backup_ctx, NULL);
return (ret != KNOT_EOK) ? ret : ret_deinit;
}
diff --git a/src/knot/modules/cookies/cookies.c b/src/knot/modules/cookies/cookies.c
index d5508ac81..218973df5 100644
--- a/src/knot/modules/cookies/cookies.c
+++ b/src/knot/modules/cookies/cookies.c
@@ -248,8 +248,11 @@ int cookies_load(knotd_mod_t *mod)
return KNOT_ENOMEM;
}
- // Initialize BADCOOKIE counter.
- ctx->badcookie_ctr = BADCOOKIE_CTR_INIT;
+ // Initialize atomic variables.
+ ATOMIC_INIT(ctx->badcookie_ctr, BADCOOKIE_CTR_INIT);
+ for (int i = 0; i < 2; ++i) {
+ ATOMIC_INIT(ctx->secret[i].variable, 0);
+ }
// Set up configurable items.
knotd_conf_t conf = knotd_conf_mod(mod, MOD_BADCOOKIE_SLIP);
@@ -276,16 +279,22 @@ int cookies_load(knotd_mod_t *mod)
ctx->secret_cnt = conf.count;
for (int i = 0; i < ctx->secret_cnt; ++i) {
assert(conf.multi[i].data_len == KNOT_EDNS_COOKIE_SECRET_SIZE);
- memcpy(&ctx->secret[i], conf.multi[i].data, conf.multi[i].data_len);
+ uint64_t conf_secret[2];
+ memcpy(conf_secret, conf.multi[i].data, conf.multi[i].data_len);
+ ATOMIC_SET(ctx->secret[i].variable, conf_secret[0]);
+ ctx->secret[i].constant = conf_secret[1];
assert(ctx->secret_lifetime == 0);
}
knotd_conf_free(&conf);
if (ctx->secret_cnt == 0) {
- ret = dnssec_random_buffer((uint8_t *)&ctx->secret[0], sizeof(ctx->secret[0]));
+ uint64_t gen_secret[2];
+ ret = dnssec_random_buffer((uint8_t *)gen_secret, sizeof(gen_secret));
if (ret != KNOT_EOK) {
free(ctx);
return ret;
}
+ ATOMIC_SET(ctx->secret[0].variable, gen_secret[0]);
+ ctx->secret[0].constant = gen_secret[1];
ctx->secret_cnt = 1;
conf = knotd_conf_mod(mod, MOD_SECRET_LIFETIME);
@@ -299,11 +308,6 @@ int cookies_load(knotd_mod_t *mod)
}
}
-#ifndef KNOT_HAVE_ATOMIC
- knotd_mod_log(mod, LOG_WARNING, "the module might work slightly wrong on this platform");
- ctx->badcookie_slip = 1;
-#endif
-
return knotd_mod_hook(mod, KNOTD_STAGE_BEGIN, cookies_process);
}
@@ -314,6 +318,10 @@ void cookies_unload(knotd_mod_t *mod)
(void)pthread_cancel(ctx->update_secret);
(void)pthread_join(ctx->update_secret, NULL);
}
+ ATOMIC_DEINIT(ctx->badcookie_ctr);
+ for (int i = 0; i < 2; ++i) {
+ ATOMIC_DEINIT(ctx->secret[i].variable);
+ }
memzero(&ctx->secret, sizeof(ctx->secret));
free(ctx);
}
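
The hunks above rely on a changed layout of ctx->secret[] that is not itself shown in this patch: each 16-byte cookie secret is split into two 64-bit halves, and only the half rotated at runtime is atomic. Roughly, as an assumption about the real definition in cookies.c:

    typedef struct {
        knot_atomic_uint64_t variable;  /* rotated by the update thread, read by workers */
        uint64_t constant;              /* written once in cookies_load() */
    } cookies_secret_t;                 /* illustrative name only */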
diff --git a/src/knot/modules/probe/probe.c b/src/knot/modules/probe/probe.c
index 3e0a646b5..f9eb177ac 100644
--- a/src/knot/modules/probe/probe.c
+++ b/src/knot/modules/probe/probe.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2023 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+/* Copyright (C) 2024 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -47,6 +47,7 @@ static void free_probe_ctx(probe_ctx_t *ctx)
{
for (int i = 0; ctx->probes != NULL && i < ctx->probe_count; ++i) {
knot_probe_free(ctx->probes[i]);
+ ATOMIC_DEINIT(ctx->last_times[i]);
}
free(ctx->probes);
free(ctx->last_times);
@@ -137,6 +138,9 @@ int probe_load(knotd_mod_t *mod)
free_probe_ctx(ctx);
return KNOT_ENOMEM;
}
+ for (int i = 0; i < ctx->probe_count; i++) {
+ ATOMIC_INIT(ctx->last_times[i], 0);
+ }
ctx->min_diff_ns = 0;
conf = knotd_conf_mod(mod, MOD_MAX_RATE);
diff --git a/src/knot/nameserver/query_module.c b/src/knot/nameserver/query_module.c
index 0708b1704..8b7d7958f 100644
--- a/src/knot/nameserver/query_module.c
+++ b/src/knot/nameserver/query_module.c
@@ -284,6 +284,10 @@ int knotd_mod_stats_add(knotd_mod_t *mod, const char *ctr_name, uint32_t idx_cou
knotd_mod_stats_free(mod);
return KNOT_ENOMEM;
}
+
+ for (unsigned j = 0; j < idx_count; j++) {
+ ATOMIC_INIT(mod->stats_vals[i][j], 0);
+ }
}
} else {
for (uint32_t i = 0; i < mod->stats_count; i++) {
@@ -311,9 +315,9 @@ int knotd_mod_stats_add(knotd_mod_t *mod, const char *ctr_name, uint32_t idx_cou
return KNOT_ENOMEM;
}
mod->stats_vals[i] = new_vals;
- new_vals += offset;
- for (uint32_t j = 0; j < idx_count; j++) {
- *new_vals++ = 0;
+
+ for (unsigned j = 0; j < idx_count; j++) {
+ ATOMIC_INIT(mod->stats_vals[i][offset + j], 0);
}
}
}
@@ -338,6 +342,9 @@ void knotd_mod_stats_free(knotd_mod_t *mod)
if (mod->stats_vals != NULL) {
unsigned threads = knotd_mod_threads(mod);
for (unsigned i = 0; i < threads; i++) {
+ for (unsigned j = 0; j < mod->stats_info->count; j++) {
+ ATOMIC_DEINIT(mod->stats_vals[i][j]);
+ }
free(mod->stats_vals[i]);
}
}
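
The dropped zero-fill (*new_vals++ = 0) was fine while a counter was a plain integer; under the spinlock fallback each mod->stats_vals[i][j] is a struct whose lock must also be initialized, so both the freshly allocated arrays and the reallocated tail now go through ATOMIC_INIT(), with matching ATOMIC_DEINIT() in knotd_mod_stats_free(). The pattern in miniature (illustrative):

    for (unsigned j = 0; j < idx_count; j++) {
        ATOMIC_INIT(mod->stats_vals[i][offset + j], 0);   /* sets up lock and value */
    }
    /* ... and on teardown ... */
    for (unsigned j = 0; j < mod->stats_info->count; j++) {
        ATOMIC_DEINIT(mod->stats_vals[i][j]);             /* destroys the lock */
    }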
diff --git a/src/knot/server/server.c b/src/knot/server/server.c
index 627ebd281..40c873fbd 100644
--- a/src/knot/server/server.c
+++ b/src/knot/server/server.c
@@ -849,6 +849,7 @@ int server_init(server_t *server, int bg_workers)
evsched_deinit(&server->sched);
return ret;
}
+ ATOMIC_INIT(server->catalog_upd_signal, false);
pthread_rwlock_init(&server->ctl_lock, NULL);
@@ -913,6 +914,7 @@ void server_deinit(server_t *server)
catalog_update_clear(&server->catalog_upd);
catalog_update_deinit(&server->catalog_upd);
catalog_deinit(&server->catalog);
+ ATOMIC_DEINIT(server->catalog_upd_signal);
/* Close persistent timers DB. */
knot_lmdb_deinit(&server->timerdb);
diff --git a/src/knot/updates/zone-update.c b/src/knot/updates/zone-update.c
index 45c874ebc..0de40d53b 100644
--- a/src/knot/updates/zone-update.c
+++ b/src/knot/updates/zone-update.c
@@ -773,7 +773,7 @@ static int update_catalog(conf_t *conf, zone_update_t *update)
if (ret == KNOT_EOK) {
log_zone_info(update->zone->name, "enqueued %zd catalog updates", upd_count);
- update->zone->server->catalog_upd_signal = true;
+ ATOMIC_SET(update->zone->server->catalog_upd_signal, true);
if (kill(getpid(), SIGUSR1) != 0) {
ret = knot_map_errno();
}
diff --git a/src/knot/zone/backup.c b/src/knot/zone/backup.c
index 5c3038a50..5bf9b5903 100644
--- a/src/knot/zone/backup.c
+++ b/src/knot/zone/backup.c
@@ -444,7 +444,7 @@ static int backup_kaspdb(zone_backup_ctx_t *ctx, conf_t *conf, zone_t *zone,
int zone_backup(conf_t *conf, zone_t *zone)
{
- zone_backup_ctx_t *ctx = zone->backup_ctx;
+ zone_backup_ctx_t *ctx = ATOMIC_GET(zone->backup_ctx);
if (ctx == NULL) {
return KNOT_EINVAL;
}
diff --git a/src/knot/zone/contents.c b/src/knot/zone/contents.c
index 8a32cc87e..e2d00d4c5 100644
--- a/src/knot/zone/contents.c
+++ b/src/knot/zone/contents.c
@@ -192,6 +192,7 @@ zone_contents_t *zone_contents_new(const knot_dname_t *apex_name, bool use_binod
}
contents->apex->flags |= NODE_FLAGS_APEX;
contents->max_ttl = UINT32_MAX;
+ ATOMIC_INIT(contents->dnssec_expire, 0);
return contents;
@@ -522,6 +523,8 @@ void zone_contents_free(zone_contents_t *contents)
dnssec_nsec3_params_free(&contents->nsec3_params);
additionals_tree_free(contents->adds_tree);
+ ATOMIC_DEINIT(contents->dnssec_expire);
+
free(contents);
}
diff --git a/src/knot/zone/zone.c b/src/knot/zone/zone.c
index 1302dc9a5..16f645968 100644
--- a/src/knot/zone/zone.c
+++ b/src/knot/zone/zone.c
@@ -194,6 +194,8 @@ zone_t* zone_new(const knot_dname_t *name)
init_list(&zone->internal_notify);
+ ATOMIC_INIT(zone->backup_ctx, NULL);
+
return zone;
}
@@ -243,6 +245,8 @@ void zone_free(zone_t **zone_ptr)
ptrlist_free(&zone->internal_notify, NULL);
+ ATOMIC_DEINIT(zone->backup_ctx);
+
free(zone);
*zone_ptr = NULL;
}
diff --git a/src/libknot/quic/quic_conn.c b/src/libknot/quic/quic_conn.c
index 4cd9d03e3..85cce5849 100644
--- a/src/libknot/quic/quic_conn.c
+++ b/src/libknot/quic/quic_conn.c
@@ -60,6 +60,7 @@ knot_quic_table_t *knot_quic_table_new(size_t max_conns, size_t max_ibufs, size_
res->max_conns = max_conns;
res->ibufs_max = max_ibufs;
res->obufs_max = max_obufs;
+ ATOMIC_INIT(res->obufs_size, 0);
res->udp_payload_limit = udp_payload;
int ret = gnutls_priority_init2(&res->priority, KNOT_TLS_PRIORITIES, NULL,
@@ -99,7 +100,9 @@ void knot_quic_table_free(knot_quic_table_t *table)
assert(table->usage == 0);
assert(table->pointers == 0);
assert(table->ibufs_size == 0);
- assert(table->obufs_size == 0);
+ assert(ATOMIC_GET(table->obufs_size) == 0);
+
+ ATOMIC_DEINIT(table->obufs_size);
gnutls_priority_deinit(table->priority);
heap_deinit(table->expiry_heap);
@@ -134,7 +137,7 @@ void knot_quic_table_sweep(knot_quic_table_t *table, struct knot_quic_reply *swe
knot_sweep_stats_incr(stats, KNOT_SWEEP_CTR_LIMIT_CONN);
send_excessive_load(c, sweep_reply, table);
knot_quic_table_rem(c, table);
- } else if (table->obufs_size > table->obufs_max) {
+ } else if (ATOMIC_GET(table->obufs_size) > table->obufs_max) {
knot_sweep_stats_incr(stats, KNOT_SWEEP_CTR_LIMIT_OBUF);
send_excessive_load(c, sweep_reply, table);
knot_quic_table_rem(c, table);
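
Only readers of obufs_size appear in these hunks; the producers elsewhere in quic_conn.c account outgoing buffer memory as it is queued and released, which with the new macros amounts to something like (sketch, len is illustrative):

    ATOMIC_ADD(table->obufs_size, len);   /* outgoing buffer queued on a stream */
    /* ... later ... */
    ATOMIC_SUB(table->obufs_size, len);   /* buffer acknowledged and freed */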
diff --git a/src/libknot/quic/tls_common.c b/src/libknot/quic/tls_common.c
index a1158e5ed..45f02bc72 100644
--- a/src/libknot/quic/tls_common.c
+++ b/src/libknot/quic/tls_common.c
@@ -196,7 +196,7 @@ struct knot_creds *knot_creds_init_peer(const struct knot_creds *local_creds,
if (local_creds != NULL) {
creds->peer = true;
- creds->cert_creds = ATOMIC_GET(local_creds->cert_creds);
+ ATOMIC_INIT(creds->cert_creds, ATOMIC_GET(local_creds->cert_creds));
} else {
gnutls_certificate_credentials_t new_creds;
int ret = gnutls_certificate_allocate_credentials(&new_creds);
@@ -204,7 +204,7 @@ struct knot_creds *knot_creds_init_peer(const struct knot_creds *local_creds,
free(creds);
return NULL;
}
- creds->cert_creds = new_creds;
+ ATOMIC_INIT(creds->cert_creds, new_creds);
}
if (peer_pin_len > 0 && peer_pin != NULL) {
@@ -345,8 +345,9 @@ void knot_creds_free(struct knot_creds *creds)
return;
}
- if (!creds->peer && creds->cert_creds != NULL) {
- gnutls_certificate_free_credentials(creds->cert_creds);
+ if (!creds->peer && ATOMIC_GET(creds->cert_creds) != NULL) {
+ gnutls_certificate_free_credentials(ATOMIC_GET(creds->cert_creds));
+ ATOMIC_DEINIT(creds->cert_creds);
if (creds->cert_creds_prev != NULL) {
gnutls_certificate_free_credentials(creds->cert_creds_prev);
}
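
With cert_creds now an atomic pointer, a credential reload elsewhere in tls_common.c could publish a replacement object with a single pointer exchange, so concurrent readers always observe either the old or the new credentials, never a torn value. A hedged sketch (new_creds and the surrounding update function are not part of this patch):

    gnutls_certificate_credentials_t old = ATOMIC_XCHG(creds->cert_creds, new_creds);
    /* 'old' can then be parked in cert_creds_prev or freed once no session uses it */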
diff --git a/src/utils/knotd/main.c b/src/utils/knotd/main.c
index 55ac0f419..e96377b33 100644
--- a/src/utils/knotd/main.c
+++ b/src/utils/knotd/main.c
@@ -488,9 +488,9 @@ static void event_loop(server_t *server, const char *socket, bool daemonize,
}
if (sig_req_zones_reload && !sig_req_stop) {
sig_req_zones_reload = false;
- reload_t mode = server->catalog_upd_signal ? RELOAD_CATALOG : RELOAD_ZONES;
+ reload_t mode = ATOMIC_GET(server->catalog_upd_signal) ? RELOAD_CATALOG : RELOAD_ZONES;
pthread_rwlock_wrlock(&server->ctl_lock);
- server->catalog_upd_signal = false;
+ ATOMIC_SET(server->catalog_upd_signal, false);
server_update_zones(conf(), server, mode);
pthread_rwlock_unlock(&server->ctl_lock);
}
diff --git a/src/utils/kxdpgun/main.c b/src/utils/kxdpgun/main.c
index 3c036034a..8698adbcd 100644
--- a/src/utils/kxdpgun/main.c
+++ b/src/utils/kxdpgun/main.c
@@ -58,8 +58,8 @@
volatile int xdp_trigger = KXDPGUN_WAIT;
-volatile knot_atomic_uint64_t stats_trigger = 0;
-volatile knot_atomic_bool stats_switch = STATS_SUM;
+knot_atomic_uint64_t stats_trigger;
+knot_atomic_bool stats_switch;
unsigned global_cpu_aff_start = 0;
unsigned global_cpu_aff_step = 1;
@@ -1349,6 +1349,8 @@ static bool get_opts(int argc, char *argv[], xdp_gun_ctx_t *ctx)
int main(int argc, char *argv[])
{
int ecode = EXIT_FAILURE;
+ ATOMIC_INIT(stats_trigger, 0);
+ ATOMIC_INIT(stats_switch, STATS_SUM);
xdp_gun_ctx_t ctx = ctx_defaults, *thread_ctxs = NULL;
ctx.msgid = time(NULL) % UINT16_MAX;
@@ -1425,6 +1427,8 @@ int main(int argc, char *argv[])
ecode = EXIT_SUCCESS;
err:
+ ATOMIC_DEINIT(stats_trigger);
+ ATOMIC_DEINIT(stats_switch);
free(ctx.rss_conf);
free(thread_ctxs);
free(threads);
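
The two globals lose both volatile and their static initializers: in the fallback build knot_atomic_uint64_t and knot_atomic_bool are structs with an embedded spinlock, so "= 0" is no longer a valid initializer and the lock needs run-time setup anyway, hence ATOMIC_INIT() at the top of main() and ATOMIC_DEINIT() on the exit path (the volatile qualifier is redundant, since the fallback wrapper keeps a volatile member internally and the real atomics do not need it). The same adjustment appears in tests/contrib/test_atomic.c below. In miniature (illustrative):

    #include "contrib/atomic.h"

    knot_atomic_bool stats_switch;            /* no initializer: may be a struct */

    int main(void)
    {
        ATOMIC_INIT(stats_switch, false);     /* also sets up the spinlock */
        /* ... work ... */
        ATOMIC_DEINIT(stats_switch);
        return 0;
    }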
diff --git a/tests/contrib/test_atomic.c b/tests/contrib/test_atomic.c
index 88cae40c0..df531c385 100644
--- a/tests/contrib/test_atomic.c
+++ b/tests/contrib/test_atomic.c
@@ -30,10 +30,10 @@
#define UPPER_PTR ((void *) UPPER)
#define LOWER_PTR ((void *) LOWER)
-static volatile knot_atomic_uint64_t counter_add = 0;
-static volatile knot_atomic_uint64_t counter_sub = 0;
-static volatile knot_atomic_uint64_t atomic_var;
-static volatile knot_atomic_ptr_t atomic_var2;
+static knot_atomic_uint64_t counter_add;
+static knot_atomic_uint64_t counter_sub;
+static knot_atomic_uint64_t atomic_var;
+static knot_atomic_ptr_t atomic_var2;
static int errors = 0;
static int uppers;
static int lowers;
@@ -110,6 +110,11 @@ int main(int argc, char *argv[])
{
plan_lazy();
+ ATOMIC_INIT(counter_add, 0);
+ ATOMIC_INIT(counter_sub, 0);
+ ATOMIC_INIT(atomic_var, 0);
+ ATOMIC_INIT(atomic_var2, NULL);
+
// Register service and signal handler
struct sigaction sa;
sa.sa_handler = interrupt_handle;
@@ -123,8 +128,8 @@ int main(int argc, char *argv[])
dt_join(unit);
dt_delete(&unit);
- is_int(THREADS * CYCLES1 * 7, counter_add, "atomicity of ATOMIC_ADD");
- is_int(THREADS * CYCLES1 * 7, -counter_sub, "atomicity of ATOMIC_SUB");
+ is_int(THREADS * CYCLES1 * 7, ATOMIC_GET(counter_add), "atomicity of ATOMIC_ADD");
+ is_int(THREADS * CYCLES1 * 7, -ATOMIC_GET(counter_sub), "atomicity of ATOMIC_SUB");
// Test for atomicity of ATOMIC_SET and ATOMIC_GET.
unit = dt_create(THREADS, thread_set, NULL, NULL);
@@ -139,7 +144,7 @@ int main(int argc, char *argv[])
uppers = 0; // Initialize in code so as to calm down Coverity.
lowers = 0; // Idem.
- atomic_var2 = UPPER_PTR;
+ ATOMIC_SET(atomic_var2, UPPER_PTR);
uppers++;
pthread_mutex_init(&mx, NULL);
@@ -149,9 +154,9 @@ int main(int argc, char *argv[])
dt_delete(&unit);
pthread_mutex_destroy(&mx);
- if (atomic_var2 == UPPER_PTR) {
+ if (ATOMIC_GET(atomic_var2) == UPPER_PTR) {
uppers_count++;
- } else if (atomic_var2 == LOWER_PTR) {
+ } else if (ATOMIC_GET(atomic_var2) == LOWER_PTR) {
lowers_count++;
} else {
errors++;
@@ -161,5 +166,10 @@ int main(int argc, char *argv[])
is_int(uppers, uppers_count, "atomicity of ATOMIC_XCHG");
is_int(lowers, lowers_count, "atomicity of ATOMIC_XCHG");
+ ATOMIC_DEINIT(counter_add);
+ ATOMIC_DEINIT(counter_sub);
+ ATOMIC_DEINIT(atomic_var);
+ ATOMIC_DEINIT(atomic_var2);
+
return 0;
}