summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--daemon/bindings/cache.c6
-rw-r--r--daemon/bindings/net.c18
-rw-r--r--daemon/engine.c12
-rw-r--r--daemon/ffimodule.c2
-rw-r--r--daemon/http.c6
-rw-r--r--daemon/io.c25
-rw-r--r--daemon/lua/kres-gen.lua4
-rwxr-xr-xdaemon/lua/kres-gen.sh4
-rw-r--r--daemon/lua/sandbox.lua.in8
-rw-r--r--daemon/network.c20
-rw-r--r--daemon/session.c24
-rw-r--r--daemon/tls.c52
-rw-r--r--daemon/tls_session_ticket-srv.c27
-rw-r--r--daemon/udp_queue.c2
-rw-r--r--daemon/worker.c136
-rw-r--r--daemon/zimport.c6
-rw-r--r--doc/config-debugging.rst15
-rw-r--r--lib/cache/api.c40
-rw-r--r--lib/cache/cdb_lmdb.c22
-rw-r--r--lib/cache/entry_list.c18
-rw-r--r--lib/cache/entry_pkt.c10
-rw-r--r--lib/cache/entry_rr.c14
-rw-r--r--lib/cache/impl.h6
-rw-r--r--lib/cache/knot_pkt.c2
-rw-r--r--lib/cache/nsec1.c42
-rw-r--r--lib/cache/nsec3.c30
-rw-r--r--lib/cache/peek.c54
-rw-r--r--lib/cookies/alg_sha.c2
-rw-r--r--lib/cookies/helper.c16
-rw-r--r--lib/dnssec.c10
-rw-r--r--lib/dnssec/nsec.c14
-rw-r--r--lib/dnssec/nsec3.c26
-rw-r--r--lib/dnssec/signature.c12
-rw-r--r--lib/generic/lru.c18
-rw-r--r--lib/generic/pack.h12
-rw-r--r--lib/generic/queue.c2
-rw-r--r--lib/generic/trie.c8
-rw-r--r--lib/layer/iterate.c14
-rw-r--r--lib/layer/validate.c8
-rw-r--r--lib/module.c2
-rw-r--r--lib/resolve.c30
-rw-r--r--lib/resolve.h4
-rw-r--r--lib/rplan.c2
-rw-r--r--lib/selection.c16
-rw-r--r--lib/selection_forward.c2
-rw-r--r--lib/selection_iter.c6
-rw-r--r--lib/utils.c44
-rw-r--r--lib/utils.h37
-rw-r--r--lib/zonecut.c20
-rw-r--r--meson.build6
-rw-r--r--modules/cookies/cookiectl.c36
-rw-r--r--modules/cookies/cookiemonster.c16
-rw-r--r--modules/cookies/cookies.c2
-rw-r--r--modules/dnstap/dnstap.c8
-rw-r--r--modules/hints/hints.c4
-rw-r--r--modules/stats/stats.c2
-rw-r--r--utils/cache_gc/db.c6
-rw-r--r--utils/cache_gc/kr_cache_gc.c4
-rw-r--r--utils/cache_gc/main.c4
59 files changed, 505 insertions, 493 deletions
diff --git a/daemon/bindings/cache.c b/daemon/bindings/cache.c
index cf6512f3..3ab1e532 100644
--- a/daemon/bindings/cache.c
+++ b/daemon/bindings/cache.c
@@ -10,7 +10,7 @@
static struct kr_cache * cache_assert_open(lua_State *L)
{
struct kr_cache *cache = &the_worker->engine->resolver.cache;
- if (!kr_assume(cache) || !kr_cache_is_open(cache))
+ if (kr_fails_assert(cache) || !kr_cache_is_open(cache))
lua_error_p(L, "no cache is open yet, use cache.open() or cache.size, etc.");
return cache;
}
@@ -286,7 +286,7 @@ static void cache_dump(lua_State *L, knot_db_val_t keyval[])
}
ret = !knot_dname_to_str(name, dname, sizeof(name));
- if (!kr_assume(!ret)) return;
+ if (kr_fails_assert(!ret)) return;
/* If name typemap doesn't exist yet, create it */
lua_getfield(L, -1, name);
@@ -367,7 +367,7 @@ static void cache_zone_import_cb(int state, void *param)
{
(void)state;
struct worker_ctx *worker = param;
- if (!kr_assume(worker && worker->z_import)) return;
+ if (kr_fails_assert(worker && worker->z_import)) return;
zi_free(worker->z_import);
worker->z_import = NULL;
}
diff --git a/daemon/bindings/net.c b/daemon/bindings/net.c
index 440b809d..8906f5e2 100644
--- a/daemon/bindings/net.c
+++ b/daemon/bindings/net.c
@@ -49,7 +49,7 @@ static int net_list_add(const char *key, void *val, void *ext)
lua_pushliteral(L, "unix");
break;
default:
- (void)!kr_assume(false);
+ kr_assert(false);
lua_pushliteral(L, "invalid");
}
lua_setfield(L, -2, "family");
@@ -79,7 +79,7 @@ static int net_list_add(const char *key, void *val, void *ext)
} else if (ep->flags.sock_type == SOCK_DGRAM) {
lua_pushliteral(L, "udp");
} else {
- (void)!kr_assume(false);
+ kr_assert(false);
lua_pushliteral(L, "invalid");
}
lua_setfield(L, -2, "protocol");
@@ -108,7 +108,7 @@ static int net_list(lua_State *L)
* \return success */
static bool net_listen_addrs(lua_State *L, int port, endpoint_flags_t flags, int16_t nic_queue)
{
- if (!kr_assume(flags.xdp || nic_queue == -1))
+ if (kr_fails_assert(flags.xdp || nic_queue == -1))
return false;
/* Case: table with 'addr' field; only follow that field directly. */
@@ -510,7 +510,7 @@ static int tls_params2lua(lua_State *L, trie_t *params)
} else if (ia_len == 2 + sizeof(struct in6_addr)) {
af = AF_INET6;
}
- if (!kr_assume(key && af != AF_UNSPEC))
+ if (kr_fails_assert(key && af != AF_UNSPEC))
lua_error_p(L, "internal error: bad IP address");
uint16_t port;
memcpy(&port, key, sizeof(port));
@@ -518,7 +518,7 @@ static int tls_params2lua(lua_State *L, trie_t *params)
const char *ia = key + sizeof(port);
char str[INET6_ADDRSTRLEN + 1 + 5 + 1];
size_t len = sizeof(str);
- if (!kr_assume(kr_ntop_str(af, ia, port, str, &len) == kr_ok()))
+ if (kr_fails_assert(kr_ntop_str(af, ia, port, str, &len) == kr_ok()))
lua_error_p(L, "internal error: bad IP address conversion");
/* ...and push it as [1]. */
lua_pushinteger(L, 1);
@@ -526,7 +526,7 @@ static int tls_params2lua(lua_State *L, trie_t *params)
lua_settable(L, -3);
const tls_client_param_t *e = *trie_it_val(it);
- if (!kr_assume(e))
+ if (kr_fails_assert(e))
lua_error_p(L, "internal problem - NULL entry for %s", str);
/* .hostname = */
@@ -553,7 +553,7 @@ static int tls_params2lua(lua_State *L, trie_t *params)
uint8_t pin_base64[TLS_SHA256_BASE64_BUFLEN];
int err = kr_base64_encode(e->pins.at[i], TLS_SHA256_RAW_LEN,
pin_base64, sizeof(pin_base64));
- if (!kr_assume(err >= 0))
+ if (kr_fails_assert(err >= 0))
lua_error_p(L,
"internal problem when converting pin_sha256: %s",
kr_strerror(err));
@@ -687,7 +687,7 @@ static int net_tls_client(lua_State *L)
ERROR("pin_sha256 is not a string");
uint8_t *pin_raw = malloc(TLS_SHA256_RAW_LEN);
/* Push the string early to simplify error processing. */
- if (!kr_assume(pin_raw && array_push(newcfg->pins, pin_raw) >= 0)) {
+ if (kr_fails_assert(pin_raw && array_push(newcfg->pins, pin_raw) >= 0)) {
free(pin_raw);
ERROR("%s", kr_strerror(ENOMEM));
}
@@ -943,7 +943,7 @@ static int net_outgoing(lua_State *L, int family)
lua_pushnil(L);
return 1;
}
- if (!kr_assume(addr->ip.sa_family == family))
+ if (kr_fails_assert(addr->ip.sa_family == family))
lua_error_p(L, "bad address family");
char addr_buf[INET6_ADDRSTRLEN];
int err;
diff --git a/daemon/engine.c b/daemon/engine.c
index c63ea2a0..3ad80f47 100644
--- a/daemon/engine.c
+++ b/daemon/engine.c
@@ -473,11 +473,11 @@ static void init_measurement(struct engine *engine)
"})\n"
"jit.off()\n", statspath
);
- if (!kr_assume(ret > 0))
+ if (kr_fails_assert(ret > 0))
return;
ret = luaL_loadstring(engine->L, snippet);
- if (!kr_assume(ret == 0)) {
+ if (kr_fails_assert(ret == 0)) {
free(snippet);
return;
}
@@ -562,7 +562,7 @@ static void engine_unload(struct engine *engine, struct kr_module *module)
void engine_deinit(struct engine *engine)
{
- if (!engine || !kr_assume(engine->L))
+ if (!engine || kr_fails_assert(engine->L))
return;
/* Only close sockets and services; no need to clean up mempool. */
@@ -627,7 +627,7 @@ int engine_load_sandbox(struct engine *engine)
int engine_loadconf(struct engine *engine, const char *config_path)
{
- if (!kr_assume(config_path))
+ if (kr_fails_assert(config_path))
return kr_error(EINVAL);
char cwd[PATH_MAX];
@@ -675,7 +675,7 @@ static size_t module_find(module_array_t *mod_list, const char *name)
int engine_register(struct engine *engine, const char *name, const char *precedence, const char* ref)
{
- if (!kr_assume(engine && name))
+ if (kr_fails_assert(engine && name))
return kr_error(EINVAL);
/* Make sure module is unloaded */
(void) engine_unregister(engine, name);
@@ -715,7 +715,7 @@ int engine_register(struct engine *engine, const char *name, const char *precede
} else {
ret = engine_pcall(L, 1);
}
- if (!kr_assume(ret == 0)) { /* probably not critical, but weird */
+ if (kr_fails_assert(ret == 0)) { /* probably not critical, but weird */
kr_log_error("[system] internal error when loading C module %s: %s\n",
module->name, lua_tostring(L, -1));
lua_pop(L, 1);
diff --git a/daemon/ffimodule.c b/daemon/ffimodule.c
index d9487e00..2c36f36b 100644
--- a/daemon/ffimodule.c
+++ b/daemon/ffimodule.c
@@ -129,7 +129,7 @@ static int l_ffi_call_layer(kr_layer_t *ctx, int slot_ix)
} else if (lua_isnil(L, -1)) { /* Don't change state. */
- } else if (!kr_assume(!lua_isthread(L, -1))) { /* Continuations */
+ } else if (kr_fails_assert(!lua_isthread(L, -1))) { /* Continuations */
/* TODO: unused, possibly in a bad shape. Meant KR_STATE_YIELD? */
if (l_ffi_defer(lua_tothread(L, -1)) != 0)
state = KR_STATE_FAIL;
diff --git a/daemon/http.c b/daemon/http.c
index c7d1cb3f..3055f3d0 100644
--- a/daemon/http.c
+++ b/daemon/http.c
@@ -124,7 +124,7 @@ static int send_data_callback(nghttp2_session *h2, nghttp2_frame *frame, const u
if (ret < 0)
return NGHTTP2_ERR_CALLBACK_FAILURE;
data->pos += length;
- if (!kr_assume(data->pos <= data->len))
+ if (kr_fails_assert(data->pos <= data->len))
return NGHTTP2_ERR_CALLBACK_FAILURE;
ret = send_padding(ctx, (uint8_t)frame->data.padlen);
@@ -441,7 +441,7 @@ static int on_frame_recv_callback(nghttp2_session *h2, const nghttp2_frame *fram
struct http_ctx *ctx = (struct http_ctx *)user_data;
ssize_t len;
int32_t stream_id = frame->hd.stream_id;
- if(!kr_assume(stream_id != -1))
+ if(kr_fails_assert(stream_id != -1))
return NGHTTP2_ERR_CALLBACK_FAILURE;
if ((frame->hd.flags & NGHTTP2_FLAG_END_STREAM) && ctx->incomplete_stream == stream_id) {
@@ -563,7 +563,7 @@ ssize_t http_process_input_data(struct session *session, const uint8_t *buf,
if (!ctx->h2)
return kr_error(ENOSYS);
- if (!kr_assume(ctx->session == session))
+ if (kr_fails_assert(ctx->session == session))
return kr_error(EINVAL);
ctx->submitted = 0;
diff --git a/daemon/io.c b/daemon/io.c
index dae43024..255f67c4 100644
--- a/daemon/io.c
+++ b/daemon/io.c
@@ -75,7 +75,7 @@ void udp_recv(uv_udp_t *handle, ssize_t nread, const uv_buf_t *buf,
if (session_flags(s)->outgoing) {
const struct sockaddr *peer = session_get_peer(s);
- if (!kr_assume(peer->sa_family != AF_UNSPEC))
+ if (kr_fails_assert(peer->sa_family != AF_UNSPEC))
return;
if (kr_sockaddr_cmp(peer, addr) != 0) {
kr_log_verbose("[io] <= ignoring UDP from unexpected address '%s'\n",
@@ -85,7 +85,7 @@ void udp_recv(uv_udp_t *handle, ssize_t nread, const uv_buf_t *buf,
}
ssize_t consumed = session_wirebuf_consume(s, (const uint8_t *)buf->base,
nread);
- (void)!kr_assume(consumed == nread);
+ kr_assert(consumed == nread);
session_wirebuf_process(s, addr);
session_wirebuf_discard(s);
mp_flush(the_worker->pkt_pool.ctx);
@@ -209,7 +209,7 @@ void tcp_timeout_trigger(uv_timer_t *timer)
{
struct session *s = timer->data;
- if (!kr_assume(!session_flags(s)->closing))
+ if (kr_fails_assert(!session_flags(s)->closing))
return;
if (!session_tasklist_is_empty(s)) {
@@ -266,7 +266,7 @@ void tcp_timeout_trigger(uv_timer_t *timer)
static void tcp_recv(uv_stream_t *handle, ssize_t nread, const uv_buf_t *buf)
{
struct session *s = handle->data;
- if (!kr_assume(s && session_get_handle(s) == (uv_handle_t *)handle && handle->type == UV_TCP))
+ if (kr_fails_assert(s && session_get_handle(s) == (uv_handle_t *)handle && handle->type == UV_TCP))
return;
if (session_flags(s)->closing) {
@@ -338,7 +338,7 @@ static void tcp_recv(uv_stream_t *handle, ssize_t nread, const uv_buf_t *buf)
/* data points to start of the free space in session wire buffer.
Simple increase internal counter. */
consumed = session_wirebuf_consume(s, data, data_len);
- (void)!kr_assume(consumed == data_len);
+ kr_assert(consumed == data_len);
int ret = session_wirebuf_process(s, session_get_peer(s));
if (ret < 0) {
@@ -814,13 +814,12 @@ static void xdp_rx(uv_poll_t* handle, int status, int events)
, NULL
#endif
);
- if (kr_assume(ret == KNOT_EOK)) {
- kr_log_verbose("[xdp] poll triggered, processing a batch of %d packets\n",
- (int)rcvd);
- } else { /* ATM other error codes can only be returned when called incorrectly */
+ if (kr_fails_assert(ret == KNOT_EOK)) {
+ /* ATM other error codes can only be returned when called incorrectly */
kr_log_error("[xdp] knot_xdp_recv(): %d, %s\n", ret, knot_strerror(ret));
return;
}
+ kr_log_verbose("[xdp] poll triggered, processing a batch of %d packets\n", (int)rcvd);
kr_require(rcvd <= XDP_RX_BATCH_SIZE);
for (int i = 0; i < rcvd; ++i) {
const knot_xdp_msg_t *msg = &msgs[i];
@@ -844,7 +843,7 @@ static void xdp_rx(uv_poll_t* handle, int status, int events)
/// Warn if the XDP program is running in emulated mode (XDP_SKB)
static void xdp_warn_mode(const char *ifname)
{
- if (!kr_assume(ifname))
+ if (kr_fails_assert(ifname))
return;
const unsigned if_index = if_nametoindex(ifname);
@@ -898,7 +897,7 @@ int io_listen_xdp(uv_loop_t *loop, struct endpoint *ep, const char *ifname)
if (!ret) xdp_warn_mode(ifname);
if (!ret) ret = uv_idle_init(loop, &xhd->tx_waker);
- if (ret || !kr_assume(xhd->socket)) {
+ if (ret || kr_fails_assert(xhd->socket)) {
free(xhd);
return ret == 0 ? kr_error(EINVAL) : kr_error(ret);
}
@@ -959,7 +958,7 @@ static void io_deinit(uv_handle_t *handle)
knot_xdp_deinit(xhd->socket);
free(xhd);
#else
- (void)!kr_assume(false);
+ kr_assert(false);
#endif
}
}
@@ -978,7 +977,7 @@ int io_start_read(uv_handle_t *handle)
case UV_TCP:
return uv_read_start((uv_stream_t *)handle, &handle_getbuf, &tcp_recv);
default:
- (void)!kr_assume(false);
+ kr_assert(false);
return kr_error(EINVAL);
}
}
diff --git a/daemon/lua/kres-gen.lua b/daemon/lua/kres-gen.lua
index b0100d72..981d38fc 100644
--- a/daemon/lua/kres-gen.lua
+++ b/daemon/lua/kres-gen.lua
@@ -301,8 +301,8 @@ struct kr_server_selection {
};
kr_layer_t kr_layer_t_static;
-_Bool kr_dbg_assumption_abort;
-int kr_dbg_assumption_fork;
+_Bool kr_dbg_assertion_abort;
+int kr_dbg_assertion_fork;
typedef int32_t (*kr_stale_cb)(int32_t ttl, const knot_dname_t *owner, uint16_t type,
const struct kr_query *qry);
diff --git a/daemon/lua/kres-gen.sh b/daemon/lua/kres-gen.sh
index 1a2b58df..9151fea2 100755
--- a/daemon/lua/kres-gen.sh
+++ b/daemon/lua/kres-gen.sh
@@ -134,8 +134,8 @@ EOF
# static variables; these lines might not be simple to generate
printf "
kr_layer_t kr_layer_t_static;
-_Bool kr_dbg_assumption_abort;
-int kr_dbg_assumption_fork;
+_Bool kr_dbg_assertion_abort;
+int kr_dbg_assertion_fork;
"
printf "
diff --git a/daemon/lua/sandbox.lua.in b/daemon/lua/sandbox.lua.in
index 0c86c424..fe24074b 100644
--- a/daemon/lua/sandbox.lua.in
+++ b/daemon/lua/sandbox.lua.in
@@ -132,14 +132,14 @@ setmetatable(env, {
debugging = {}
setmetatable(debugging, {
__index = function(_, k)
- if k == 'assumption_abort' then return ffi.C.kr_dbg_assumption_abort
- elseif k == 'assumption_fork' then return ffi.C.kr_dbg_assumption_fork
+ if k == 'assertion_abort' then return ffi.C.kr_dbg_assertion_abort
+ elseif k == 'assertion_fork' then return ffi.C.kr_dbg_assertion_fork
else panic('invalid debugging option: ' .. tostring(k))
end
end,
__newindex = function(_, k, v)
- if k == 'assumption_abort' then ffi.C.kr_dbg_assumption_abort = v
- elseif k == 'assumption_fork' then ffi.C.kr_dbg_assumption_fork = v
+ if k == 'assertion_abort' then ffi.C.kr_dbg_assertion_abort = v
+ elseif k == 'assertion_fork' then ffi.C.kr_dbg_assertion_fork = v
else panic('invalid debugging option: ' .. tostring(k))
end
end
diff --git a/daemon/network.c b/daemon/network.c
index fe1df3ab..7f31ba73 100644
--- a/daemon/network.c
+++ b/daemon/network.c
@@ -35,7 +35,7 @@ static int endpoint_open_lua_cb(struct network *net, struct endpoint *ep,
const char *log_addr)
{
const bool ok = ep->flags.kind && !ep->handle && !ep->engaged && ep->fd != -1;
- if (!kr_assume(ok))
+ if (kr_fails_assert(ok))
return kr_error(EINVAL);
/* First find callback in the endpoint registry. */
lua_State *L = the_worker->engine->L;
@@ -138,7 +138,7 @@ static void endpoint_close(struct network *net, struct endpoint *ep, bool force)
}
if (ep->flags.kind && !is_control && !is_xdp) {
- (void)!kr_assume(!ep->handle);
+ kr_assert(!ep->handle);
/* Special lua-handled endpoint. */
if (ep->engaged) {
endpoint_close_lua_cb(net, ep);
@@ -250,7 +250,7 @@ static int open_endpoint(struct network *net, const char *addr_str,
? sa == NULL && ep->fd == -1 && ep->nic_queue >= 0
&& ep->flags.sock_type == SOCK_DGRAM && !ep->flags.tls
: (sa != NULL) != (ep->fd != -1);
- if (!kr_assume(ok))
+ if (kr_fails_assert(ok))
return kr_error(EINVAL);
if (ep->handle) {
return kr_error(EEXIST);
@@ -309,7 +309,7 @@ static int open_endpoint(struct network *net, const char *addr_str,
} /* else */
if (ep->flags.sock_type == SOCK_DGRAM) {
- if (!kr_assume(!ep->flags.tls))
+ if (kr_fails_assert(!ep->flags.tls))
return kr_error(EINVAL);
uv_udp_t *ep_handle = malloc(sizeof(uv_udp_t));
ep->handle = (uv_handle_t *)ep_handle;
@@ -327,7 +327,7 @@ static int open_endpoint(struct network *net, const char *addr_str,
goto finish_ret;
} /* else */
- (void)!kr_assume(false);
+ kr_assert(false);
return kr_error(EINVAL);
finish_ret:
if (!ret) return ret;
@@ -374,7 +374,7 @@ static int create_endpoint(struct network *net, const char *addr_str,
int network_listen_fd(struct network *net, int fd, endpoint_flags_t flags)
{
- if (!kr_assume(!flags.xdp))
+ if (kr_fails_assert(!flags.xdp))
return kr_error(EINVAL);
/* Extract fd's socket type. */
socklen_t len = sizeof(flags.sock_type);
@@ -382,7 +382,7 @@ int network_listen_fd(struct network *net, int fd, endpoint_flags_t flags)
if (ret != 0)
return kr_error(errno);
const bool is_dtls = flags.sock_type == SOCK_DGRAM && !flags.kind && flags.tls;
- if (!kr_assume(!is_dtls))
+ if (kr_fails_assert(!is_dtls))
return kr_error(EINVAL); /* Perhaps DTLS some day. */
if (flags.sock_type != SOCK_DGRAM && flags.sock_type != SOCK_STREAM)
return kr_error(EBADF);
@@ -445,7 +445,7 @@ static int16_t nic_queue_auto(void)
int network_listen(struct network *net, const char *addr, uint16_t port,
int16_t nic_queue, endpoint_flags_t flags)
{
- if (!kr_assume(net != NULL && addr != 0 && nic_queue >= -1))
+ if (kr_fails_assert(net != NULL && addr != 0 && nic_queue >= -1))
return kr_error(EINVAL);
if (flags.xdp && nic_queue < 0) {
@@ -557,7 +557,7 @@ static int set_bpf_cb(const char *key, void *val, void *ext)
{
endpoint_array_t *endpoints = (endpoint_array_t *)val;
int *bpffd = (int *)ext;
- if (!kr_assume(endpoints && bpffd))
+ if (kr_fails_assert(endpoints && bpffd))
return kr_error(EINVAL);
for (size_t i = 0; i < endpoints->len; i++) {
@@ -596,7 +596,7 @@ int network_set_bpf(struct network *net, int bpf_fd)
static int clear_bpf_cb(const char *key, void *val, void *ext)
{
endpoint_array_t *endpoints = (endpoint_array_t *)val;
- if (!kr_assume(endpoints))
+ if (kr_fails_assert(endpoints))
return kr_error(EINVAL);
for (size_t i = 0; i < endpoints->len; i++) {
diff --git a/daemon/session.c b/daemon/session.c
index 31d7fa60..a0bf23f3 100644
--- a/daemon/session.c
+++ b/daemon/session.c
@@ -167,12 +167,12 @@ int session_tasklist_add(struct session *session, struct qr_task *task)
key_len = sizeof(char *);
}
trie_val_t *v = trie_get_ins(t, key, key_len);
- if (!kr_assume(v))
+ if (kr_fails_assert(v))
return kr_error(ENOMEM);
if (*v == NULL) {
*v = task;
worker_task_ref(task);
- } else if (!kr_assume(*v == task)) {
+ } else if (kr_fails_assert(*v == task)) {
return kr_error(EINVAL);
}
return kr_ok();
@@ -221,7 +221,7 @@ struct qr_task *session_tasklist_del_first(struct session *session, bool deref)
}
struct qr_task* session_tasklist_del_msgid(const struct session *session, uint16_t msg_id)
{
- if (!kr_assume(session->sflags.outgoing))
+ if (kr_fails_assert(session->sflags.outgoing))
return NULL;
trie_t *t = session->tasks;
struct qr_task *ret = NULL;
@@ -240,7 +240,7 @@ struct qr_task* session_tasklist_del_msgid(const struct session *session, uint16
struct qr_task* session_tasklist_find_msgid(const struct session *session, uint16_t msg_id)
{
- if (!kr_assume(session->sflags.outgoing))
+ if (kr_fails_assert(session->sflags.outgoing))
return NULL;
trie_t *t = session->tasks;
struct qr_task *ret = NULL;
@@ -363,7 +363,7 @@ struct session *session_new(uv_handle_t *handle, bool has_tls, bool has_http)
session->wire_buf = the_worker->wire_buf;
session->wire_buf_size = sizeof(the_worker->wire_buf);
} else {
- (void)!kr_assume(handle->type == UV_POLL/*XDP*/);
+ kr_assert(handle->type == UV_POLL/*XDP*/);
/* - wire_buf* are left zeroed, as they make no sense
* - timer is unused but OK for simplicity (server-side sessions are few)
*/
@@ -503,7 +503,7 @@ int session_timer_start(struct session *session, uv_timer_cb cb,
uint64_t timeout, uint64_t repeat)
{
uv_timer_t *timer = &session->timeout;
- if (!kr_assume(timer->data == session))
+ if (kr_fails_assert(timer->data == session))
return kr_error(EINVAL);
int ret = uv_timer_start(timer, cb, timeout, repeat);
if (ret != 0) {
@@ -628,7 +628,7 @@ int session_discard_packet(struct session *session, const knot_pkt_t *pkt)
return kr_error(EINVAL);
} else if (handle->type == UV_TCP) {
/* wire_buf contains TCP DNS message. */
- if (!kr_assume(wirebuf_data_size >= 2)) {
+ if (kr_fails_assert(wirebuf_data_size >= 2)) {
/* TCP message length field isn't in buffer, must not happen. */
session->wire_buf_start_idx = 0;
session->wire_buf_end_idx = 0;
@@ -636,7 +636,7 @@ int session_discard_packet(struct session *session, const knot_pkt_t *pkt)
}
wirebuf_msg_size = knot_wire_read_u16(wirebuf_msg_start);
wirebuf_msg_start += 2;
- if (!kr_assume(wirebuf_msg_size + 2 <= wirebuf_data_size)) {
+ if (kr_fails_assert(wirebuf_msg_size + 2 <= wirebuf_data_size)) {
/* TCP message length field is greater then
* number of bytes in buffer, must not happen. */
session->wire_buf_start_idx = 0;
@@ -645,7 +645,7 @@ int session_discard_packet(struct session *session, const knot_pkt_t *pkt)
}
}
- if (!kr_assume(wirebuf_msg_start == pkt_msg_start)) {
+ if (kr_fails_assert(wirebuf_msg_start == pkt_msg_start)) {
/* packet wirebuf must be located at the beginning
* of the session wirebuf, must not happen. */
session->wire_buf_start_idx = 0;
@@ -653,7 +653,7 @@ int session_discard_packet(struct session *session, const knot_pkt_t *pkt)
return kr_error(EINVAL);
}
- if (!kr_assume(wirebuf_msg_size >= pkt_msg_size)) {
+ if (kr_fails_assert(wirebuf_msg_size >= pkt_msg_size)) {
/* Message length field is lesser then packet size,
* must not happen. */
session->wire_buf_start_idx = 0;
@@ -748,7 +748,7 @@ int session_wirebuf_process(struct session *session, const struct sockaddr *peer
while (((pkt = session_produce_packet(session, &the_worker->pkt_pool)) != NULL) &&
(ret < max_iterations)) {
- if (!kr_assume(!session_wirebuf_error(session)))
+ if (kr_fails_assert(!session_wirebuf_error(session)))
return -1;
int res = worker_submit(session, peer, NULL, NULL, NULL, pkt);
/* Errors from worker_submit() are intetionally *not* handled in order to
@@ -775,7 +775,7 @@ void session_kill_ioreq(struct session *session, struct qr_task *task)
{
if (!session || session->sflags.closing)
return;
- if (!kr_assume(session->sflags.outgoing && session->handle))
+ if (kr_fails_assert(session->sflags.outgoing && session->handle))
return;
session_tasklist_del(session, task);
if (session->handle->type == UV_UDP) {
diff --git a/daemon/tls.c b/daemon/tls.c
index a02495c4..4da09108 100644
--- a/daemon/tls.c
+++ b/daemon/tls.c
@@ -69,7 +69,7 @@ static int kres_gnutls_set_priority(gnutls_session_t session) {
static ssize_t kres_gnutls_pull(gnutls_transport_ptr_t h, void *buf, size_t len)
{
struct tls_common_ctx *t = (struct tls_common_ctx *)h;
- if (!kr_assume(t)) {
+ if (kr_fails_assert(t)) {
errno = EFAULT;
return -1;
}
@@ -90,12 +90,14 @@ static ssize_t kres_gnutls_pull(gnutls_transport_ptr_t h, void *buf, size_t len)
static void on_write_complete(uv_write_t *req, int status)
{
- if (!kr_assume(req->data))
+ if (kr_fails_assert(req->data))
return;
struct async_write_ctx *async_ctx = (struct async_write_ctx *)req->data;
struct tls_common_ctx *t = async_ctx->t;
- if (kr_assume(t->write_queue_size))
+ if (t->write_queue_size)
t->write_queue_size -= 1;
+ else
+ kr_assert(false);
free(req->data);
}
@@ -107,7 +109,7 @@ static bool stream_queue_is_empty(struct tls_common_ctx *t)
static ssize_t kres_gnutls_vec_push(gnutls_transport_ptr_t h, const giovec_t * iov, int iovcnt)
{
struct tls_common_ctx *t = (struct tls_common_ctx *)h;
- if (!kr_assume(t)) {
+ if (kr_fails_assert(t)) {
errno = EFAULT;
return -1;
}
@@ -116,12 +118,12 @@ static ssize_t kres_gnutls_vec_push(gnutls_transport_ptr_t h, const giovec_t * i
return 0;
}
- if (!kr_assume(t->session)) {
+ if (kr_fails_assert(t->session)) {
errno = EFAULT;
return -1;
}
uv_stream_t *handle = (uv_stream_t *)session_get_handle(t->session);
- if (!kr_assume(handle && handle->type == UV_TCP)) {
+ if (kr_fails_assert(handle && handle->type == UV_TCP)) {
errno = EFAULT;
return -1;
}
@@ -286,7 +288,7 @@ static int tls_handshake(struct tls_common_ctx *ctx, tls_handshake_cb handshake_
struct tls_ctx *tls_new(struct worker_ctx *worker)
{
- if (!kr_assume(worker && worker->engine))
+ if (kr_fails_assert(worker && worker->engine))
return NULL;
struct network *net = &worker->engine->net;
@@ -365,7 +367,7 @@ struct tls_ctx *tls_new(struct worker_ctx *worker)
void tls_close(struct tls_common_ctx *ctx)
{
- if (ctx == NULL || ctx->tls_session == NULL || !kr_assume(ctx->session))
+ if (ctx == NULL || ctx->tls_session == NULL || kr_fails_assert(ctx->session))
return;
if (ctx->handshake_state == TLS_HS_DONE) {
@@ -403,7 +405,7 @@ int tls_write(uv_write_t *req, uv_handle_t *handle, knot_pkt_t *pkt, uv_write_cb
struct session *s = handle->data;
struct tls_common_ctx *tls_ctx = session_tls_get_common_ctx(s);
- if (!kr_assume(tls_ctx && session_flags(s)->outgoing == tls_ctx->client_side))
+ if (kr_fails_assert(tls_ctx && session_flags(s)->outgoing == tls_ctx->client_side))
return kr_error(EINVAL);
const uint16_t pkt_size = htons(pkt->size);
@@ -452,10 +454,10 @@ ssize_t tls_process_input_data(struct session *s, const uint8_t *buf, ssize_t nr
return kr_error(ENOSYS);
}
- if (!kr_assume(tls_p->session == s))
+ if (kr_fails_assert(tls_p->session == s))
return kr_error(EINVAL);
const bool ok = tls_p->recv_buf == buf && nread <= sizeof(tls_p->recv_buf);
- if (!kr_assume(ok)) /* don't risk overflowing the buffer if we have a mistake somewhere */
+ if (kr_fails_assert(ok)) /* don't risk overflowing the buffer if we have a mistake somewhere */
return kr_error(EINVAL);
const char *logstring = tls_p->client_side ? client_logstring : server_logstring;
@@ -558,7 +560,7 @@ ssize_t tls_process_input_data(struct session *s, const uint8_t *buf, ssize_t nr
* \return error code */
static int get_oob_key_pin(gnutls_x509_crt_t crt, char *outchar, ssize_t outchar_len, bool raw)
{
- if (!kr_assume(!raw || outchar_len >= TLS_SHA256_RAW_LEN)) {
+ if (kr_fails_assert(!raw || outchar_len >= TLS_SHA256_RAW_LEN)) {
return kr_error(ENOSPC);
/* With !raw we have check inside kr_base64_encode. */
}
@@ -584,7 +586,7 @@ static int get_oob_key_pin(gnutls_x509_crt_t crt, char *outchar, ssize_t outchar
if (err >= 0 && err < outchar_len) {
err = GNUTLS_E_SUCCESS;
outchar[err] = '\0'; /* kr_base64_encode() doesn't do it */
- } else if (!kr_assume(err < 0)) {
+ } else if (kr_fails_assert(err < 0)) {
err = kr_error(ENOSPC); /* base64 fits but '\0' doesn't */
outchar[outchar_len - 1] = '\0';
}
@@ -776,7 +778,7 @@ void tls_credentials_free(struct tls_credentials *tls_credentials) {
void tls_client_param_unref(tls_client_param_t *entry)
{
- if (!entry || !kr_assume(entry->refs)) return;
+ if (!entry || kr_fails_assert(entry->refs)) return;
--(entry->refs);
if (entry->refs) return;
@@ -806,7 +808,7 @@ void tls_client_param_unref(tls_client_param_t *entry)
}
static int param_free(void **param, void *null)
{
- if (!kr_assume(param && *param))
+ if (kr_fails_assert(param && *param))
return -1;
tls_client_param_unref(*param);
return 0;
@@ -821,7 +823,7 @@ void tls_client_params_free(tls_client_params_t *params)
tls_client_param_t * tls_client_param_new()
{
tls_client_param_t *e = calloc(1, sizeof(*e));
- if (!kr_assume(e))
+ if (kr_fails_assert(e))
return NULL;
/* Note: those array_t don't need further initialization. */
e->refs = 1;
@@ -860,20 +862,20 @@ static bool construct_key(const union inaddr *addr, uint32_t *len, char *key)
*len = sizeof(addr->ip6.sin6_port) + sizeof(addr->ip6.sin6_addr);
return true;
default:
- (void)!kr_assume(!EINVAL);
+ kr_assert(!EINVAL);
return false;
}
}
tls_client_param_t ** tls_client_param_getptr(tls_client_params_t **params,
const struct sockaddr *addr, bool do_insert)
{
- if (!kr_assume(params && addr))
+ if (kr_fails_assert(params && addr))
return NULL;
/* We accept NULL for empty map; ensure the map exists if needed. */
if (!*params) {
if (!do_insert) return NULL;
*params = trie_create(NULL);
- if (!kr_assume(*params))
+ if (kr_fails_assert(*params))
return NULL;
}
/* Construct the key. */
@@ -911,7 +913,7 @@ static int client_verify_pin(const unsigned int cert_list_size,
const gnutls_datum_t *cert_list,
tls_client_param_t *params)
{
- if (!kr_assume(params->pins.len > 0))
+ if (kr_fails_assert(params->pins.len > 0))
return GNUTLS_E_CERTIFICATE_ERROR;
#if TLS_CAN_USE_PINS
for (int i = 0; i < cert_list_size; i++) {
@@ -965,7 +967,7 @@ static int client_verify_pin(const unsigned int cert_list_size,
#else /* TLS_CAN_USE_PINS */
kr_log_error("[tls_client] internal inconsistency: TLS_CAN_USE_PINS\n");
- (void)!kr_assume(false);
+ kr_assert(false);
return GNUTLS_E_CERTIFICATE_ERROR;
#endif
}
@@ -978,7 +980,7 @@ static int client_verify_pin(const unsigned int cert_list_size,
*/
static int client_verify_certchain(gnutls_session_t tls_session, const char *hostname)
{
- if (!kr_assume(hostname)) {
+ if (kr_fails_assert(hostname)) {
kr_log_error("[tls_client] internal config inconsistency: no hostname set\n");
return GNUTLS_E_CERTIFICATE_ERROR;
}
@@ -1018,7 +1020,7 @@ static int client_verify_certchain(gnutls_session_t tls_session, const char *hos
static int client_verify_certificate(gnutls_session_t tls_session)
{
struct tls_client_ctx *ctx = gnutls_session_get_ptr(tls_session);
- if (!kr_assume(ctx->params))
+ if (kr_fails_assert(ctx->params))
return GNUTLS_E_CERTIFICATE_ERROR;
if (ctx->params->insecure) {
@@ -1118,7 +1120,7 @@ void tls_client_ctx_free(struct tls_client_ctx *ctx)
int tls_pull_timeout_func(gnutls_transport_ptr_t h, unsigned int ms)
{
struct tls_common_ctx *t = (struct tls_common_ctx *)h;
- if (!kr_assume(t)) {
+ if (kr_fails_assert(t)) {
errno = EFAULT;
return -1;
}
@@ -1139,7 +1141,7 @@ int tls_client_connect_start(struct tls_client_ctx *client_ctx,
if (session == NULL || client_ctx == NULL)
return kr_error(EINVAL);
- if (!kr_assume(session_flags(session)->outgoing && session_get_handle(session)->type == UV_TCP))
+ if (kr_fails_assert(session_flags(session)->outgoing && session_get_handle(session)->type == UV_TCP))
return kr_error(EINVAL);
struct tls_common_ctx *ctx = &client_ctx->c;
diff --git a/daemon/tls_session_ticket-srv.c b/daemon/tls_session_ticket-srv.c
index 315eb48e..2ac91f8f 100644
--- a/daemon/tls_session_ticket-srv.c
+++ b/daemon/tls_session_ticket-srv.c
@@ -72,11 +72,11 @@ static bool tst_key_invariants(void)
static tst_ctx_t * tst_key_create(const char *secret, size_t secret_len, uv_loop_t *loop)
{
const size_t hash_len = sizeof(time_t) + secret_len;
- if (!kr_assume(!secret_len || (secret && hash_len >= secret_len && hash_len <= UINT16_MAX))) {
+ if (kr_fails_assert(!secret_len || (secret && hash_len >= secret_len && hash_len <= UINT16_MAX))) {
return NULL;
/* reasonable secret_len is best enforced in config API */
}
- if (!kr_assume(tst_key_invariants()))
+ if (kr_fails_assert(tst_key_invariants()))
return NULL;
#if !TLS_SESSION_RESUMPTION_SYNC
if (secret_len) {
@@ -107,7 +107,7 @@ static int tst_key_get_random(tst_ctx_t *ctx)
gnutls_datum_t key_tmp = { NULL, 0 };
int err = gnutls_session_ticket_key_generate(&key_tmp);
if (err) return kr_error(err);
- if (!kr_assume(key_tmp.size == SESSION_KEY_SIZE))
+ if (kr_fails_assert(key_tmp.size == SESSION_KEY_SIZE))
return kr_error(EFAULT);
memcpy(ctx->key, key_tmp.data, SESSION_KEY_SIZE);
gnutls_memset(key_tmp.data, 0, SESSION_KEY_SIZE);
@@ -118,7 +118,7 @@ static int tst_key_get_random(tst_ctx_t *ctx)
/** Recompute the session ticket key, if epoch has changed or forced. */
static int tst_key_update(tst_ctx_t *ctx, time_t epoch, bool force_update)
{
- if (!kr_assume(ctx && ctx->hash_len >= sizeof(epoch)))
+ if (kr_fails_assert(ctx && ctx->hash_len >= sizeof(epoch)))
return kr_error(EINVAL);
	/* documented limitation: time_t and endianness must match
* on instances sharing a secret */
@@ -132,7 +132,7 @@ static int tst_key_update(tst_ctx_t *ctx, time_t epoch, bool force_update)
}
/* Otherwise, deterministic variant of secret rotation, if supported. */
#if !TLS_SESSION_RESUMPTION_SYNC
- (void)!kr_assume(!ENOTSUP);
+ kr_assert(!ENOTSUP);
return kr_error(ENOTSUP);
#else
int err = gnutls_hash_fast(TST_HASH, ctx->hash_data,
@@ -144,11 +144,12 @@ static int tst_key_update(tst_ctx_t *ctx, time_t epoch, bool force_update)
/** Free all resources of the key (securely). */
static void tst_key_destroy(uv_handle_t *timer)
{
- if (!kr_assume(timer))
+ if (kr_fails_assert(timer))
return;
tst_ctx_t *ctx = timer->data;
- if (kr_assume(ctx))
- gnutls_memset(ctx, 0, offsetof(tst_ctx_t, hash_data) + ctx->hash_len);
+ if (kr_fails_assert(ctx))
+ return;
+ gnutls_memset(ctx, 0, offsetof(tst_ctx_t, hash_data) + ctx->hash_len);
free(ctx);
}
@@ -178,7 +179,7 @@ static void tst_key_check(uv_timer_t *timer, bool force_update)
if (err) {
kr_log_error("[tls] session ticket: failed rotation, %s\n",
kr_strerror(err));
- if (!kr_assume(err != kr_error(EINVAL)))
+ if (kr_fails_assert(err != kr_error(EINVAL)))
return;
}
/* Reschedule. */
@@ -187,13 +188,13 @@ static void tst_key_check(uv_timer_t *timer, bool force_update)
const uint64_t remain_ms = (tv_sec_next - now.tv_sec - 1) * (uint64_t)1000
+ ms_until_second + 1;
/* ^ +1 because we don't want to wake up half a millisecond before the epoch! */
- if (!kr_assume(remain_ms < (TST_KEY_LIFETIME + 1 /*rounding tolerance*/) * 1000))
+ if (kr_fails_assert(remain_ms < (TST_KEY_LIFETIME + 1 /*rounding tolerance*/) * 1000))
return;
kr_log_verbose("[tls] session ticket: epoch %"PRIu64
", scheduling rotation check in %"PRIu64" ms\n",
(uint64_t)epoch, remain_ms);
err = uv_timer_start(timer, &tst_timer_callback, remain_ms, 0);
- if (!kr_assume(err == 0)) {
+ if (kr_fails_assert(err == 0)) {
kr_log_error("[tls] session ticket: failed to schedule, %s\n",
uv_strerror(err));
return;
@@ -204,7 +205,7 @@ static void tst_key_check(uv_timer_t *timer, bool force_update)
void tls_session_ticket_enable(struct tls_session_ticket_ctx *ctx, gnutls_session_t session)
{
- if (!kr_assume(ctx && session))
+ if (kr_fails_assert(ctx && session))
return;
const gnutls_datum_t gd = {
.size = SESSION_KEY_SIZE,
@@ -221,7 +222,7 @@ void tls_session_ticket_enable(struct tls_session_ticket_ctx *ctx, gnutls_sessio
tst_ctx_t * tls_session_ticket_ctx_create(uv_loop_t *loop, const char *secret,
size_t secret_len)
{
- if (!kr_assume(loop && (!secret_len || secret)))
+ if (kr_fails_assert(loop && (!secret_len || secret)))
return NULL;
#if GNUTLS_VERSION_NUMBER < 0x030500
/* We would need different SESSION_KEY_SIZE; avoid an error. */
diff --git a/daemon/udp_queue.c b/daemon/udp_queue.c
index 8617b2c0..f899d7c2 100644
--- a/daemon/udp_queue.c
+++ b/daemon/udp_queue.c
@@ -133,7 +133,7 @@ void udp_queue_push(int fd, struct kr_request *req, struct qr_task *task)
++(q->len);
if (q->len >= UDP_QUEUE_LEN) {
- (void)!kr_assume(q->len == UDP_QUEUE_LEN);
+ kr_assert(q->len == UDP_QUEUE_LEN);
udp_queue_send(fd);
/* We don't need to search state.waiting_fds;
* anyway, it's more efficient to let the hook do that. */
diff --git a/daemon/worker.c b/daemon/worker.c
index 9f2bc242..f1ade4d2 100644
--- a/daemon/worker.c
+++ b/daemon/worker.c
@@ -141,7 +141,7 @@ static uv_handle_t *ioreq_spawn(struct worker_ctx *worker,
{
bool precond = (socktype == SOCK_DGRAM || socktype == SOCK_STREAM)
&& (family == AF_INET || family == AF_INET6);
- if (!kr_assume(precond)) {
+ if (kr_fails_assert(precond)) {
kr_log_verbose("[work] ioreq_spawn: pre-condition failed\n");
return NULL;
}
@@ -170,7 +170,7 @@ static uv_handle_t *ioreq_spawn(struct worker_ctx *worker,
addr = (union inaddr *)&worker->out_addr6;
}
if (addr->ip.sa_family != AF_UNSPEC) {
- if (!kr_assume(addr->ip.sa_family == family)) {
+ if (kr_fails_assert(addr->ip.sa_family == family)) {
io_free(handle);
return NULL;
}
@@ -268,12 +268,12 @@ static int subreq_key(char *dst, knot_pkt_t *pkt)
#if ENABLE_XDP
static uint8_t *alloc_wire_cb(struct kr_request *req, uint16_t *maxlen)
{
- if (!kr_assume(maxlen))
+ if (kr_fails_assert(maxlen))
return NULL;
struct request_ctx *ctx = (struct request_ctx *)req;
/* We know it's an AF_XDP socket; otherwise this CB isn't assigned. */
uv_handle_t *handle = session_get_handle(ctx->source.session);
- if (!kr_assume(handle->type == UV_POLL))
+ if (kr_fails_assert(handle->type == UV_POLL))
return NULL;
xdp_handle_data_t *xhd = handle->data;
knot_xdp_msg_t out;
@@ -285,7 +285,7 @@ static uint8_t *alloc_wire_cb(struct kr_request *req, uint16_t *maxlen)
ipv6, &out, NULL);
#endif
if (ret != KNOT_EOK) {
- (void)!kr_assume(ret == KNOT_ENOMEM);
+ kr_assert(ret == KNOT_ENOMEM);
*maxlen = 0;
return NULL;
}
@@ -297,7 +297,7 @@ static uint8_t *alloc_wire_cb(struct kr_request *req, uint16_t *maxlen)
}
static void free_wire(const struct request_ctx *ctx)
{
- if (!kr_assume(ctx->req.alloc_wire_cb == alloc_wire_cb))
+ if (kr_fails_assert(ctx->req.alloc_wire_cb == alloc_wire_cb))
return;
knot_pkt_t *ans = ctx->req.answer;
if (unlikely(ans == NULL)) /* dropped */
@@ -306,7 +306,7 @@ static void free_wire(const struct request_ctx *ctx)
return;
/* We know it's an AF_XDP socket; otherwise alloc_wire_cb isn't assigned. */
uv_handle_t *handle = session_get_handle(ctx->source.session);
- if (!kr_assume(handle->type == UV_POLL))
+ if (kr_fails_assert(handle->type == UV_POLL))
return;
xdp_handle_data_t *xhd = handle->data;
/* Freeing is done by sending an empty packet (the API won't really send it). */
@@ -315,7 +315,7 @@ static void free_wire(const struct request_ctx *ctx)
out.payload.iov_len = 0;
uint32_t sent;
int ret = knot_xdp_send(xhd->socket, &out, 1, &sent);
- (void)!kr_assume(ret == KNOT_EOK && sent == 0);
+ kr_assert(ret == KNOT_EOK && sent == 0);
kr_log_verbose("[xdp] freed unsent buffer, ret = %d\n", ret);
}
#endif
@@ -360,19 +360,19 @@ static struct request_ctx *request_create(struct worker_ctx *worker,
/* TODO Relocate pool to struct request */
ctx->worker = worker;
- if (session && !kr_assume(session_flags(session)->outgoing == false)) {
+ if (session && kr_fails_assert(session_flags(session)->outgoing == false)) {
pool_release(worker, pool.ctx);
return NULL;
}
ctx->source.session = session;
- if (!kr_assume(!!eth_to == !!eth_from)) {
+ if (kr_fails_assert(!!eth_to == !!eth_from)) {
pool_release(worker, pool.ctx);
return NULL;
}
const bool is_xdp = eth_to != NULL;
if (is_xdp) {
#if ENABLE_XDP
- if (!kr_assume(session)) {
+ if (kr_fails_assert(session)) {
pool_release(worker, pool.ctx);
return NULL;
}
@@ -380,7 +380,7 @@ static struct request_ctx *request_create(struct worker_ctx *worker,
memcpy(&ctx->source.eth_addrs[1], eth_from, sizeof(ctx->source.eth_addrs[1]));
ctx->req.alloc_wire_cb = alloc_wire_cb;
#else
- (void)!kr_assume(!EINVAL);
+ kr_assert(!EINVAL);
pool_release(worker, pool.ctx);
return NULL;
#endif
@@ -432,7 +432,7 @@ static struct request_ctx *request_create(struct worker_ctx *worker,
/** More initialization, related to the particular incoming query/packet. */
static int request_start(struct request_ctx *ctx, knot_pkt_t *query)
{
- if (!kr_assume(query && ctx))
+ if (kr_fails_assert(query && ctx))
return kr_error(EINVAL);
struct kr_request *req = &ctx->req;
@@ -489,7 +489,7 @@ static void request_free(struct request_ctx *ctx)
#if ENABLE_XDP
free_wire(ctx);
#else
- (void)!kr_assume(!EINVAL);
+ kr_assert(!EINVAL);
#endif
}
/* Return mempool to ring or free it if it's full */
@@ -527,7 +527,7 @@ static struct qr_task *qr_task_create(struct request_ctx *ctx)
task->pktbuf = pktbuf;
array_init(task->waiting);
task->refs = 0;
- (void)!kr_assume(ctx->task == NULL);
+ kr_assert(ctx->task == NULL);
ctx->task = task;
/* Make the primary reference to task. */
qr_task_ref(task);
@@ -541,7 +541,7 @@ static void qr_task_free(struct qr_task *task)
{
struct request_ctx *ctx = task->ctx;
- if (!kr_assume(ctx))
+ if (kr_fails_assert(ctx))
return;
struct worker_ctx *worker = ctx->worker;
@@ -557,13 +557,13 @@ static void qr_task_free(struct qr_task *task)
/*@ Register new qr_task within session. */
static int qr_task_register(struct qr_task *task, struct session *session)
{
- if (!kr_assume(!session_flags(session)->outgoing && session_get_handle(session)->type == UV_TCP))
+ if (kr_fails_assert(!session_flags(session)->outgoing && session_get_handle(session)->type == UV_TCP))
return kr_error(EINVAL);
session_tasklist_add(session, task);
struct request_ctx *ctx = task->ctx;
- if (!kr_assume(ctx && (ctx->source.session == NULL || ctx->source.session == session)))
+ if (kr_fails_assert(ctx && (ctx->source.session == NULL || ctx->source.session == session)))
return kr_error(EINVAL);
ctx->source.session = session;
/* Soft-limit on parallel queries, there is no "slow down" RCODE
@@ -611,17 +611,17 @@ int qr_task_on_send(struct qr_task *task, const uv_handle_t *handle, int status)
qr_task_complete(task);
}
- if (!handle || !kr_assume(handle->data))
+ if (!handle || kr_fails_assert(handle->data))
return status;
struct session* s = handle->data;
if (handle->type == UV_UDP && session_flags(s)->outgoing) {
// This should ensure that we are only dealing with our question to upstream
- if (!kr_assume(!knot_wire_get_qr(task->pktbuf->wire)))
+ if (kr_fails_assert(!knot_wire_get_qr(task->pktbuf->wire)))
return status;
// start the timer
struct kr_query *qry = array_tail(task->ctx->req.rplan.pending);
- if (!kr_assume(qry))
+ if (kr_fails_assert(qry))
return status;
size_t timeout = task->transport->timeout;
int ret = session_timer_start(s, on_udp_timeout, timeout, 0);
@@ -680,7 +680,7 @@ static int qr_task_send(struct qr_task *task, struct session *session,
struct request_ctx *ctx = task->ctx;
uv_handle_t *handle = session_get_handle(session);
- if (!kr_assume(handle && handle->data == session))
+ if (kr_fails_assert(handle && handle->data == session))
return qr_task_on_send(task, NULL, kr_error(EINVAL));
const bool is_stream = handle->type == UV_TCP;
if (!is_stream && handle->type != UV_UDP) abort();
@@ -717,7 +717,7 @@ static int qr_task_send(struct qr_task *task, struct session *session,
task->send_time = kr_now();
task->recv_time = 0; // task structure is being reused so we have to zero this out here
/* Send using given protocol */
- if (!kr_assume(!session_flags(session)->closing))
+ if (kr_fails_assert(!session_flags(session)->closing))
return qr_task_on_send(task, NULL, kr_error(EIO));
if (session_flags(session)->has_http) {
#if ENABLE_DOH2
@@ -761,7 +761,7 @@ static int qr_task_send(struct qr_task *task, struct session *session,
write_req->data = task;
ret = uv_write(write_req, (uv_stream_t *)handle, buf, 3, &on_write);
} else {
- (void)!kr_assume(false);
+ kr_assert(false);
}
if (ret == 0) {
@@ -821,7 +821,7 @@ static struct kr_query *task_get_last_pending_query(struct qr_task *task)
static int session_tls_hs_cb(struct session *session, int status)
{
- if (!kr_assume(session_flags(session)->outgoing))
+ if (kr_fails_assert(session_flags(session)->outgoing))
return kr_error(EINVAL);
struct sockaddr *peer = session_get_peer(session);
int deletion_res = worker_del_tcp_waiting(the_worker, peer);
@@ -953,12 +953,12 @@ static void on_connect(uv_connect_t *req, int status)
struct sockaddr *peer = session_get_peer(session);
free(req);
- if (!kr_assume(session_flags(session)->outgoing))
+ if (kr_fails_assert(session_flags(session)->outgoing))
return;
if (session_flags(session)->closing) {
worker_del_tcp_waiting(worker, peer);
- (void)!kr_assume(session_is_empty(session));
+ kr_assert(session_is_empty(session));
return;
}
@@ -976,7 +976,7 @@ static void on_connect(uv_connect_t *req, int status)
"is already timeouted, close\n",
peer_str ? peer_str : "");
}
- (void)!kr_assume(session_tasklist_is_empty(session));
+ kr_assert(session_tasklist_is_empty(session));
session_waitinglist_retry(session, false);
session_close(session);
return;
@@ -993,7 +993,7 @@ static void on_connect(uv_connect_t *req, int status)
"is already connected, close\n",
peer_str ? peer_str : "");
}
- (void)!kr_assume(session_tasklist_is_empty(session));
+ kr_assert(session_tasklist_is_empty(session));
session_waitinglist_retry(session, false);
session_close(session);
return;
@@ -1014,7 +1014,7 @@ static void on_connect(uv_connect_t *req, int status)
struct kr_query *qry = array_tail(task->ctx->req.rplan.pending);
qry->server_selection.error(qry, task->transport, KR_SELECTION_TCP_CONNECT_FAILED);
}
- (void)!kr_assume(session_tasklist_is_empty(session));
+ kr_assert(session_tasklist_is_empty(session));
session_waitinglist_retry(session, false);
session_close(session);
return;
@@ -1027,7 +1027,7 @@ static void on_connect(uv_connect_t *req, int status)
/* session isn't in list of waiting queries, *
		 * something went wrong */
session_waitinglist_finalize(session, KR_STATE_FAIL);
- (void)!kr_assume(session_tasklist_is_empty(session));
+ kr_assert(session_tasklist_is_empty(session));
session_close(session);
return;
}
@@ -1073,7 +1073,7 @@ static void on_tcp_connect_timeout(uv_timer_t *timer)
struct worker_ctx *worker = the_worker;
kr_require(worker);
- (void)!kr_assume(session_tasklist_is_empty(session));
+ kr_assert(session_tasklist_is_empty(session));
struct sockaddr *peer = session_get_peer(session);
worker_del_tcp_waiting(worker, peer);
@@ -1098,7 +1098,7 @@ static void on_tcp_connect_timeout(uv_timer_t *timer)
worker->stats.timeout += session_waitinglist_get_len(session);
session_waitinglist_retry(session, true);
- (void)!kr_assume(session_tasklist_is_empty(session));
+ kr_assert(session_tasklist_is_empty(session));
/* uv_cancel() doesn't support uv_connect_t request,
* so that we can't cancel it.
* There still exists possibility of successful connection
@@ -1113,9 +1113,9 @@ static void on_tcp_connect_timeout(uv_timer_t *timer)
static void on_udp_timeout(uv_timer_t *timer)
{
struct session *session = timer->data;
- (void)!kr_assume(session_get_handle(session)->data == session);
- (void)!kr_assume(session_tasklist_get_len(session) == 1);
- (void)!kr_assume(session_waitinglist_is_empty(session));
+ kr_assert(session_get_handle(session)->data == session);
+ kr_assert(session_tasklist_get_len(session) == 1);
+ kr_assert(session_waitinglist_is_empty(session));
uv_timer_stop(timer);
@@ -1161,7 +1161,7 @@ static uv_handle_t *transmit(struct qr_task *task)
struct sockaddr *addr = (struct sockaddr *)choice;
struct session *session = ret->data;
struct sockaddr *peer = session_get_peer(session);
- (void)!kr_assume(peer->sa_family == AF_UNSPEC && session_flags(session)->outgoing);
+ kr_assert(peer->sa_family == AF_UNSPEC && session_flags(session)->outgoing);
memcpy(peer, addr, kr_sockaddr_len(addr));
if (qr_task_send(task, session, (struct sockaddr *)choice,
task->pktbuf) != 0) {
@@ -1192,7 +1192,7 @@ static void subreq_finalize(struct qr_task *task, const struct sockaddr *packet_
if (klen > 0) {
void *val_deleted;
int ret = trie_del(task->ctx->worker->subreq_out, key, klen, &val_deleted);
- (void)!kr_assume(ret == KNOT_EOK && val_deleted == task);
+ kr_assert(ret == KNOT_EOK && val_deleted == task);
}
/* Notify waiting tasks. */
struct kr_query *leader_qry = array_tail(task->ctx->req.rplan.pending);
@@ -1220,7 +1220,7 @@ static void subreq_finalize(struct qr_task *task, const struct sockaddr *packet_
static void subreq_lead(struct qr_task *task)
{
- if (!kr_assume(task))
+ if (kr_fails_assert(task))
return;
char key[SUBREQ_KEY_LEN];
const int klen = subreq_key(key, task->pktbuf);
@@ -1230,7 +1230,7 @@ static void subreq_lead(struct qr_task *task)
trie_get_ins(task->ctx->worker->subreq_out, key, klen);
if (unlikely(!tvp))
return; /*ENOMEM*/
- if (!kr_assume(*tvp == NULL))
+ if (kr_fails_assert(*tvp == NULL))
return;
*tvp = task;
task->leading = true;
@@ -1238,7 +1238,7 @@ static void subreq_lead(struct qr_task *task)
static bool subreq_enqueue(struct qr_task *task)
{
- if (!kr_assume(task))
+ if (kr_fails_assert(task))
return false;
char key[SUBREQ_KEY_LEN];
const int klen = subreq_key(key, task->pktbuf);
@@ -1278,7 +1278,7 @@ static int xdp_push(struct qr_task *task, const uv_handle_t *src_handle)
#if ENABLE_XDP
struct request_ctx *ctx = task->ctx;
xdp_handle_data_t *xhd = src_handle->data;
- if (!kr_assume(xhd && xhd->socket && xhd->session == ctx->source.session))
+ if (kr_fails_assert(xhd && xhd->socket && xhd->session == ctx->source.session))
return qr_task_on_send(task, src_handle, kr_error(EINVAL));
knot_xdp_msg_t msg;
@@ -1298,7 +1298,7 @@ static int xdp_push(struct qr_task *task, const uv_handle_t *src_handle)
return qr_task_on_send(task, src_handle, ret);
#else
- (void)!kr_assume(!EINVAL);
+ kr_assert(!EINVAL);
return kr_error(EINVAL);
#endif
}
@@ -1334,7 +1334,7 @@ static int qr_task_finalize(struct qr_task *task, int state)
/* Send back answer */
int ret;
const uv_handle_t *src_handle = session_get_handle(source_session);
- if (!kr_assume(src_handle->type == UV_UDP || src_handle->type == UV_TCP
+ if (kr_fails_assert(src_handle->type == UV_UDP || src_handle->type == UV_TCP
|| src_handle->type == UV_POLL)) {
ret = kr_error(EINVAL);
} else if (src_handle->type == UV_POLL) {
@@ -1342,8 +1342,10 @@ static int qr_task_finalize(struct qr_task *task, int state)
} else if (src_handle->type == UV_UDP && ENABLE_SENDMMSG) {
int fd;
ret = uv_fileno(src_handle, &fd);
- if (kr_assume(ret == 0))
+ if (ret == 0)
udp_queue_push(fd, &ctx->req, task);
+ else
+ kr_assert(false);
} else {
ret = qr_task_send(task, source_session, &ctx->source.addr.ip, ctx->req.answer);
}
@@ -1354,7 +1356,7 @@ static int qr_task_finalize(struct qr_task *task, int state)
while (!session_tasklist_is_empty(source_session)) {
struct qr_task *t = session_tasklist_del_first(source_session, false);
struct request_ctx *c = t->ctx;
- (void)!kr_assume(c->source.session == source_session);
+ kr_assert(c->source.session == source_session);
c->source.session = NULL;
/* Don't finalize them as there can be other tasks
* waiting for answer to this particular task.
@@ -1395,7 +1397,7 @@ static int udp_task_step(struct qr_task *task,
static int tcp_task_waiting_connection(struct session *session, struct qr_task *task)
{
- if (!kr_assume(session_flags(session)->outgoing && !session_flags(session)->closing))
+ if (kr_fails_assert(session_flags(session)->outgoing && !session_flags(session)->closing))
return kr_error(EINVAL);
/* Add task to the end of list of waiting tasks.
* It will be notified in on_connect() or qr_task_on_send(). */
@@ -1408,7 +1410,7 @@ static int tcp_task_waiting_connection(struct session *session, struct qr_task *
static int tcp_task_existing_connection(struct session *session, struct qr_task *task)
{
- if (!kr_assume(session_flags(session)->outgoing && !session_flags(session)->closing))
+ if (kr_fails_assert(session_flags(session)->outgoing && !session_flags(session)->closing))
return kr_error(EINVAL);
struct request_ctx *ctx = task->ctx;
struct worker_ctx *worker = ctx->worker;
@@ -1471,7 +1473,7 @@ static int tcp_task_make_connection(struct qr_task *task, const struct sockaddr
return kr_error(EINVAL);
}
struct session *session = client->data;
- if (!kr_assume(session_flags(session)->has_tls == has_tls)) {
+ if (kr_fails_assert(session_flags(session)->has_tls == has_tls)) {
tls_client_ctx_free(tls_ctx);
free(conn);
return kr_error(EINVAL);
@@ -1539,7 +1541,7 @@ static int tcp_task_make_connection(struct qr_task *task, const struct sockaddr
static int tcp_task_step(struct qr_task *task,
const struct sockaddr *packet_source, knot_pkt_t *packet)
{
- if (!kr_assume(task->pending_count == 0)) {
+ if (kr_fails_assert(task->pending_count == 0)) {
subreq_finalize(task, packet_source, packet);
return qr_task_finalize(task, KR_STATE_FAIL);
}
@@ -1601,7 +1603,7 @@ static int qr_task_step(struct qr_task *task,
/* Consume input and produce next query */
struct request_ctx *ctx = task->ctx;
- if (!kr_assume(ctx))
+ if (kr_fails_assert(ctx))
return qr_task_finalize(task, KR_STATE_FAIL);
struct kr_request *req = &ctx->req;
struct worker_ctx *worker = ctx->worker;
@@ -1665,7 +1667,7 @@ static int qr_task_step(struct qr_task *task,
case KR_TRANSPORT_TLS:
return tcp_task_step(task, packet_source, packet);
default:
- (void)!kr_assume(!EINVAL);
+ kr_assert(!EINVAL);
return kr_error(EINVAL);
}
}
@@ -1762,13 +1764,13 @@ int worker_submit(struct session *session,
(int)id);
return kr_error(ENOENT);
}
- if (!kr_assume(!session_flags(session)->closing))
+ if (kr_fails_assert(!session_flags(session)->closing))
return kr_error(EINVAL);
addr = peer;
		/* Note receive time for RTT calculation */
task->recv_time = kr_now();
}
- if (!kr_assume(!uv_is_closing(session_get_handle(session))))
+ if (kr_fails_assert(!uv_is_closing(session_get_handle(session))))
return kr_error(EINVAL);
/* Packet was successfully parsed.
@@ -1782,10 +1784,10 @@ int worker_submit(struct session *session,
static int map_add_tcp_session(map_t *map, const struct sockaddr* addr,
struct session *session)
{
- if (!kr_assume(map && addr))
+ if (kr_fails_assert(map && addr))
return kr_error(EINVAL);
const char *key = tcpsess_key(addr);
- if (!kr_assume(key && map_contains(map, key) == 0))
+ if (kr_fails_assert(key && map_contains(map, key) == 0))
return kr_error(EINVAL);
int ret = map_set(map, key, session);
return ret ? kr_error(EINVAL) : kr_ok();
@@ -1793,10 +1795,10 @@ static int map_add_tcp_session(map_t *map, const struct sockaddr* addr,
static int map_del_tcp_session(map_t *map, const struct sockaddr* addr)
{
- if (!kr_assume(map && addr))
+ if (kr_fails_assert(map && addr))
return kr_error(EINVAL);
const char *key = tcpsess_key(addr);
- if (!kr_assume(key))
+ if (kr_fails_assert(key))
return kr_error(EINVAL);
int ret = map_del(map, key);
return ret ? kr_error(ENOENT) : kr_ok();
@@ -1805,10 +1807,10 @@ static int map_del_tcp_session(map_t *map, const struct sockaddr* addr)
static struct session* map_find_tcp_session(map_t *map,
const struct sockaddr *addr)
{
- if (!kr_assume(map && addr))
+ if (kr_fails_assert(map && addr))
return NULL;
const char *key = tcpsess_key(addr);
- if (!kr_assume(key))
+ if (kr_fails_assert(key))
return NULL;
struct session* ret = map_get(map, key);
return ret;
@@ -1879,7 +1881,7 @@ int worker_end_tcp(struct session *session)
while (!session_waitinglist_is_empty(session)) {
struct qr_task *task = session_waitinglist_pop(session, false);
- (void)!kr_assume(task->refs > 1);
+ kr_assert(task->refs > 1);
session_tasklist_del(session, task);
if (session_flags(session)->outgoing) {
if (task->ctx->req.options.FORWARD) {
@@ -1894,7 +1896,7 @@ int worker_end_tcp(struct session *session)
}
qr_task_step(task, NULL, NULL);
} else {
- (void)!kr_assume(task->ctx->source.session == session);
+ kr_assert(task->ctx->source.session == session);
task->ctx->source.session = NULL;
}
worker_task_unref(task);
@@ -1910,7 +1912,7 @@ int worker_end_tcp(struct session *session)
}
qr_task_step(task, NULL, NULL);
} else {
- (void)!kr_assume(task->ctx->source.session == session);
+ kr_assert(task->ctx->source.session == session);
task->ctx->source.session = NULL;
}
worker_task_unref(task);
@@ -1968,7 +1970,7 @@ knot_pkt_t *worker_resolve_mk_pkt(const char *qname_str, uint16_t qtype, uint16_
struct qr_task *worker_resolve_start(knot_pkt_t *query, struct kr_qflags options)
{
struct worker_ctx *worker = the_worker;
- if (!kr_assume(worker && query))
+ if (kr_fails_assert(worker && query))
return NULL;
@@ -2132,7 +2134,7 @@ static inline void reclaim_mp_freelist(mp_freelist_t *list)
void worker_deinit(void)
{
struct worker_ctx *worker = the_worker;
- if (!kr_assume(worker))
+ if (kr_fails_assert(worker))
return;
if (worker->z_import != NULL) {
zi_free(worker->z_import);
@@ -2156,7 +2158,7 @@ void worker_deinit(void)
int worker_init(struct engine *engine, int worker_count)
{
- if (!kr_assume(engine && engine->L && the_worker == NULL))
+ if (kr_fails_assert(engine && engine->L && the_worker == NULL))
return kr_error(EINVAL);
kr_bindings_register(engine->L);
@@ -2197,7 +2199,7 @@ int worker_init(struct engine *engine, int worker_count)
lua_pushstring(engine->L, inst_name);
} else {
ret = asprintf(&pid_str, "%ld", (long)pid);
- (void)!kr_assume(ret > 0);
+ kr_assert(ret > 0);
lua_pushstring(engine->L, pid_str);
}
lua_setfield(engine->L, -2, "id");
diff --git a/daemon/zimport.c b/daemon/zimport.c
index 69f13cdb..c0ece8b3 100644
--- a/daemon/zimport.c
+++ b/daemon/zimport.c
@@ -236,7 +236,7 @@ static int zi_rrset_find_put(struct zone_import_ctx *z_import,
static int zi_rrset_put(struct zone_import_ctx *z_import, knot_pkt_t *pkt,
knot_rrset_t *rr)
{
- if (!kr_assume(rr && rr->type != KNOT_RRTYPE_RRSIG))
+ if (kr_fails_assert(rr && rr->type != KNOT_RRTYPE_RRSIG))
return -1;
int err = knot_pkt_put(pkt, 0, rr, 0);
if (err != KNOT_EOK) {
@@ -454,7 +454,7 @@ static void zi_zone_process(uv_timer_t* handle)
size_t ns_imported = 0;
size_t other_imported = 0;
- if (!kr_assume(z_import->worker)) {
+ if (kr_fails_assert(z_import->worker)) {
failed = 1;
goto finish;
}
@@ -728,7 +728,7 @@ int zi_zone_import(struct zone_import_ctx *z_import,
const char *zone_file, const char *origin,
uint16_t rclass, uint32_t ttl)
{
- if (!kr_assume(z_import && z_import->worker && zone_file))
+ if (kr_fails_assert(z_import && z_import->worker && zone_file))
return -1;
zs_scanner_t *s = malloc(sizeof(zs_scanner_t));
diff --git a/doc/config-debugging.rst b/doc/config-debugging.rst
index f7eb6e88..2e88d859 100644
--- a/doc/config-debugging.rst
+++ b/doc/config-debugging.rst
@@ -8,19 +8,20 @@ the crashed process. Configuring the system to collect coredump from crashed
process is out of the scope of this documentation, but some tips can be found
`here <https://lists.nic.cz/pipermail/knot-resolver-users/2019/000239.html>`_.
-Kresd uses *assumptions*, which are checks that should always pass and indicate
-some weird or unexpected state if they don't. In such cases, they show up in
-the log as errors. By default, the process recovers from those states if possible, but the
-behaviour can be changed with the following options to aid further debugging.
+Kresd uses its own mechanism for assertions. They are checks that should always
+pass and indicate some weird or unexpected state if they don't. In such cases,
+they show up in the log as errors. By default, the process recovers from those
+states if possible, but the behaviour can be changed with the following options
+to aid further debugging.
-.. envvar:: debugging.assumption_abort = false|true
+.. envvar:: debugging.assertion_abort = false|true
:return: boolean (default: false in meson's release mode, true otherwise)
- Allow the process to be aborted in case it encounters a failed assumption.
+ Allow the process to be aborted in case it encounters a failed assertion.
(Some critical conditions always lead to abortion, regardless of settings.)
-.. envvar:: debugging.assumption_fork = milliseconds
+.. envvar:: debugging.assertion_fork = milliseconds
:return: int (default: 5 minutes in meson's release mode, 0 otherwise)
diff --git a/lib/cache/api.c b/lib/cache/api.c
index af6aa7e3..c1acce2c 100644
--- a/lib/cache/api.c
+++ b/lib/cache/api.c
@@ -109,7 +109,7 @@ static int assert_right_version(struct kr_cache *cache)
int kr_cache_open(struct kr_cache *cache, const struct kr_cdb_api *api, struct kr_cdb_opts *opts, knot_mm_t *mm)
{
- if (!kr_assume(cache))
+ if (kr_fails_assert(cache))
return kr_error(EINVAL);
memset(cache, 0, sizeof(*cache));
/* Open cache */
@@ -133,11 +133,11 @@ int kr_cache_open(struct kr_cache *cache, const struct kr_cdb_api *api, struct k
}
char *fpath = kr_absolutize_path(opts->path, "data.mdb");
- if (kr_assume(fpath)) {
- kr_cache_emergency_file_to_remove = fpath;
- } else {
+ if (kr_fails_assert(fpath)) {
/* non-critical, but still */
fpath = "<ENOMEM>";
+ } else {
+ kr_cache_emergency_file_to_remove = fpath;
}
if (ret == 0 && opts->maxsize) {
@@ -297,11 +297,11 @@ static bool check_rrtype(uint16_t type, const struct kr_query *qry/*logging*/)
/** Like key_exact_type() but omits a couple checks not holding for pkt cache. */
knot_db_val_t key_exact_type_maypkt(struct key *k, uint16_t type)
{
- if (!kr_assume(check_rrtype(type, NULL)))
+ if (kr_fails_assert(check_rrtype(type, NULL)))
return (knot_db_val_t){ NULL, 0 };
switch (type) {
case KNOT_RRTYPE_RRSIG: /* no RRSIG query caching, at least for now */
- (void)!kr_assume(false);
+ kr_assert(false);
return (knot_db_val_t){ NULL, 0 };
/* xNAME lumped into NS. */
case KNOT_RRTYPE_CNAME:
@@ -393,7 +393,7 @@ int cache_stash(kr_layer_t *ctx, knot_pkt_t *pkt)
/* Stash individual records. */
ranked_rr_array_t *selected[] = kr_request_selected(req);
trie_t *nsec_pmap = trie_create(&req->pool);
- if (!kr_assume(nsec_pmap))
+ if (kr_fails_assert(nsec_pmap))
goto finally;
for (int psec = KNOT_ANSWER; psec <= KNOT_ADDITIONAL; ++psec) {
ranked_rr_array_t *arr = selected[psec];
@@ -442,7 +442,7 @@ finally:
/** Preliminary checks before stash_rrset(). Don't call if returns <= 0. */
static int stash_rrset_precond(const knot_rrset_t *rr, const struct kr_query *qry/*logs*/)
{
- if (!kr_assume(rr && rr->rclass == KNOT_CLASS_IN))
+ if (kr_fails_assert(rr && rr->rclass == KNOT_CLASS_IN))
return kr_error(EINVAL);
if (!check_rrtype(rr->type, qry))
return kr_ok();
@@ -456,7 +456,7 @@ static int stash_rrset_precond(const knot_rrset_t *rr, const struct kr_query *qr
static bool rrset_has_min_range_or_weird(const knot_rrset_t *rr, const struct kr_query *qry)
{
if (rr->rrs.count != 1) {
- (void)!kr_assume(rr->rrs.count > 0);
+ kr_assert(rr->rrs.count > 0);
if (rr->type == KNOT_RRTYPE_NSEC || rr->type == KNOT_RRTYPE_NSEC3
|| rr->rrs.count == 0) {
return true; /*< weird */
@@ -514,7 +514,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
VERBOSE_MSG(qry, "=> skipping NSEC3 with too many iterations\n");
return kr_ok();
}
- if (!kr_assume(cache && stash_rrset_precond(rr, qry) > 0))
+ if (kr_fails_assert(cache && stash_rrset_precond(rr, qry) > 0))
return kr_error(EINVAL);
int ret = kr_ok();
@@ -543,7 +543,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
/* Skip any NSEC*s that aren't validated or are suspicious. */
if (!kr_rank_test(rank, KR_RANK_SECURE) || rr->rrs.count != 1)
goto return_needs_pkt;
- if (!kr_assume(rr_sigs && rr_sigs->rrs.count && rr_sigs->rrs.rdata)) {
+ if (kr_fails_assert(rr_sigs && rr_sigs->rrs.count && rr_sigs->rrs.rdata)) {
ret = kr_error(EINVAL);
goto return_needs_pkt;
}
@@ -554,7 +554,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
void **npp = NULL;
if (nsec_pmap) {
npp = trie_get_ins(nsec_pmap, (const char *)signer, signer_size);
- if (!kr_assume(npp))
+ if (kr_fails_assert(npp))
return kr_error(ENOMEM);
}
if (rr->type == KNOT_RRTYPE_NSEC) {
@@ -576,14 +576,14 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
key = key_NSEC3(k, encloser, nsec_p_mkHash(rdata->data));
if (npp && !*npp) {
*npp = mm_alloc(&qry->request->pool, np_dlen);
- if (!kr_assume(*npp))
+ if (kr_fails_assert(*npp))
break;
memcpy(*npp, rdata->data, np_dlen);
}
break;
default:
ret = kr_dname_lf(k->buf, encloser, wild_labels);
- if (!kr_assume(ret == 0))
+ if (kr_fails_assert(ret == 0))
goto return_needs_pkt;
key = key_exact_type(k, rr->type);
}
@@ -591,7 +591,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
/* Compute in-cache size for the new data. */
const knot_rdataset_t *rds_sigs = rr_sigs ? &rr_sigs->rrs : NULL;
const int rr_ssize = rdataset_dematerialize_size(&rr->rrs);
- if (!kr_assume(rr_ssize == to_even(rr_ssize)))
+ if (kr_fails_assert(rr_ssize == to_even(rr_ssize)))
return kr_error(EINVAL);
knot_db_val_t val_new_entry = {
.data = NULL,
@@ -603,7 +603,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
ret = entry_h_splice(&val_new_entry, rank, key, k->type, rr->type,
rr->owner, qry, cache, timestamp);
if (ret) return kr_ok(); /* some aren't really errors */
- if (!kr_assume(val_new_entry.data))
+ if (kr_fails_assert(val_new_entry.data))
return kr_error(EFAULT);
const uint32_t ttl = rr->ttl;
@@ -617,7 +617,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
eh->rank = rank;
rdataset_dematerialize(&rr->rrs, eh->data);
rdataset_dematerialize(rds_sigs, eh->data + rr_ssize);
- if (!kr_assume(entry_h_consistent_E(val_new_entry, rr->type)))
+ if (kr_fails_assert(entry_h_consistent_E(val_new_entry, rr->type)))
return kr_error(EINVAL);
#if 0 /* Occasionally useful when debugging some kinds of changes. */
@@ -626,7 +626,7 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
knot_db_val_t val = { NULL, 0 };
ret = cache_op(cache, read, &key, &val, 1);
if (ret != kr_error(ENOENT)) { // ENOENT might happen in some edge case, I guess
- (void)!kr_assume(!ret);
+ kr_assert(!ret);
entry_list_t el;
entry_list_parse(val, el);
}
@@ -675,7 +675,7 @@ static int stash_rrarray_entry(ranked_rr_array_t *arr, int arr_i,
/* TODO: ATM we assume that some properties are the same
* for all RRSIGs in the set (esp. label count). */
ranked_rr_array_entry_t *e = arr->at[j];
- if (!kr_assume(!e->in_progress))
+ if (kr_fails_assert(!e->in_progress))
return kr_error(EINVAL);
bool ok = e->qry_uid == qry->uid && !e->cached
&& e->rr->type == KNOT_RRTYPE_RRSIG
@@ -998,7 +998,7 @@ int kr_cache_check_health(struct kr_cache *cache, int interval)
}
cache->health_timer->data = cache;
}
- (void)!kr_assume(cache->health_timer->data);
+ kr_assert(cache->health_timer->data);
return kr_error(uv_timer_start(cache->health_timer, health_timer_cb, interval, interval));
}
diff --git a/lib/cache/cdb_lmdb.c b/lib/cache/cdb_lmdb.c
index 53c9f8ab..427ec456 100644
--- a/lib/cache/cdb_lmdb.c
+++ b/lib/cache/cdb_lmdb.c
@@ -144,7 +144,7 @@ static void clear_stale_readers(struct lmdb_env *env)
*/
static int txn_get_noresize(struct lmdb_env *env, unsigned int flag, MDB_txn **txn)
{
- if (!kr_assume(!env->txn.rw && (!env->txn.ro || !env->txn.ro_active)))
+ if (kr_fails_assert(!env->txn.rw && (!env->txn.ro || !env->txn.ro_active)))
return kr_error(1);
int attempts = 0;
int ret;
@@ -174,7 +174,7 @@ retry:
/** Obtain a transaction. (they're cached in env->txn) */
static int txn_get(struct lmdb_env *env, MDB_txn **txn, bool rdonly)
{
- if (!kr_assume(env && txn))
+ if (kr_fails_assert(env && txn))
return kr_error(EINVAL);
if (env->txn.rw) {
/* Reuse the *open* RW txn even if only reading is requested.
@@ -194,7 +194,7 @@ static int txn_get(struct lmdb_env *env, MDB_txn **txn, bool rdonly)
int ret = txn_get_noresize(env, 0/*RW*/, &env->txn.rw);
if (ret == MDB_SUCCESS) {
*txn = env->txn.rw;
- (void)!kr_assume(*txn);
+ kr_assert(*txn);
}
return lmdb_error(ret);
}
@@ -211,7 +211,7 @@ static int txn_get(struct lmdb_env *env, MDB_txn **txn, bool rdonly)
}
env->txn.ro_active = true;
*txn = env->txn.ro;
- (void)!kr_assume(*txn);
+ kr_assert(*txn);
return kr_ok();
}
@@ -234,7 +234,7 @@ static int cdb_commit(kr_cdb_pt db, struct kr_cdb_stats *stats)
/** Obtain a read-only cursor (and a read-only transaction). */
static int txn_curs_get(struct lmdb_env *env, MDB_cursor **curs, struct kr_cdb_stats *stats)
{
- if (!kr_assume(env && curs))
+ if (kr_fails_assert(env && curs))
return kr_error(EINVAL);
if (env->txn.ro_curs_active)
goto success;
@@ -255,10 +255,10 @@ static int txn_curs_get(struct lmdb_env *env, MDB_cursor **curs, struct kr_cdb_s
if (ret) return lmdb_error(ret);
env->txn.ro_curs_active = true;
success:
- (void)!kr_assume(env->txn.ro_curs_active && env->txn.ro && env->txn.ro_active
+ kr_assert(env->txn.ro_curs_active && env->txn.ro && env->txn.ro_active
&& !env->txn.rw);
*curs = env->txn.ro_curs;
- (void)!kr_assume(*curs);
+ kr_assert(*curs);
return kr_ok();
}
@@ -291,7 +291,7 @@ static void txn_abort(struct lmdb_env *env)
/*! \brief Close the database. */
static void cdb_close_env(struct lmdb_env *env, struct kr_cdb_stats *stats)
{
- if (!kr_assume(env && env->env))
+ if (kr_fails_assert(env && env->env))
return;
/* Get rid of any transactions. */
@@ -506,7 +506,7 @@ static int cdb_check_health(kr_cdb_pt db, struct kr_cdb_stats *stats)
* The lock is auto-released by OS in case the process finishes in any way (file remains). */
static int lockfile_get(const char *path)
{
- if (!kr_assume(path))
+ if (kr_fails_assert(path))
return kr_error(EINVAL);
const int fd = open(path, O_CREAT|O_RDWR, S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP);
if (fd < 0)
@@ -532,7 +532,7 @@ static int lockfile_get(const char *path)
/** Release and remove lockfile created by lockfile_get(). Return kr_error(). */
static int lockfile_release(int fd)
{
- if (!kr_assume(fd > 0)) // fd == 0 is surely a mistake, in our case at least
+ if (kr_fails_assert(fd > 0)) // fd == 0 is surely a mistake, in our case at least
return kr_error(EINVAL);
if (close(fd)) {
return kr_error(errno);
@@ -783,7 +783,7 @@ static int cdb_match(kr_cdb_pt db, struct kr_cdb_stats *stats,
static int cdb_read_leq(kr_cdb_pt db, struct kr_cdb_stats *stats,
knot_db_val_t *key, knot_db_val_t *val)
{
- if (!kr_assume(db && key && key->data && val))
+ if (kr_fails_assert(db && key && key->data && val))
return kr_error(EINVAL);
struct lmdb_env *env = db2env(db);
MDB_cursor *curs = NULL;
diff --git a/lib/cache/entry_list.c b/lib/cache/entry_list.c
index a0bdf53c..85432133 100644
--- a/lib/cache/entry_list.c
+++ b/lib/cache/entry_list.c
@@ -15,7 +15,7 @@ static int entry_h_len(knot_db_val_t val);
void entry_list_memcpy(struct entry_apex *ea, entry_list_t list)
{
- if (!kr_assume(ea))
+ if (kr_fails_assert(ea))
return;
memset(ea, 0, offsetof(struct entry_apex, data));
ea->has_ns = list[EL_NS ].len;
@@ -39,7 +39,7 @@ void entry_list_memcpy(struct entry_apex *ea, entry_list_t list)
int entry_list_parse(const knot_db_val_t val, entry_list_t list)
{
- if (!kr_assume(val.data && val.len && list))
+ if (kr_fails_assert(val.data && val.len && list))
return kr_error(EINVAL);
/* Parse the apex itself (nsec parameters). */
const struct entry_apex *ea = entry_apex_consistent(val);
@@ -82,23 +82,23 @@ int entry_list_parse(const knot_db_val_t val, entry_list_t list)
case EL_CNAME: has_type = ea->has_cname; break;
case EL_DNAME: has_type = ea->has_dname; break;
default:
- (void)!kr_assume(!EINVAL);
+ kr_assert(!EINVAL);
return kr_error(EINVAL); /* something very bad */
}
if (!has_type) {
list[i].len = 0;
continue;
}
- if (!kr_assume(it < it_bound))
+ if (kr_fails_assert(it < it_bound))
return kr_error(EILSEQ);
const int len = entry_h_len(
(knot_db_val_t){ .data = (void *)it, .len = it_bound - it });
- if (!kr_assume(len >= 0))
+ if (kr_fails_assert(len >= 0))
return kr_error(len);
list[i].len = len;
it += to_even(len);
}
- if (!kr_assume(it == it_bound)) /* better not use it; might be "damaged" */
+ if (kr_fails_assert(it == it_bound)) /* better not use it; might be "damaged" */
return kr_error(EILSEQ);
return kr_ok();
}
@@ -118,7 +118,7 @@ static int entry_h_len(const knot_db_val_t val)
int sets = 2;
while (sets-- > 0) {
d += KR_CACHE_RR_COUNT_SIZE + rdataset_dematerialized_size(d, NULL);
- if (!kr_assume(d <= data_bound))
+ if (kr_fails_assert(d <= data_bound))
return kr_error(EILSEQ);
}
} else { /* A "packet" (opaque ATM). */
@@ -127,7 +127,7 @@ static int entry_h_len(const knot_db_val_t val)
memcpy(&len, d, sizeof(len));
d += 2 + to_even(len);
}
- if (!kr_assume(d <= data_bound))
+ if (kr_fails_assert(d <= data_bound))
return kr_error(EILSEQ);
return d - (uint8_t *)val.data;
}
@@ -218,7 +218,7 @@ int entry_h_splice(
const struct kr_query *qry, struct kr_cache *cache, uint32_t timestamp)
{
//TODO: another review, perhaps incuding the API
- if (!kr_assume(val_new_entry && val_new_entry->len > 0))
+ if (kr_fails_assert(val_new_entry && val_new_entry->len > 0))
return kr_error(EINVAL);
int i_type;
diff --git a/lib/cache/entry_pkt.c b/lib/cache/entry_pkt.c
index 3c40c3b1..4a139dc2 100644
--- a/lib/cache/entry_pkt.c
+++ b/lib/cache/entry_pkt.c
@@ -81,7 +81,7 @@ void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry,
/* All bad cases should be filtered above,
* at least the same way as pktcache in kresd 1.5.x. */
kr_rank_set(&rank, KR_RANK_SECURE);
- } else (void)!kr_assume(false);
+ } else kr_assert(false);
}
const uint16_t pkt_type = knot_pkt_qtype(pkt);
@@ -101,7 +101,7 @@ void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry,
int ret = kr_dname_lf(k->buf, owner, false);
if (ret) {
/* A server might (incorrectly) reply with QDCOUNT=0. */
- (void)!kr_assume(owner == NULL);
+ kr_assert(owner == NULL);
return;
}
key = key_exact_type_maypkt(k, pkt_type);
@@ -116,7 +116,7 @@ void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry,
struct kr_cache *cache = &req->ctx->cache;
ret = entry_h_splice(&val_new_entry, rank, key, k->type, pkt_type,
owner, qry, cache, qry->timestamp.tv_sec);
- if (ret || !kr_assume(val_new_entry.data)) return; /* some aren't really errors */
+ if (ret || kr_fails_assert(val_new_entry.data)) return; /* some aren't really errors */
struct entry_h *eh = val_new_entry.data;
memset(eh, 0, offsetof(struct entry_h, data));
eh->time = qry->timestamp.tv_sec;
@@ -168,13 +168,13 @@ int answer_from_pkt(kr_layer_t *ctx, knot_pkt_t *pkt, uint16_t type,
return kr_error(ENOENT);
/* LATER(opt): try harder to avoid stashing such packets */
}
- if (!kr_assume(ret == KNOT_EOK))
+ if (kr_fails_assert(ret == KNOT_EOK))
return kr_error(ret);
knot_wire_set_id(pkt->wire, msgid);
/* Add rank into the additional field. */
for (size_t i = 0; i < pkt->rrset_count; ++i) {
- (void)!kr_assume(!pkt->rr[i].additional);
+ kr_assert(!pkt->rr[i].additional);
uint8_t *rr_rank = mm_alloc(&pkt->mm, sizeof(*rr_rank));
if (!rr_rank) {
return kr_error(ENOMEM);
diff --git a/lib/cache/entry_rr.c b/lib/cache/entry_rr.c
index 07e9a857..ac82db32 100644
--- a/lib/cache/entry_rr.c
+++ b/lib/cache/entry_rr.c
@@ -34,10 +34,10 @@ void rdataset_dematerialize(const knot_rdataset_t *rds, uint8_t * restrict data)
static int rdataset_materialize(knot_rdataset_t * restrict rds, const uint8_t * const data,
const uint8_t *data_bound, knot_mm_t *pool)
{
- if (!kr_assume(rds && data && data_bound && data_bound > data && !rds->rdata
+ if (kr_fails_assert(rds && data && data_bound && data_bound > data && !rds->rdata
/*&& !((size_t)data & 1)*/))
return kr_error(EINVAL);
- (void)!kr_assume(pool); /* not required, but that's our current usage; guard leaks */
+ kr_assert(pool); /* not required, but that's our current usage; guard leaks */
const uint8_t *d = data; /* iterates over the cache data */
/* First sum up the sizes for wire format length. */
/* TODO: we might overrun here already, but we need to trust cache anyway...*/
@@ -77,15 +77,15 @@ int entry2answer(struct answer *ans, int id,
const bool not_ok = ans->rrsets[id].set.rr || ans->rrsets[id].sig_rds.rdata
|| (type == KNOT_RRTYPE_NSEC && ans->nsec_p.raw)
|| (type == KNOT_RRTYPE_NSEC3 && !ans->nsec_p.raw);
- if (!kr_assume(!not_ok))
+ if (kr_fails_assert(!not_ok))
return kr_error(EINVAL);
/* Materialize the base RRset. */
knot_rrset_t *rr = ans->rrsets[id].set.rr
= knot_rrset_new(owner, type, KNOT_CLASS_IN, new_ttl, ans->mm);
- if (!kr_assume(rr))
+ if (kr_fails_assert(rr))
return kr_error(ENOMEM);
int ret = rdataset_materialize(&rr->rrs, eh->data, eh_bound, ans->mm);
- if (!kr_assume(ret >= 0)) goto fail;
+ if (kr_fails_assert(ret >= 0)) goto fail;
size_t data_off = ret;
ans->rrsets[id].set.rank = eh->rank;
ans->rrsets[id].set.expiring = is_expiring(eh->ttl, new_ttl);
@@ -94,10 +94,10 @@ int entry2answer(struct answer *ans, int id,
if (want_rrsigs) {
ret = rdataset_materialize(&ans->rrsets[id].sig_rds, eh->data + data_off,
eh_bound, ans->mm);
- if (!kr_assume(ret >= 0)) goto fail;
+ if (kr_fails_assert(ret >= 0)) goto fail;
/* Sanity check: we consumed exactly all data. */
int unused_bytes = eh_bound - (uint8_t *)eh->data - data_off - ret;
- if (!kr_assume(unused_bytes == 0)) {
+ if (kr_fails_assert(unused_bytes == 0)) {
kr_log_error("[cach] entry2answer ERROR: unused bytes: %d\n",
unused_bytes);
ret = kr_error(EILSEQ);
diff --git a/lib/cache/impl.h b/lib/cache/impl.h
index 4b934a88..cddd0b74 100644
--- a/lib/cache/impl.h
+++ b/lib/cache/impl.h
@@ -152,7 +152,7 @@ static inline knot_db_val_t key_exact_type(struct key *k, uint16_t type)
/* Sanity check: forbidden types represented in other way(s). */
case KNOT_RRTYPE_NSEC:
case KNOT_RRTYPE_NSEC3:
- (void)!kr_assume(false);
+ kr_assert(false);
return (knot_db_val_t){ NULL, 0 };
}
return key_exact_type_maypkt(k, type);
@@ -199,7 +199,7 @@ static inline uint16_t EL2RRTYPE(enum EL i)
case EL_NS: return KNOT_RRTYPE_NS;
case EL_CNAME: return KNOT_RRTYPE_CNAME;
case EL_DNAME: return KNOT_RRTYPE_DNAME;
- default: (void)!kr_assume(false); return 0;
+ default: kr_assert(false); return 0;
}
}
@@ -342,7 +342,7 @@ enum {
/** Materialize RRset + RRSIGs into ans->rrsets[id].
* LATER(optim.): it's slightly wasteful that we allocate knot_rrset_t for the packet
*
- * \return error code. They are all bad conditions and "guarded" by kr_assume().
+ * \return error code. They are all bad conditions and "guarded" by kresd's assertions.
*/
int entry2answer(struct answer *ans, int id,
const struct entry_h *eh, const uint8_t *eh_bound,
diff --git a/lib/cache/knot_pkt.c b/lib/cache/knot_pkt.c
index b7b97c91..f0723c22 100644
--- a/lib/cache/knot_pkt.c
+++ b/lib/cache/knot_pkt.c
@@ -64,7 +64,7 @@ int pkt_append(knot_pkt_t *pkt, const struct answer_rrset *rrset, uint8_t rank)
/* write both sets */
const knot_rdataset_t *rdss[2] = { &rrset->set.rr->rrs, &rrset->sig_rds };
for (int i = 0; i < rrset_cnt; ++i) {
- if (!kr_assume(rdss[i]->count))
+ if (kr_fails_assert(rdss[i]->count))
return kr_error(EINVAL);
/* allocate rank */
uint8_t *rr_rank = mm_alloc(&pkt->mm, sizeof(*rr_rank));
diff --git a/lib/cache/nsec1.c b/lib/cache/nsec1.c
index 7b177449..32e969eb 100644
--- a/lib/cache/nsec1.c
+++ b/lib/cache/nsec1.c
@@ -18,16 +18,16 @@ static int dname_wire_reconstruct(knot_dname_t *buf, const struct key *k,
{
/* Reconstruct from key: first the ending, then zone name. */
int ret = knot_dname_lf2wire(buf, kwz.len, kwz.data);
- if (!kr_assume(ret >= 0)) {
+ if (kr_fails_assert(ret >= 0)) {
VERBOSE_MSG(NULL, "=> NSEC: LF2wire ret = %d\n", ret);
return ret;
}
/* The last written byte is the zero label for root -> overwrite. */
knot_dname_t *zone_start = buf + ret - 1;
- if (!kr_assume(*zone_start == '\0'))
+ if (kr_fails_assert(*zone_start == '\0'))
return kr_error(EFAULT);
ret = knot_dname_to_wire(zone_start, k->zname, KNOT_DNAME_MAXLEN - kwz.len);
- if (!kr_assume(ret == k->zlf_len + 1))
+ if (kr_fails_assert(ret == k->zlf_len + 1))
return ret < 0 ? ret : kr_error(EILSEQ);
return kr_ok();
}
@@ -40,13 +40,13 @@ knot_db_val_t key_NSEC1(struct key *k, const knot_dname_t *name, bool add_wildca
int ret;
const bool ok = k && name
&& !(ret = kr_dname_lf(k->buf, name, add_wildcard));
- if (!kr_assume(ok))
+ if (kr_fails_assert(ok))
return (knot_db_val_t){ NULL, 0 };
uint8_t *begin = k->buf + 1 + k->zlf_len; /* one byte after zone's zero */
uint8_t *end = k->buf + 1 + k->buf[0]; /* we don't use the final zero in key,
* but move it anyway */
- if (!kr_assume(end >= begin))
+ if (kr_fails_assert(end >= begin))
return (knot_db_val_t){ NULL, 0 };
int key_len;
if (end > begin) {
@@ -129,16 +129,16 @@ static const char * find_leq_NSEC1(struct kr_cache *cache, const struct kr_query
{
/* Do the cache operation. */
const size_t nwz_off = key_nwz_off(k);
- if (!kr_assume(key.data && key.len >= nwz_off))
+ if (kr_fails_assert(key.data && key.len >= nwz_off))
return "range search ERROR";
knot_db_val_t key_nsec = key;
knot_db_val_t val = { NULL, 0 };
int ret = cache_op(cache, read_leq, &key_nsec, &val);
if (ret < 0) {
- if (kr_assume(ret == kr_error(ENOENT))) {
- return "range search miss";
- } else {
+ if (kr_fails_assert(ret == kr_error(ENOENT))) {
return "range search ERROR";
+ } else {
+ return "range search miss";
}
}
if (value) {
@@ -196,7 +196,7 @@ static const char * find_leq_NSEC1(struct kr_cache *cache, const struct kr_query
memcpy(&next_len, next + offsetof(knot_rdata_t, len), sizeof(next_len));
next_data = next + offsetof(knot_rdata_t, data);
}
- if (!kr_assume(KR_CACHE_RR_COUNT_SIZE == 2 && get_uint16(eh->data) != 0)) {
+ if (kr_fails_assert(KR_CACHE_RR_COUNT_SIZE == 2 && get_uint16(eh->data) != 0)) {
return "ERROR"; /* TODO: more checks? */
}
/*
@@ -207,7 +207,7 @@ static const char * find_leq_NSEC1(struct kr_cache *cache, const struct kr_query
*/
knot_dname_t ch_buf[KNOT_DNAME_MAXLEN];
knot_dname_t *chs = kwz_high ? kwz_high->data : ch_buf;
- if (!kr_assume(chs))
+ if (kr_fails_assert(chs))
return "EINVAL";
{
@@ -223,20 +223,20 @@ static const char * find_leq_NSEC1(struct kr_cache *cache, const struct kr_query
ret = kr_dname_lf(chs, lower_buf, false);
}
- if (!kr_assume(ret == 0))
+ if (kr_fails_assert(ret == 0))
return "ERROR";
knot_db_val_t kwz_hi = { /* skip the zone name */
.data = chs + 1 + k->zlf_len,
.len = chs[0] - k->zlf_len,
};
- if (!kr_assume((ssize_t)(kwz_hi.len) >= 0))
+ if (kr_fails_assert((ssize_t)(kwz_hi.len) >= 0))
return "ERROR";
/* 2. do the actual range check. */
const knot_db_val_t kwz_sname = {
.data = (void *)/*const-cast*/(k->buf + 1 + nwz_off),
.len = k->buf[0] - k->zlf_len,
};
- if (!kr_assume((ssize_t)(kwz_sname.len) >= 0))
+ if (kr_fails_assert((ssize_t)(kwz_sname.len) >= 0))
return "ERROR";
bool covers = /* we know for sure that the low end is before kwz_sname */
3 == kwz_between((knot_db_val_t){ NULL, 0 }, kwz_sname, kwz_hi);
@@ -259,7 +259,7 @@ int nsec1_encloser(struct key *k, struct answer *ans,
/* Basic sanity check. */
const bool ok = k && ans && clencl_labels && cover_low_kwz && cover_hi_kwz
&& qry && cache;
- if (!kr_assume(ok))
+ if (kr_fails_assert(ok))
return kr_error(EINVAL);
/* Find a previous-or-equal name+NSEC in cache covering the QNAME,
@@ -297,7 +297,7 @@ int nsec1_encloser(struct key *k, struct answer *ans,
const knot_rrset_t *nsec_rr = ans->rrsets[AR_NSEC].set.rr;
const uint8_t *bm = knot_nsec_bitmap(nsec_rr->rrs.rdata);
uint16_t bm_size = knot_nsec_bitmap_len(nsec_rr->rrs.rdata);
- if (!kr_assume(bm))
+ if (kr_fails_assert(bm))
return kr_error(EFAULT);
if (exact_match) {
@@ -339,7 +339,7 @@ int nsec1_encloser(struct key *k, struct answer *ans,
*/
knot_dname_t next[KNOT_DNAME_MAXLEN];
int ret = knot_dname_to_wire(next, knot_nsec_next(nsec_rr->rrs.rdata), sizeof(next));
- if (!kr_assume(ret >= 0))
+ if (kr_fails_assert(ret >= 0))
return kr_error(ret);
knot_dname_to_lower(next);
*clencl_labels = MAX(
@@ -380,14 +380,14 @@ int nsec1_src_synth(struct key *k, struct answer *ans, const knot_dname_t *clenc
/* Construct key for the source of synthesis. */
knot_db_val_t key = key_NSEC1(k, clencl_name, true);
const size_t nwz_off = key_nwz_off(k);
- if (!kr_assume(key.data && key.len >= nwz_off))
+ if (kr_fails_assert(key.data && key.len >= nwz_off))
return kr_error(1);
/* Check if our sname-covering NSEC also covers/matches SS. */
knot_db_val_t kwz = {
.data = (uint8_t *)key.data + nwz_off,
.len = key.len - nwz_off,
};
- if (!kr_assume((ssize_t)(kwz.len) >= 0))
+ if (kr_fails_assert((ssize_t)(kwz.len) >= 0))
return kr_error(EINVAL);
const int cmp = kwz_between(cover_low_kwz, kwz, cover_hi_kwz);
if (nonexistence_ok(cmp, ans->rrsets[AR_NSEC].set.rr)) {
@@ -421,7 +421,7 @@ int nsec1_src_synth(struct key *k, struct answer *ans, const knot_dname_t *clenc
nsec_rr = ans->rrsets[AR_WILD].set.rr;
}
- if (!kr_assume(nsec_rr))
+ if (kr_fails_assert(nsec_rr))
return kr_error(EFAULT);
const uint32_t new_ttl_log =
kr_verbose_status ? nsec_rr->ttl : -1;
@@ -429,7 +429,7 @@ int nsec1_src_synth(struct key *k, struct answer *ans, const knot_dname_t *clenc
uint16_t bm_size = knot_nsec_bitmap_len(nsec_rr->rrs.rdata);
int ret;
struct answer_rrset * const arw = &ans->rrsets[AR_WILD];
- if (!kr_assume(bm)) {
+ if (kr_fails_assert(bm)) {
ret = kr_error(EFAULT);
goto clean_wild;
}
diff --git a/lib/cache/nsec3.c b/lib/cache/nsec3.c
index cee5dabb..928b680c 100644
--- a/lib/cache/nsec3.c
+++ b/lib/cache/nsec3.c
@@ -21,7 +21,7 @@ static const knot_db_val_t VAL_EMPTY = { NULL, 0 };
static knot_db_val_t key_NSEC3_common(struct key *k, const knot_dname_t *zname,
const nsec_p_hash_t nsec_p_hash)
{
- if (!kr_assume(k && zname && !kr_dname_lf(k->buf, zname, false)))
+ if (kr_fails_assert(k && zname && !kr_dname_lf(k->buf, zname, false)))
return VAL_EMPTY;
/* CACHE_KEY_DEF: key == zone's dname_lf + '\0' + '3' + nsec_p hash (4B)
@@ -84,7 +84,7 @@ static knot_db_val_t key_NSEC3_name(struct key *k, const knot_dname_t *name,
.data = (uint8_t *)/*const-cast*/name,
};
- if (!kr_assume(nsec_p->libknot.iterations <= KR_NSEC3_MAX_ITERATIONS)) {
+ if (kr_fails_assert(nsec_p->libknot.iterations <= KR_NSEC3_MAX_ITERATIONS)) {
/* This is mainly defensive; it shouldn't happen thanks to downgrades. */
return VAL_EMPTY;
}
@@ -95,14 +95,14 @@ static knot_db_val_t key_NSEC3_name(struct key *k, const knot_dname_t *name,
};
int ret = dnssec_nsec3_hash(&dname, &nsec_p->libknot, &hash);
if (ret != DNSSEC_EOK) return VAL_EMPTY;
- if (!kr_assume(hash.size == NSEC3_HASH_LEN))
+ if (kr_fails_assert(hash.size == NSEC3_HASH_LEN))
return VAL_EMPTY;
#else
dnssec_binary_t hash = { .size = 0, .data = NULL };
int ret = dnssec_nsec3_hash(&dname, &nsec_p->libknot, &hash);
if (ret != DNSSEC_EOK) return VAL_EMPTY;
- if (!kr_assume(hash.size == NSEC3_HASH_LEN && hash.data))
+ if (kr_fails_assert(hash.size == NSEC3_HASH_LEN && hash.data))
return VAL_EMPTY;
memcpy(knot_db_val_bound(val), hash.data, NSEC3_HASH_LEN);
free(hash.data);
@@ -136,7 +136,7 @@ static const char * find_leq_NSEC3(struct kr_cache *cache, const struct kr_query
{
/* Do the cache operation. */
const size_t hash_off = key_nsec3_hash_off(k);
- if (!kr_assume(key.data && key.len >= hash_off))
+ if (kr_fails_assert(key.data && key.len >= hash_off))
return "range search ERROR";
knot_db_val_t key_found = key;
knot_db_val_t val = { NULL, 0 };
@@ -145,10 +145,10 @@ static const char * find_leq_NSEC3(struct kr_cache *cache, const struct kr_query
* would probably be slightly more efficient with LMDB,
* but the code complexity would grow considerably. */
if (ret < 0) {
- if (kr_assume(ret == kr_error(ENOENT))) {
- return "range search miss";
- } else {
+ if (kr_fails_assert(ret == kr_error(ENOENT))) {
return "range search ERROR";
+ } else {
+ return "range search miss";
}
}
if (value) {
@@ -199,7 +199,7 @@ static const char * find_leq_NSEC3(struct kr_cache *cache, const struct kr_query
}
/* We know it starts before sname, so let's check the other end.
* A. find the next hash and check its length. */
- if (!kr_assume(KR_CACHE_RR_COUNT_SIZE == 2 && get_uint16(eh->data) != 0))
+ if (kr_fails_assert(KR_CACHE_RR_COUNT_SIZE == 2 && get_uint16(eh->data) != 0))
return "ERROR"; /* TODO: more checks? Also, `next` computation is kinda messy. */
const uint8_t *hash_next = nsec_p_raw + nsec_p_len
+ sizeof(uint8_t) /* hash length from rfc5155 */;
@@ -227,7 +227,7 @@ static void key_NSEC3_hash2text(const knot_db_val_t key, char *text)
/* CACHE_KEY_DEF ^^ */
int len = base32hex_encode(hash_raw, NSEC3_HASH_LEN, (uint8_t *)text,
NSEC3_HASH_TXT_LEN);
- (void)!kr_assume(len == NSEC3_HASH_TXT_LEN);
+ kr_assert(len == NSEC3_HASH_TXT_LEN);
text[NSEC3_HASH_TXT_LEN] = '\0';
}
@@ -237,7 +237,7 @@ static int dname_wire_reconstruct(knot_dname_t *buf, const knot_dname_t *zname,
const uint8_t *hash_raw)
{
int len = base32hex_encode(hash_raw, NSEC3_HASH_LEN, buf + 1, NSEC3_HASH_TXT_LEN);
- if (!kr_assume(len == NSEC3_HASH_TXT_LEN))
+ if (kr_fails_assert(len == NSEC3_HASH_TXT_LEN))
return kr_error(EINVAL);
buf[0] = len;
int ret = knot_dname_to_wire(buf + 1 + len, zname, KNOT_DNAME_MAXLEN - 1 - len);
@@ -259,7 +259,7 @@ int nsec3_encloser(struct key *k, struct answer *ans,
/* Basic sanity check. */
const bool ok = k && k->zname && ans && clencl_labels
&& qry && cache;
- if (!kr_assume(ok))
+ if (kr_fails_assert(ok))
return kr_error(EINVAL);
/*** Find the closest encloser - cycle: name starting at sname,
@@ -351,7 +351,7 @@ int nsec3_encloser(struct key *k, struct answer *ans,
const knot_rrset_t *nsec_rr = ans->rrsets[ans_id].set.rr;
const uint8_t *bm = knot_nsec3_bitmap(nsec_rr->rrs.rdata);
uint16_t bm_size = knot_nsec3_bitmap_len(nsec_rr->rrs.rdata);
- if (!kr_assume(bm))
+ if (kr_fails_assert(bm))
return kr_error(EFAULT);
if (name_labels == sname_labels) {
if (kr_nsec_bitmap_nodata_check(bm, bm_size, qry->stype,
@@ -369,7 +369,7 @@ int nsec3_encloser(struct key *k, struct answer *ans,
} /* else */
- if (!kr_assume(name_labels + 1 == last_nxproven_labels))
+ if (kr_fails_assert(name_labels + 1 == last_nxproven_labels))
return kr_error(EINVAL);
if (kr_nsec_children_in_zone_check(bm, bm_size) != 0) {
VERBOSE_MSG(qry,
@@ -461,7 +461,7 @@ int nsec3_src_synth(struct key *k, struct answer *ans, const knot_dname_t *clenc
/* The wildcard exists. Find if it's NODATA - check type bitmap. */
const uint8_t *bm = knot_nsec3_bitmap(nsec_rr->rrs.rdata);
uint16_t bm_size = knot_nsec3_bitmap_len(nsec_rr->rrs.rdata);
- if (!kr_assume(bm))
+ if (kr_fails_assert(bm))
return kr_error(EFAULT);
if (kr_nsec_bitmap_nodata_check(bm, bm_size, qry->stype, nsec_rr->owner) == 0) {
/* NODATA proven; just need to add SOA+RRSIG later */
diff --git a/lib/cache/peek.c b/lib/cache/peek.c
index 33b9177d..5ea1c4d6 100644
--- a/lib/cache/peek.c
+++ b/lib/cache/peek.c
@@ -59,12 +59,12 @@ static void nsec_p_cleanup(struct nsec_p *nsec_p)
* \return error code, e.g. kr_error(ESTALE) */
static int nsec_p_ttl(knot_db_val_t entry, const uint32_t timestamp, int32_t *new_ttl)
{
- if (!kr_assume(entry.data))
+ if (kr_fails_assert(entry.data))
return kr_error(EINVAL);
uint32_t stamp;
if (!entry.len)
return kr_error(ENOENT);
- if (!kr_assume(entry.len >= sizeof(stamp)))
+ if (kr_fails_assert(entry.len >= sizeof(stamp)))
return kr_error(EILSEQ);
memcpy(&stamp, entry.data, sizeof(stamp));
int32_t newttl = stamp - timestamp;
@@ -111,7 +111,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
struct key k_storage, *k = &k_storage;
int ret = kr_dname_lf(k->buf, qry->sname, false);
- if (!kr_assume(ret == 0))
+ if (kr_fails_assert(ret == 0))
return ctx->state;
const uint8_t lowest_rank = get_lowest_rank(qry, qry->sname, qry->stype);
@@ -129,7 +129,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
}
if (!ret) {
return KR_STATE_DONE;
- } else if (!kr_assume(ret == kr_error(ENOENT))) {
+ } else if (kr_fails_assert(ret == kr_error(ENOENT))) {
VERBOSE_MSG(qry, "=> exact hit error: %d %s\n", ret, kr_strerror(ret));
return ctx->state;
}
@@ -137,19 +137,19 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
/**** 1b. otherwise, find the longest prefix zone/xNAME (with OK time+rank). [...] */
k->zname = qry->sname;
ret = kr_dname_lf(k->buf, k->zname, false); /* LATER(optim.): probably remove */
- if (!kr_assume(ret == 0))
+ if (kr_fails_assert(ret == 0))
return ctx->state;
entry_list_t el;
ret = closest_NS(cache, k, el, qry, false, qry->stype == KNOT_RRTYPE_DS);
if (ret) {
- if (!kr_assume(ret == kr_error(ENOENT)) || !el[0].len) {
+ if (kr_fails_assert(ret == kr_error(ENOENT)) || !el[0].len) {
return ctx->state;
}
}
switch (k->type) {
case KNOT_RRTYPE_CNAME: {
const knot_db_val_t v = el[EL_CNAME];
- if (!kr_assume(v.data && v.len))
+ if (kr_fails_assert(v.data && v.len))
return ctx->state;
const int32_t new_ttl = get_new_ttl(v.data, qry, qry->sname,
KNOT_RRTYPE_CNAME, qry->timestamp.tv_sec);
@@ -159,7 +159,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
}
case KNOT_RRTYPE_DNAME: {
const knot_db_val_t v = el[EL_DNAME];
- if (!kr_assume(v.data && v.len))
+ if (kr_fails_assert(v.data && v.len))
return ctx->state;
/* TTL: for simplicity, we just ask for TTL of the generated CNAME. */
const int32_t new_ttl = get_new_ttl(v.data, qry, qry->sname,
@@ -183,7 +183,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
if (!eh) { /* fall back to root hints? */
ret = kr_zonecut_set_sbelt(req->ctx, &qry->zone_cut);
if (ret) return ctx->state;
- (void)!kr_assume(!qry->zone_cut.parent);
+ kr_assert(!qry->zone_cut.parent);
//VERBOSE_MSG(qry, "=> using root hints\n");
//qry->flags.AWAIT_CUT = false;
@@ -248,7 +248,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
ret = cache_op(cache, read, &key, &val, 1);
const struct entry_h *eh;
if (ret || !(eh = entry_h_consistent_E(val, KNOT_RRTYPE_SOA))) {
- (void)!kr_assume(ret); /* only want to catch `eh` failures */
+ kr_assert(ret); /* only want to catch `eh` failures */
VERBOSE_MSG(qry, "=> SOA missed\n");
return ctx->state;
}
@@ -278,7 +278,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
real_rcode = KNOT_RCODE_NXDOMAIN;
break;
default:
- (void)!kr_assume(false);
+ kr_assert(false);
case 0: /* i.e. nothing was found */
/* LATER(optim.): zone cut? */
VERBOSE_MSG(qry, "=> cache miss\n");
@@ -288,7 +288,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
if (pkt_renew(pkt, qry->sname, qry->stype)
|| knot_pkt_begin(pkt, KNOT_ANSWER)
) {
- (void)!kr_assume(false);
+ kr_assert(false);
return ctx->state;
}
knot_wire_set_rcode(pkt->wire, real_rcode);
@@ -299,7 +299,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
if (!ans.rrsets[i].set.rr) continue;
expiring = expiring || ans.rrsets[i].set.expiring;
ret = pkt_append(pkt, &ans.rrsets[i], ans.rrsets[i].set.rank);
- if (!kr_assume(ret == 0))
+ if (kr_fails_assert(ret == 0))
return ctx->state;
}
@@ -346,7 +346,7 @@ static int peek_encloser(
}
/* We should have either a match or a cover at this point. */
- if (!kr_assume(ans->rcode == PKT_NODATA || ans->rcode == PKT_NXDOMAIN))
+ if (kr_fails_assert(ans->rcode == PKT_NODATA || ans->rcode == PKT_NXDOMAIN))
return kr_error(EINVAL);
const bool ncloser_covered = ans->rcode == PKT_NXDOMAIN;
@@ -362,13 +362,13 @@ static int peek_encloser(
int ret = nsec1_src_synth(k, ans, clencl_name,
cover_low_kwz, cover_hi_kwz, qry, cache);
if (ret == AR_SOA) return 0;
- (void)!kr_assume(ret <= 0);
+ kr_assert(ret <= 0);
if (ret) return ret;
} else if (ncloser_covered && ans->nsec_p.raw && !clencl_is_tentative) {
int ret = nsec3_src_synth(k, ans, clencl_name, qry, cache);
if (ret == AR_SOA) return 0;
- (void)!kr_assume(ret <= 0);
+ kr_assert(ret <= 0);
if (ret) return ret;
} /* else (!ncloser_covered) so no wildcard checks needed,
@@ -380,7 +380,7 @@ static int peek_encloser(
return kr_ok(); /* decrease indentation */
/* Construct key for exact qry->stype + source of synthesis. */
int ret = kr_dname_lf(k->buf, clencl_name, true);
- if (!kr_assume(ret == 0))
+ if (kr_fails_assert(ret == 0))
return kr_error(ret);
const uint16_t types[] = { qry->stype, KNOT_RRTYPE_CNAME };
for (int i = 0; i < (2 - (qry->stype == KNOT_RRTYPE_CNAME)); ++i) {
@@ -388,7 +388,7 @@ static int peek_encloser(
lowest_rank, qry, cache);
if (ret == kr_ok()) {
return kr_ok();
- } else if (!kr_assume(ret == kr_error(ENOENT) || ret == kr_error(ESTALE))) {
+ } else if (kr_fails_assert(ret == kr_error(ENOENT) || ret == kr_error(ESTALE))) {
return kr_error(ret);
}
/* else continue */
@@ -413,7 +413,7 @@ static void answer_simple_qflags(struct kr_qflags *qf, const struct entry_h *eh,
}
#define CHECK_RET(ret) do { \
- if (!kr_assume((ret) >= 0)) return kr_error((ret)); \
+ if (kr_fails_assert((ret) >= 0)) return kr_error((ret)); \
} while (false)
static int answer_simple_hit(kr_layer_t *ctx, knot_pkt_t *pkt, uint16_t type,
@@ -513,7 +513,7 @@ static int found_exact_hit(kr_layer_t *ctx, knot_pkt_t *pkt, knot_db_val_t val,
int ret = entry_h_seek(&val, qry->stype);
if (ret) return ret;
const struct entry_h *eh = entry_h_consistent_E(val, qry->stype);
- if (!kr_assume(eh))
+ if (kr_fails_assert(eh))
return kr_error(ENOENT);
// LATER: recovery in case of error, perhaps via removing the entry?
// LATER(optim): pehaps optimize the zone cut search
@@ -556,7 +556,7 @@ static int try_wild(struct key *k, struct answer *ans, const knot_dname_t *clenc
ret = entry_h_seek(&val, type);
}
if (ret) {
- if (!kr_assume(ret == kr_error(ENOENT)))
+ if (kr_fails_assert(ret == kr_error(ENOENT)))
VERBOSE_MSG(qry, "=> wildcard: hit error %d %s\n",
ret, strerror(abs(ret)));
WITH_VERBOSE(qry) {
@@ -569,7 +569,7 @@ static int try_wild(struct key *k, struct answer *ans, const knot_dname_t *clenc
}
/* Check if the record is OK. */
const struct entry_h *eh = entry_h_consistent_E(val, type);
- if (!kr_assume(eh))
+ if (kr_fails_assert(eh))
return kr_error(ret);
// LATER: recovery in case of error, perhaps via removing the entry?
int32_t new_ttl = get_new_ttl(eh, qry, qry->sname, type, qry->timestamp.tv_sec);
@@ -593,7 +593,7 @@ static int try_wild(struct key *k, struct answer *ans, const knot_dname_t *clenc
int kr_cache_closest_apex(struct kr_cache *cache, const knot_dname_t *name, bool is_DS,
knot_dname_t ** apex)
{
- if (!kr_assume(cache && cache->db && name && apex && *apex == NULL))
+ if (kr_fails_assert(cache && cache->db && name && apex && *apex == NULL))
return kr_error(EINVAL);
struct key k_storage, *k = &k_storage;
int ret = kr_dname_lf(k->buf, name, false);
@@ -651,7 +651,7 @@ static int closest_NS(struct kr_cache *cache, struct key *k, entry_list_t el,
knot_db_val_t val;
int ret = cache_op(cache, read, &key, &val, 1);
if (ret == kr_error(ENOENT)) goto next_label;
- if (!kr_assume(ret == 0)) {
+ if (kr_fails_assert(ret == 0)) {
if (need_zero) memset(el, 0, sizeof(entry_list_t));
return kr_error(ret);
}
@@ -659,7 +659,7 @@ static int closest_NS(struct kr_cache *cache, struct key *k, entry_list_t el,
/* Check consistency, find any type;
* using `goto` for shortening by another label. */
ret = entry_list_parse(val, el);
- if (!kr_assume(ret == 0)) // do something about it?
+ if (kr_fails_assert(ret == 0)) // do something about it?
goto next_label;
need_zero = false;
/* More types are possible; try in order.
@@ -697,7 +697,7 @@ static int closest_NS(struct kr_cache *cache, struct key *k, entry_list_t el,
/* We miss root NS in cache, but let's at least assume it exists. */
k->type = KNOT_RRTYPE_NS;
k->zlf_len = zlf_len;
- (void)!kr_assume(zlf_len == 0);
+ kr_assert(zlf_len == 0);
if (need_zero) memset(el, 0, sizeof(entry_list_t));
return kr_error(ENOENT);
}
@@ -741,7 +741,7 @@ static int check_NS_entry(struct key *k, const knot_db_val_t entry, const int i,
type = EL2RRTYPE(i);
/* Find the entry for the type, check positivity, TTL */
const struct entry_h *eh = entry_h_consistent_E(entry, type);
- if (!kr_assume(eh)) {
+ if (kr_fails_assert(eh)) {
VERBOSE_MSG(qry, "=> EH not consistent\n");
return kr_error(EILSEQ);
}
diff --git a/lib/cookies/alg_sha.c b/lib/cookies/alg_sha.c
index 5c5da495..43b19eef 100644
--- a/lib/cookies/alg_sha.c
+++ b/lib/cookies/alg_sha.c
@@ -21,7 +21,7 @@
static inline void update_hash(struct hmac_sha256_ctx *ctx,
const struct sockaddr *sa)
{
- if (!kr_assume(ctx && sa))
+ if (kr_fails_assert(ctx && sa))
return;
int addr_len = kr_inaddr_len(sa);
diff --git a/lib/cookies/helper.c b/lib/cookies/helper.c
index d7c04f17..8ef21517 100644
--- a/lib/cookies/helper.c
+++ b/lib/cookies/helper.c
@@ -15,7 +15,7 @@
static const uint8_t *peek_and_check_cc(kr_cookie_lru_t *cache, const void *sa,
const uint8_t *cc, uint16_t cc_len)
{
- if (!kr_assume(cache && sa && cc && cc_len))
+ if (kr_fails_assert(cache && sa && cc && cc_len))
return NULL;
const uint8_t *cached_opt = kr_cookie_lru_get(cache, sa);
@@ -38,7 +38,7 @@ static const uint8_t *peek_and_check_cc(kr_cookie_lru_t *cache, const void *sa,
static int opt_rr_put_cookie(knot_rrset_t *opt_rr, uint8_t *data,
uint16_t data_len, knot_mm_t *mm)
{
- if (!kr_assume(opt_rr && data && data_len > 0))
+ if (kr_fails_assert(opt_rr && data && data_len > 0))
return kr_error(EINVAL);
const uint8_t *cc = NULL, *sc = NULL;
@@ -48,7 +48,7 @@ static int opt_rr_put_cookie(knot_rrset_t *opt_rr, uint8_t *data,
&sc, &sc_len);
if (ret != KNOT_EOK)
return kr_error(EINVAL);
- if (!kr_assume(data_len == cc_len + sc_len))
+ if (kr_fails_assert(data_len == cc_len + sc_len))
return kr_error(EINVAL);
uint16_t cookies_size = data_len;
@@ -58,14 +58,14 @@ static int opt_rr_put_cookie(knot_rrset_t *opt_rr, uint8_t *data,
cookies_size, &cookies_data, mm);
if (ret != KNOT_EOK)
return kr_error(EINVAL);
- if (!kr_assume(cookies_data))
+ if (kr_fails_assert(cookies_data))
return kr_error(EINVAL);
cookies_size = knot_edns_opt_cookie_write(cc, cc_len, sc, sc_len,
cookies_data, cookies_size);
if (cookies_size == 0)
return kr_error(EINVAL);
- if (!kr_assume(cookies_size == data_len))
+ if (kr_fails_assert(cookies_size == data_len))
return kr_error(EINVAL);
return kr_ok();
@@ -76,7 +76,7 @@ static int opt_rr_put_cookie(knot_rrset_t *opt_rr, uint8_t *data,
*/
static int opt_rr_put_cookie_opt(knot_rrset_t *opt_rr, uint8_t *option, knot_mm_t *mm)
{
- if (!kr_assume(opt_rr && option))
+ if (kr_fails_assert(opt_rr && option))
return kr_error(EINVAL);
uint16_t opt_code = knot_edns_opt_get_code(option);
@@ -121,7 +121,7 @@ int kr_request_put_cookie(const struct kr_cookie_comp *clnt_comp,
const struct knot_cc_alg *cc_alg = kr_cc_alg_get(clnt_comp->alg_id);
if (!cc_alg)
return kr_error(EINVAL);
- if (!kr_assume(cc_alg->gen_func))
+ if (kr_fails_assert(cc_alg->gen_func))
return kr_error(EINVAL);
cc_len = cc_alg->gen_func(&input, cc, cc_len);
if (cc_len != KNOT_OPT_COOKIE_CLNT)
@@ -185,7 +185,7 @@ int kr_answer_write_cookie(struct knot_sc_input *sc_input,
&pkt->mm);
if (ret != KNOT_EOK)
return kr_error(ENOMEM);
- if (!kr_assume(cookie))
+ if (kr_fails_assert(cookie))
return kr_error(EFAULT);
/*
diff --git a/lib/dnssec.c b/lib/dnssec.c
index 24e69c1d..4acc9717 100644
--- a/lib/dnssec.c
+++ b/lib/dnssec.c
@@ -277,7 +277,7 @@ static int kr_rrset_validate_with_key(kr_rrset_validation_ctx_t *vctx,
bool kr_ds_algo_support(const knot_rrset_t *ta)
{
- if (!kr_assume(ta && ta->type == KNOT_RRTYPE_DS && ta->rclass == KNOT_CLASS_IN))
+ if (kr_fails_assert(ta && ta->type == KNOT_RRTYPE_DS && ta->rclass == KNOT_CLASS_IN))
return false;
/* Check if at least one DS has a usable algorithm pair. */
knot_rdata_t *rdata_i = ta->rrs.rdata;
@@ -298,7 +298,7 @@ int kr_dnskeys_trusted(kr_rrset_validation_ctx_t *vctx, const knot_rrset_t *ta)
const bool ok = pkt && keys && ta && ta->rrs.count && ta->rrs.rdata
&& ta->type == KNOT_RRTYPE_DS;
- if (!kr_assume(ok))
+ if (kr_fails_assert(ok))
return kr_error(EINVAL);
/* RFC4035 5.2, bullet 1
@@ -325,7 +325,7 @@ int kr_dnskeys_trusted(kr_rrset_validation_ctx_t *vctx, const knot_rrset_t *ta)
continue;
}
kr_dnssec_key_free(&key);
- (void)!kr_assume(vctx->result == 0);
+ kr_assert(vctx->result == 0);
return vctx->result;
}
@@ -435,7 +435,7 @@ int kr_dnssec_key_from_rdata(struct dseckey **key, const knot_dname_t *kown, con
void kr_dnssec_key_free(struct dseckey **key)
{
- if (!kr_assume(key))
+ if (kr_fails_assert(key))
return;
dnssec_key_free((dnssec_key_t *) *key);
@@ -448,7 +448,7 @@ int kr_dnssec_matches_name_and_type(const ranked_rr_array_t *rrs, uint32_t qry_u
int ret = kr_error(ENOENT);
for (size_t i = 0; i < rrs->len; ++i) {
const ranked_rr_array_entry_t *entry = rrs->at[i];
- if (!kr_assume(!entry->in_progress))
+ if (kr_fails_assert(!entry->in_progress))
return kr_error(EINVAL);
const knot_rrset_t *nsec = entry->rr;
if (entry->qry_uid != qry_uid || entry->yielded) {
diff --git a/lib/dnssec/nsec.c b/lib/dnssec/nsec.c
index ae4a4046..578cec58 100644
--- a/lib/dnssec/nsec.c
+++ b/lib/dnssec/nsec.c
@@ -101,7 +101,7 @@ static int dname_cmp(const knot_dname_t *d1, const knot_dname_t *d2)
*/
static int nsec_covers(const knot_rrset_t *nsec, const knot_dname_t *sname)
{
- if (!kr_assume(nsec && sname))
+ if (kr_fails_assert(nsec && sname))
return kr_error(EINVAL);
if (dname_cmp(sname, nsec->owner) <= 0)
return abs(ENOENT); /* 'sname' before 'owner', so can't be covered */
@@ -110,7 +110,7 @@ static int nsec_covers(const knot_rrset_t *nsec, const knot_dname_t *sname)
/* We have to lower-case it with libknot >= 2.7; see also RFC 6840 5.1. */
knot_dname_t next[KNOT_DNAME_MAXLEN];
int ret = knot_dname_to_wire(next, knot_nsec_next(nsec->rrs.rdata), sizeof(next));
- if (!kr_assume(ret >= 0))
+ if (kr_fails_assert(ret >= 0))
return kr_error(ret);
knot_dname_to_lower(next);
@@ -172,7 +172,7 @@ static int nsec_covers(const knot_rrset_t *nsec, const knot_dname_t *sname)
static int name_error_response_check_rr(int *flags, const knot_rrset_t *nsec,
const knot_dname_t *name)
{
- if (!kr_assume(flags && nsec && name))
+ if (kr_fails_assert(flags && nsec && name))
return kr_error(EINVAL);
if (nsec_covers(nsec, name) == 0)
@@ -232,7 +232,7 @@ int kr_nsec_name_error_response_check(const knot_pkt_t *pkt, knot_section_t sect
*/
static int coverign_rrsig_labels(const knot_rrset_t *nsec, const knot_pktsection_t *sec)
{
- if (!kr_assume(nsec && sec))
+ if (kr_fails_assert(nsec && sec))
return kr_error(EINVAL);
int ret = kr_error(ENOENT);
@@ -318,7 +318,7 @@ int kr_nsec_bitmap_nodata_check(const uint8_t *bm, uint16_t bm_size, uint16_t ty
static int no_data_response_check_rrtype(int *flags, const knot_rrset_t *nsec,
uint16_t type)
{
- if (!kr_assume(flags && nsec))
+ if (kr_fails_assert(flags && nsec))
return kr_error(EINVAL);
const uint8_t *bm = knot_nsec_bitmap(nsec->rrs.rdata);
@@ -339,7 +339,7 @@ static int no_data_response_check_rrtype(int *flags, const knot_rrset_t *nsec,
static int no_data_wildcard_existence_check(int *flags, const knot_rrset_t *nsec,
const knot_pktsection_t *sec)
{
- if (!kr_assume(flags && nsec && sec))
+ if (kr_fails_assert(flags && nsec && sec))
return kr_error(EINVAL);
int rrsig_labels = coverign_rrsig_labels(nsec, sec);
@@ -536,7 +536,7 @@ int kr_nsec_matches_name_and_type(const knot_rrset_t *nsec,
/* It's not secure enough to just check a single bit for (some) other types,
* but we don't (currently) only use this API for NS. See RFC 6840 sec. 4.
*/
- if (!kr_assume(type == KNOT_RRTYPE_NS && nsec && name))
+ if (kr_fails_assert(type == KNOT_RRTYPE_NS && nsec && name))
return kr_error(EINVAL);
if (!knot_dname_is_equal(nsec->owner, name))
return kr_error(ENOENT);
diff --git a/lib/dnssec/nsec3.c b/lib/dnssec/nsec3.c
index 04e178ac..5bd9f097 100644
--- a/lib/dnssec/nsec3.c
+++ b/lib/dnssec/nsec3.c
@@ -34,11 +34,11 @@
*/
static int nsec3_parameters(dnssec_nsec3_params_t *params, const knot_rrset_t *nsec3)
{
- if (!kr_assume(params && nsec3))
+ if (kr_fails_assert(params && nsec3))
return kr_error(EINVAL);
const knot_rdata_t *rr = knot_rdataset_at(&nsec3->rrs, 0);
- if (!kr_assume(rr))
+ if (kr_fails_assert(rr))
return kr_error(EINVAL);
/* Every NSEC3 RR contains data from NSEC3PARAMS. */
@@ -67,11 +67,11 @@ static int nsec3_parameters(dnssec_nsec3_params_t *params, const knot_rrset_t *n
static int hash_name(dnssec_binary_t *hash, const dnssec_nsec3_params_t *params,
const knot_dname_t *name)
{
- if (!kr_assume(hash && params))
+ if (kr_fails_assert(hash && params))
return kr_error(EINVAL);
if (!name)
return kr_error(EINVAL);
- if (!kr_assume(params->iterations <= KR_NSEC3_MAX_ITERATIONS)) {
+ if (kr_fails_assert(params->iterations <= KR_NSEC3_MAX_ITERATIONS)) {
/* This if is mainly defensive; it shouldn't happen. */
return kr_error(EINVAL);
}
@@ -98,7 +98,7 @@ static int hash_name(dnssec_binary_t *hash, const dnssec_nsec3_params_t *params,
*/
static int read_owner_hash(dnssec_binary_t *hash, size_t max_hash_size, const knot_rrset_t *nsec3)
{
- if (!kr_assume(hash && nsec3 && hash->data))
+ if (kr_fails_assert(hash && nsec3 && hash->data))
return kr_error(EINVAL);
int32_t ret = base32hex_decode(nsec3->owner + 1, nsec3->owner[0], hash->data, max_hash_size);
@@ -121,7 +121,7 @@ static int read_owner_hash(dnssec_binary_t *hash, size_t max_hash_size, const kn
static int closest_encloser_match(int *flags, const knot_rrset_t *nsec3,
const knot_dname_t *name, unsigned *skipped)
{
- if (!kr_assume(flags && nsec3 && name && skipped))
+ if (kr_fails_assert(flags && nsec3 && name && skipped))
return kr_error(EINVAL);
uint8_t hash_data[MAX_HASH_BYTES] = {0, };
@@ -185,7 +185,7 @@ fail:
*/
static int covers_name(int *flags, const knot_rrset_t *nsec3, const knot_dname_t *name)
{
- if (!kr_assume(flags && nsec3 && name))
+ if (kr_fails_assert(flags && nsec3 && name))
return kr_error(EINVAL);
uint8_t hash_data[MAX_HASH_BYTES] = { 0, };
@@ -286,7 +286,7 @@ static bool has_optout(const knot_rrset_t *nsec3)
*/
static int matches_name(const knot_rrset_t *nsec3, const knot_dname_t *name)
{
- if (!kr_assume(nsec3 && name))
+ if (kr_fails_assert(nsec3 && name))
return kr_error(EINVAL);
uint8_t hash_data[MAX_HASH_BYTES] = { 0, };
@@ -331,7 +331,7 @@ fail:
*/
static int prepend_asterisk(uint8_t *tgt, size_t maxlen, const knot_dname_t *name)
{
- if (!kr_assume(maxlen >= 3))
+ if (kr_fails_assert(maxlen >= 3))
return kr_error(EINVAL);
memcpy(tgt, "\1*", 3);
return knot_dname_to_wire(tgt + 2, name, maxlen - 2);
@@ -390,7 +390,7 @@ static int closest_encloser_proof(const knot_pkt_t *pkt,
--skipped;
next_closer = sname;
for (unsigned j = 0; j < skipped; ++j) {
- if (!kr_assume(next_closer[0]))
+ if (kr_fails_assert(next_closer[0]))
return kr_error(EINVAL);
next_closer = knot_wire_next_label(next_closer, NULL);
}
@@ -555,7 +555,7 @@ int kr_nsec3_wildcard_answer_response_check(const knot_pkt_t *pkt, knot_section_
/* Compute the next closer name. */
for (int i = 0; i < trim_to_next; ++i) {
- if (!kr_assume(sname[0]))
+ if (kr_fails_assert(sname[0]))
return kr_error(EINVAL);
sname = knot_wire_next_label(sname, NULL);
}
@@ -603,7 +603,7 @@ int kr_nsec3_no_data(const knot_pkt_t *pkt, knot_section_t section_id,
if (ret != 0)
return ret;
- if (!kr_assume(encloser_name && covering_next_nsec3))
+ if (kr_fails_assert(encloser_name && covering_next_nsec3))
return kr_error(EFAULT);
ret = matches_closest_encloser_wildcard(pkt, section_id,
encloser_name, stype);
@@ -713,7 +713,7 @@ int kr_nsec3_matches_name_and_type(const knot_rrset_t *nsec3,
/* It's not secure enough to just check a single bit for (some) other types,
* but we don't (currently) only use this API for NS. See RFC 6840 sec. 4.
*/
- if (!kr_assume(type == KNOT_RRTYPE_NS))
+ if (kr_fails_assert(type == KNOT_RRTYPE_NS))
return kr_error(EINVAL);
int ret = matches_name(nsec3, name);
if (ret)
diff --git a/lib/dnssec/signature.c b/lib/dnssec/signature.c
index caf00bde..5eba8177 100644
--- a/lib/dnssec/signature.c
+++ b/lib/dnssec/signature.c
@@ -42,7 +42,7 @@ fail:
int kr_authenticate_referral(const knot_rrset_t *ref, const dnssec_key_t *key)
{
- if (!kr_assume(ref && key))
+ if (kr_fails_assert(ref && key))
return kr_error(EINVAL);
if (ref->type != KNOT_RRTYPE_DS)
return kr_error(EINVAL);
@@ -73,7 +73,7 @@ int kr_authenticate_referral(const knot_rrset_t *ref, const dnssec_key_t *key)
*/
static int adjust_wire_ttl(uint8_t *wire, size_t wire_size, uint32_t new_ttl)
{
- if (!kr_assume(wire))
+ if (kr_fails_assert(wire))
return kr_error(EINVAL);
static_assert(sizeof(uint16_t) == 2, "uint16_t must be exactly 2 bytes");
static_assert(sizeof(uint32_t) == 4, "uint32_t must be exactly 4 bytes");
@@ -97,7 +97,7 @@ static int adjust_wire_ttl(uint8_t *wire, size_t wire_size, uint32_t new_ttl)
rdlen = ntohs(rdlen);
i += sizeof(uint16_t) + rdlen;
- if (!kr_assume(i <= wire_size))
+ if (kr_fails_assert(i <= wire_size))
return kr_error(EINVAL);
}
@@ -117,7 +117,7 @@ static int adjust_wire_ttl(uint8_t *wire, size_t wire_size, uint32_t new_ttl)
#define RRSIG_RDATA_SIGNER_OFFSET 18
static int sign_ctx_add_self(dnssec_sign_ctx_t *ctx, const uint8_t *rdata)
{
- if (!kr_assume(ctx && rdata))
+ if (kr_fails_assert(ctx && rdata))
return kr_error(EINVAL);
int result;
@@ -190,10 +190,10 @@ static int sign_ctx_add_records(dnssec_sign_ctx_t *ctx, const knot_rrset_t *cove
for (uint16_t i = 0; i < covered->rrs.count; ++i) {
/* RR(i) = name | type | class | OrigTTL | RDATA length | RDATA */
for (int j = 0; j < trim_labels; ++j) {
- if (!kr_assume(beginp[0]))
+ if (kr_fails_assert(beginp[0]))
return kr_error(EINVAL);
beginp = (uint8_t *) knot_wire_next_label(beginp, NULL);
- if (!kr_assume(beginp))
+ if (kr_fails_assert(beginp))
return kr_error(EFAULT);
}
*(--beginp) = '*';
diff --git a/lib/generic/lru.c b/lib/generic/lru.c
index 67345219..5ad791f7 100644
--- a/lib/generic/lru.c
+++ b/lib/generic/lru.c
@@ -58,7 +58,7 @@ static void * item_val(const struct lru *lru, struct lru_item *it)
/** @internal Free each item. */
KR_EXPORT void lru_free_items_impl(struct lru *lru)
{
- if (!kr_assume(lru))
+ if (kr_fails_assert(lru))
return;
for (size_t i = 0; i < (1 << (size_t)lru->log_groups); ++i) {
lru_group_t *g = &lru->groups[i];
@@ -70,7 +70,7 @@ KR_EXPORT void lru_free_items_impl(struct lru *lru)
/** @internal See lru_apply. */
KR_EXPORT void lru_apply_impl(struct lru *lru, lru_apply_fun f, void *baton)
{
- if (!kr_assume(lru && f))
+ if (kr_fails_assert(lru && f))
return;
for (size_t i = 0; i < (1 << (size_t)lru->log_groups); ++i) {
lru_group_t *g = &lru->groups[i];
@@ -88,7 +88,7 @@ KR_EXPORT void lru_apply_impl(struct lru *lru, lru_apply_fun f, void *baton)
g->hashes[j] = 0;
break;
default:
- (void)!kr_assume(ret == LRU_APPLY_DO_NOTHING);
+ kr_assert(ret == LRU_APPLY_DO_NOTHING);
}
}
}
@@ -98,7 +98,7 @@ KR_EXPORT void lru_apply_impl(struct lru *lru, lru_apply_fun f, void *baton)
KR_EXPORT struct lru * lru_create_impl(uint max_slots, uint val_alignment,
knot_mm_t *mm_array, knot_mm_t *mm)
{
- if (!kr_assume(max_slots && __builtin_popcount(val_alignment) == 1))
+ if (kr_fails_assert(max_slots && __builtin_popcount(val_alignment) == 1))
return NULL;
// let lru->log_groups = ceil(log2(max_slots / (float) assoc))
// without trying for efficiency
@@ -107,7 +107,7 @@ KR_EXPORT struct lru * lru_create_impl(uint max_slots, uint val_alignment,
for (uint s = group_count - 1; s; s /= 2)
++log_groups;
group_count = 1 << log_groups;
- if (!kr_assume(max_slots <= group_count * LRU_ASSOC && group_count * LRU_ASSOC < 2 * max_slots))
+ if (kr_fails_assert(max_slots <= group_count * LRU_ASSOC && group_count * LRU_ASSOC < 2 * max_slots))
return NULL;
/* Get a sufficiently aligning mm_array if NULL is passed. */
@@ -117,7 +117,7 @@ KR_EXPORT struct lru * lru_create_impl(uint max_slots, uint val_alignment,
mm_ctx_init_aligned(&mm_array_default, alignof(struct lru));
mm_array = &mm_array_default;
}
- if (!kr_assume(mm_array->alloc && mm_array->alloc != (knot_mm_alloc_t)mp_alloc))
+ if (kr_fails_assert(mm_array->alloc && mm_array->alloc != (knot_mm_alloc_t)mp_alloc))
return NULL;
size_t size = offsetof(struct lru, groups[group_count]);
@@ -162,7 +162,7 @@ KR_EXPORT void * lru_get_impl(struct lru *lru, const char *key, uint key_len,
{
bool ok = lru && (key || !key_len) && key_len <= UINT16_MAX
&& (!do_insert || val_len <= UINT16_MAX);
- if (!kr_assume(ok))
+ if (kr_fails_assert(ok))
return NULL; // reasonable fallback when not debugging
bool is_new_entry = false;
// find the right group
@@ -218,7 +218,7 @@ KR_EXPORT void * lru_get_impl(struct lru *lru, const char *key, uint key_len,
group_dec_counts(g);
return NULL;
insert: // insert into position i (incl. key)
- if (!kr_assume(i < LRU_ASSOC))
+ if (kr_fails_assert(i < LRU_ASSOC))
return NULL;
g->hashes[i] = khash_top;
it = g->items[i];
@@ -238,7 +238,7 @@ insert: // insert into position i (incl. key)
memset(item_val(lru, it), 0, val_len); // clear the value
is_new_entry = true;
found: // key and hash OK on g->items[i]; now update stamps
- if (!kr_assume(i < LRU_ASSOC))
+ if (kr_fails_assert(i < LRU_ASSOC))
return NULL;
group_inc_count(g, i);
if (is_new) {
diff --git a/lib/generic/pack.h b/lib/generic/pack.h
index d4ee390d..a26468f8 100644
--- a/lib/generic/pack.h
+++ b/lib/generic/pack.h
@@ -108,7 +108,7 @@ static inline pack_objlen_t pack_obj_len(uint8_t *it)
/** Return packed object value. */
static inline uint8_t *pack_obj_val(uint8_t *it)
{
- if (!kr_assume(it))
+ if (kr_fails_assert(it))
return NULL;
return it + sizeof(pack_objlen_t);
}
@@ -116,7 +116,7 @@ static inline uint8_t *pack_obj_val(uint8_t *it)
/** Return pointer to next packed object. */
static inline uint8_t *pack_obj_next(uint8_t *it)
{
- if (!kr_assume(it))
+ if (kr_fails_assert(it))
return NULL;
return pack_obj_val(it) + pack_obj_len(it);
}
@@ -141,7 +141,7 @@ static inline uint8_t *pack_last(pack_t pack)
*/
static inline int pack_obj_push(pack_t *pack, const uint8_t *obj, pack_objlen_t len)
{
- if (!kr_assume(pack && obj))
+ if (kr_fails_assert(pack && obj))
return kr_error(EINVAL);
size_t packed_len = len + sizeof(len);
if (pack->len + packed_len > pack->cap)
@@ -159,7 +159,7 @@ static inline int pack_obj_push(pack_t *pack, const uint8_t *obj, pack_objlen_t
*/
static inline uint8_t *pack_obj_find(pack_t *pack, const uint8_t *obj, pack_objlen_t len)
{
- if (!pack || !kr_assume(obj))
+ if (!pack || kr_fails_assert(obj))
return NULL;
uint8_t *endp = pack_tail(*pack);
uint8_t *it = pack_head(*pack);
@@ -177,7 +177,7 @@ static inline uint8_t *pack_obj_find(pack_t *pack, const uint8_t *obj, pack_objl
*/
static inline int pack_obj_del(pack_t *pack, const uint8_t *obj, pack_objlen_t len)
{
- if (!pack || !kr_assume(obj))
+ if (!pack || kr_fails_assert(obj))
return kr_error(EINVAL);
uint8_t *endp = pack_tail(*pack);
uint8_t *it = pack_obj_find(pack, obj, len);
@@ -194,7 +194,7 @@ static inline int pack_obj_del(pack_t *pack, const uint8_t *obj, pack_objlen_t l
* @return kr_error(ENOMEM) on allocation failure. */
static inline int pack_clone(pack_t **dst, const pack_t *src, knot_mm_t *pool)
{
- if (!kr_assume(dst && src))
+ if (kr_fails_assert(dst && src))
return kr_error(EINVAL);
/* Get a valid pack_t. */
if (!*dst) {
diff --git a/lib/generic/queue.c b/lib/generic/queue.c
index 39d59ba2..1e49f22b 100644
--- a/lib/generic/queue.c
+++ b/lib/generic/queue.c
@@ -21,7 +21,7 @@ void queue_init_impl(struct queue *q, size_t item_size)
void queue_deinit_impl(struct queue *q)
{
- if (!kr_assume(q))
+ if (kr_fails_assert(q))
return;
struct queue_chunk *p = q->head;
while (p != NULL) {
diff --git a/lib/generic/trie.c b/lib/generic/trie.c
index c83eb2ee..2a666092 100644
--- a/lib/generic/trie.c
+++ b/lib/generic/trie.c
@@ -265,7 +265,7 @@ void trie_free(trie_t *tbl)
void trie_clear(trie_t *tbl)
{
- if (!kr_assume(tbl))
+ if (kr_fails_assert(tbl))
return;
if (!tbl->weight)
return;
@@ -447,7 +447,7 @@ static void ns_init(nstack_t *ns, trie_t *tbl)
/*! \brief Free inside of the stack, i.e. not the passed pointer itself. */
static void ns_cleanup(nstack_t *ns)
{
- if (!kr_assume(ns && ns->stack))
+ if (kr_fails_assert(ns && ns->stack))
return;
if (likely(ns->stack == ns->stack_init))
return;
@@ -752,7 +752,7 @@ static int mk_leaf(node_t *leaf, const char *key, uint32_t len, knot_mm_t *mm)
trie_val_t* trie_get_ins(trie_t *tbl, const char *key, uint32_t len)
{
- if (!kr_assume(tbl))
+ if (kr_fails_assert(tbl))
return NULL;
// First leaf in an empty tbl?
if (unlikely(!tbl->weight)) {
@@ -846,7 +846,7 @@ int trie_apply(trie_t *tbl, int (*f)(trie_val_t *, void *), void *d)
/* These are all thin wrappers around static Tns* functions. */
trie_it_t* trie_it_begin(trie_t *tbl)
{
- if (!kr_assume(tbl))
+ if (kr_fails_assert(tbl))
return NULL;
trie_it_t *it = malloc(sizeof(nstack_t));
if (!it)
diff --git a/lib/layer/iterate.c b/lib/layer/iterate.c
index 499e9746..efdcc73c 100644
--- a/lib/layer/iterate.c
+++ b/lib/layer/iterate.c
@@ -307,7 +307,7 @@ static int update_cut(knot_pkt_t *pkt, const knot_rrset_t *rr,
continue;
}
int ret = kr_zonecut_add(cut, ns_name, NULL, 0);
- (void)!kr_assume(!ret);
+ kr_assert(!ret);
/* Choose when to use glue records. */
const bool in_bailiwick =
@@ -389,7 +389,7 @@ static int pick_authority(knot_pkt_t *pkt, struct kr_request *req, bool to_wire)
static int process_authority(knot_pkt_t *pkt, struct kr_request *req)
{
struct kr_query *qry = req->current_query;
- if (!kr_assume(!qry->flags.STUB))
+ if (kr_fails_assert(!qry->flags.STUB))
return KR_STATE_FAIL;
int result = KR_STATE_CONSUME;
@@ -493,7 +493,7 @@ static int finalize_answer(knot_pkt_t *pkt, struct kr_request *req)
static int unroll_cname(knot_pkt_t *pkt, struct kr_request *req, bool referral, const knot_dname_t **cname_ret)
{
struct kr_query *query = req->current_query;
- if (!kr_assume(!query->flags.STUB))
+ if (kr_fails_assert(!query->flags.STUB))
return KR_STATE_FAIL;
/* Process answer type */
const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
@@ -849,7 +849,7 @@ static int process_answer(knot_pkt_t *pkt, struct kr_request *req)
static int process_stub(knot_pkt_t *pkt, struct kr_request *req)
{
struct kr_query *query = req->current_query;
- if (!kr_assume(query->flags.STUB))
+ if (kr_fails_assert(query->flags.STUB))
return KR_STATE_FAIL;
/* Pick all answer RRs. */
const knot_pktsection_t *an = knot_pkt_section(pkt, KNOT_ANSWER);
@@ -944,7 +944,7 @@ int kr_make_query(struct kr_query *query, knot_pkt_t *pkt)
static int prepare_query(kr_layer_t *ctx, knot_pkt_t *pkt)
{
- if (!kr_assume(pkt && ctx))
+ if (kr_fails_assert(pkt && ctx))
return KR_STATE_FAIL;
struct kr_request *req = ctx->req;
struct kr_query *query = req->current_query;
@@ -997,7 +997,7 @@ static bool satisfied_by_additional(const struct kr_query *qry)
*/
static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
{
- if (!kr_assume(pkt && ctx))
+ if (kr_fails_assert(pkt && ctx))
return KR_STATE_FAIL;
struct kr_request *req = ctx->req;
struct kr_query *query = req->current_query;
@@ -1149,7 +1149,7 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
* we trigger another cache *reading* attempt
* for the subsequent PRODUCE round.
*/
- (void)!kr_assume(query->flags.NONAUTH);
+ kr_assert(query->flags.NONAUTH);
query->flags.CACHE_TRIED = false;
VERBOSE_MSG("<= referral response, but cache should stop us short now\n");
} else {
diff --git a/lib/layer/validate.c b/lib/layer/validate.c
index f081d869..5f8a6008 100644
--- a/lib/layer/validate.c
+++ b/lib/layer/validate.c
@@ -86,7 +86,7 @@ static void log_bogus_rrsig(kr_rrset_validation_ctx_t *vctx, const struct kr_que
/** Check that given CNAME could be generated by given DNAME (no DNSSEC validation). */
static bool cname_matches_dname(const knot_rrset_t *rr_cn, const knot_rrset_t *rr_dn)
{
- if (!kr_assume(rr_cn->type == KNOT_RRTYPE_CNAME && rr_dn->type == KNOT_RRTYPE_DNAME))
+ if (kr_fails_assert(rr_cn->type == KNOT_RRTYPE_CNAME && rr_dn->type == KNOT_RRTYPE_DNAME))
return false;
/* When DNAME substitution happens, let's consider the "prefix"
* that is carried over and the "suffix" that is replaced.
@@ -423,7 +423,7 @@ static int update_parent_keys(struct kr_request *req, uint16_t answer_type)
{
struct kr_query *qry = req->current_query;
struct kr_query *parent = qry->parent;
- if (!kr_assume(parent))
+ if (kr_fails_assert(parent))
return KR_STATE_FAIL;
switch(answer_type) {
case KNOT_RRTYPE_DNSKEY:
@@ -748,7 +748,7 @@ static int unsigned_forward(kr_layer_t *ctx, knot_pkt_t *pkt)
qry->flags.DNSSEC_WANT = false;
qry->flags.DNSSEC_INSECURE = true;
if (qry->forward_flags.CNAME) {
- if (!kr_assume(qry->cname_parent))
+ if (kr_fails_assert(qry->cname_parent))
return KR_STATE_FAIL;
qry->cname_parent->flags.DNSSEC_WANT = false;
qry->cname_parent->flags.DNSSEC_INSECURE = true;
@@ -766,7 +766,7 @@ static int unsigned_forward(kr_layer_t *ctx, knot_pkt_t *pkt)
qry->flags.DNSSEC_WANT = false;
qry->flags.DNSSEC_INSECURE = true;
if (qry->forward_flags.CNAME) {
- if (!kr_assume(qry->cname_parent))
+ if (kr_fails_assert(qry->cname_parent))
return KR_STATE_FAIL;
qry->cname_parent->flags.DNSSEC_WANT = false;
qry->cname_parent->flags.DNSSEC_INSECURE = true;
diff --git a/lib/module.c b/lib/module.c
index 577ed959..98d2799b 100644
--- a/lib/module.c
+++ b/lib/module.c
@@ -36,7 +36,7 @@ static void *load_symbol(void *lib, const char *prefix, const char *name)
static int load_library(struct kr_module *module, const char *name, const char *path)
{
- if (!kr_assume(module && name && path))
+ if (kr_fails_assert(module && name && path))
return kr_error(EINVAL);
/* Absolute or relative path (then only library search path is used). */
auto_free char *lib_path = kr_strcatdup(4, path, "/", name, LIBEXT);
diff --git a/lib/resolve.c b/lib/resolve.c
index 698e1f17..d3fa992a 100644
--- a/lib/resolve.c
+++ b/lib/resolve.c
@@ -48,12 +48,12 @@ bool kr_rank_check(uint8_t rank)
bool kr_rank_test(uint8_t rank, uint8_t kr_flag)
{
- if (!kr_assume(kr_rank_check(rank) && kr_rank_check(kr_flag)))
+ if (kr_fails_assert(kr_rank_check(rank) && kr_rank_check(kr_flag)))
return false;
if (kr_flag == KR_RANK_AUTH) {
return rank & KR_RANK_AUTH;
}
- if (!kr_assume(!(kr_flag & KR_RANK_AUTH)))
+ if (kr_fails_assert(!(kr_flag & KR_RANK_AUTH)))
return false;
/* The rest are exclusive values - exactly one has to be set. */
return (rank & ~KR_RANK_AUTH) == kr_flag;
@@ -114,7 +114,7 @@ static int answer_finalize_yield(kr_layer_t *ctx) { return kr_ok(); }
(r)->state = layer.api->func(&layer, ##__VA_ARGS__); \
/* It's an easy mistake to return error code, for example. */ \
/* (though we could allow such an overload later) */ \
- if (!kr_assume(kr_state_consistent((r)->state))) { \
+ if (kr_fails_assert(kr_state_consistent((r)->state))) { \
(r)->state = KR_STATE_FAIL; \
} else \
if ((r)->state == KR_STATE_YIELD) { \
@@ -152,7 +152,7 @@ static void randomized_qname_case(knot_dname_t * restrict qname, uint32_t secret
{
if (secret == 0)
return;
- if (!kr_assume(qname))
+ if (kr_fails_assert(qname))
return;
const int len = knot_dname_size(qname) - 2; /* Skip first, last label. First is length, last is always root */
for (int i = 0; i < len; ++i) {
@@ -196,7 +196,7 @@ static void check_empty_nonterms(struct kr_query *qry, knot_pkt_t *pkt, struct k
kr_make_query(qry, pkt);
break;
}
- (void)!kr_assume(target[0]);
+ kr_assert(target[0]);
target = knot_wire_next_label(target, NULL);
}
kr_cache_commit(cache);
@@ -313,7 +313,7 @@ static int edns_put(knot_pkt_t *pkt, bool reclaim)
}
}
/* Write to packet. */
- if (!kr_assume(pkt->current == KNOT_ADDITIONAL))
+ if (kr_fails_assert(pkt->current == KNOT_ADDITIONAL))
return kr_error(EINVAL);
return knot_pkt_put(pkt, KNOT_COMPR_HINT_NONE, pkt->opt_rr, KNOT_PF_FREE);
}
@@ -384,7 +384,7 @@ static int write_extra_ranked_records(const ranked_rr_array_t *arr, uint16_t reo
for (size_t i = 0; i < arr->len; ++i) {
ranked_rr_array_entry_t * entry = arr->at[i];
- (void)!kr_assume(!entry->in_progress);
+ kr_assert(!entry->in_progress);
if (!entry->to_wire) {
continue;
}
@@ -420,7 +420,7 @@ static int write_extra_ranked_records(const ranked_rr_array_t *arr, uint16_t reo
/** @internal Add an EDNS padding RR into the answer if requested and required. */
static int answer_padding(struct kr_request *request)
{
- if (!kr_assume(request && request->answer && request->ctx))
+ if (kr_fails_assert(request && request->answer && request->ctx))
return kr_error(EINVAL);
if (!request->qsource.flags.tls) {
/* Not meaningful to pad without encryption. */
@@ -500,7 +500,7 @@ static void answer_finalize(struct kr_request *request)
for (int psec = KNOT_ANSWER; psec <= KNOT_ADDITIONAL; ++psec) {
const ranked_rr_array_t *arr = selected[psec];
for (ssize_t i = 0; i < arr->len; ++i) {
- if (!kr_assume(!arr->at[i]->to_wire)) {
+ if (kr_fails_assert(!arr->at[i]->to_wire)) {
answer_fail(request);
return;
}
@@ -705,12 +705,12 @@ knot_pkt_t *kr_request_ensure_answer(struct kr_request *request)
return request->answer;
const knot_pkt_t *qs_pkt = request->qsource.packet;
- if (!kr_assume(qs_pkt))
+ if (kr_fails_assert(qs_pkt))
goto fail;
// Find answer_max: limit on DNS wire length.
uint16_t answer_max;
const struct kr_request_qsource_flags *qs_flags = &request->qsource.flags;
- if (!kr_assume((qs_flags->tls || qs_flags->http) ? qs_flags->tcp : true))
+ if (kr_fails_assert((qs_flags->tls || qs_flags->http) ? qs_flags->tcp : true))
goto fail;
if (!request->qsource.addr || qs_flags->tcp) {
// not on UDP
@@ -735,7 +735,7 @@ knot_pkt_t *kr_request_ensure_answer(struct kr_request *request)
knot_pkt_t *answer = request->answer =
knot_pkt_new(wire, answer_max, &request->pool);
if (!answer || knot_pkt_init_response(answer, qs_pkt) != 0) {
- (void)!kr_assume(!answer); // otherwise we messed something up
+ kr_assert(!answer); // otherwise we messed something up
goto enomem;
}
if (!wire)
@@ -912,7 +912,7 @@ static int forward_trust_chain_check(struct kr_request *request, struct kr_query
return KR_STATE_PRODUCE;
}
- if (!kr_assume(qry->flags.FORWARD))
+ if (kr_fails_assert(qry->flags.FORWARD))
return KR_STATE_FAIL;
if (!trust_anchors) {
@@ -1427,7 +1427,7 @@ static bool outbound_request_update_cookies(struct kr_request *req,
const struct sockaddr *src,
const struct sockaddr *dst)
{
- if (!kr_assume(req))
+ if (kr_fails_assert(req))
return false;
/* RFC7873 4.1 strongly requires server address. */
@@ -1504,7 +1504,7 @@ int kr_resolve_checkout(struct kr_request *request, const struct sockaddr *src,
type = SOCK_STREAM;
break;
default:
- (void)!kr_assume(false);
+ kr_assert(false);
}
int state = request->state;
ITERATE_LAYERS(request, qry, checkout, packet, &transport->address.ip, type);
diff --git a/lib/resolve.h b/lib/resolve.h
index 8f883aab..88605b1e 100644
--- a/lib/resolve.h
+++ b/lib/resolve.h
@@ -130,9 +130,9 @@ bool kr_rank_test(uint8_t rank, uint8_t kr_flag) KR_PURE KR_EXPORT;
/** Set the rank state. The _AUTH flag is kept as it was. */
static inline void kr_rank_set(uint8_t *rank, uint8_t kr_flag)
{
- if (!kr_assume(rank && kr_rank_check(*rank)))
+ if (kr_fails_assert(rank && kr_rank_check(*rank)))
return;
- if (!kr_assume(kr_rank_check(kr_flag) && !(kr_flag & KR_RANK_AUTH)))
+ if (kr_fails_assert(kr_rank_check(kr_flag) && !(kr_flag & KR_RANK_AUTH)))
return;
*rank = kr_flag | (*rank & KR_RANK_AUTH);
}
diff --git a/lib/rplan.c b/lib/rplan.c
index bf0d0de7..e88b982b 100644
--- a/lib/rplan.c
+++ b/lib/rplan.c
@@ -166,7 +166,7 @@ static struct kr_query *kr_rplan_push_query(struct kr_rplan *rplan,
qry->reorder = qry->flags.REORDER_RR ? kr_rand_bytes(sizeof(qry->reorder)) : 0;
- (void)!kr_assume((rplan->pending.len == 0 && rplan->resolved.len == 0)
+ kr_assert((rplan->pending.len == 0 && rplan->resolved.len == 0)
== (rplan->initial == NULL));
if (rplan->initial == NULL) {
rplan->initial = qry;
diff --git a/lib/selection.c b/lib/selection.c
index f1f35cdb..535af169 100644
--- a/lib/selection.c
+++ b/lib/selection.c
@@ -55,7 +55,7 @@ static const char *kr_selection_error_str(enum kr_selection_error err) {
case KR_SELECTION_NUMBER_OF_ERRORS: break; // not a valid code
#undef X
}
- (void)!kr_assume(false); // we want to define all; compiler helps by -Wswitch (no default:)
+ kr_assert(false); // we want to define all; compiler helps by -Wswitch (no default:)
return NULL;
}
@@ -152,7 +152,7 @@ struct rtt_state get_rtt_state(const uint8_t *ip, size_t len,
if (cache->api->read(db, stats, &key, &value, 1)) {
state = default_rtt_state;
- } else if (!kr_assume(value.len == sizeof(struct rtt_state))) {
+ } else if (kr_fails_assert(value.len == sizeof(struct rtt_state))) {
// shouldn't happen but let's be more robust
state = default_rtt_state;
} else { // memcpy is safe for unaligned case (on non-x86)
@@ -195,7 +195,7 @@ void bytes_to_ip(uint8_t *bytes, size_t len, uint16_t port, union inaddr *dst)
dst->ip6.sin6_port = htons(port);
break;
default:
- (void)!kr_assume(false);
+ kr_assert(false);
}
}
@@ -207,7 +207,7 @@ uint8_t *ip_to_bytes(const union inaddr *src, size_t len)
case sizeof(struct in6_addr):
return (uint8_t *)&src->ip6.sin6_addr;
default:
- (void)!kr_assume(false);
+ kr_assert(false);
return NULL;
}
}
@@ -263,7 +263,7 @@ static void invalidate_dead_upstream(struct address_state *state,
// period when we don't want to use the address
state->generation = -1;
} else {
- (void)!kr_assume(now >= rs->dead_since + retry_timeout);
+ kr_assert(now >= rs->dead_since + retry_timeout);
// we allow to retry the server now
// TODO: perhaps tweak *rs?
}
@@ -486,7 +486,7 @@ struct kr_transport *select_transport(struct choice choices[], int choices_len,
port = KR_DNS_PORT;
break;
default:
- (void)!kr_assume(false);
+ kr_assert(false);
return NULL;
}
}
@@ -502,7 +502,7 @@ struct kr_transport *select_transport(struct choice choices[], int choices_len,
transport->address.ip6.sin6_port = htons(port);
break;
default:
- (void)!kr_assume(false);
+ kr_assert(false);
return NULL;
}
@@ -671,7 +671,7 @@ void error(struct kr_query *qry, struct address_state *addr_state,
addr_state->broken = true;
break;
default:
- (void)!kr_assume(false);
+ kr_assert(false);
return;
}
diff --git a/lib/selection_forward.c b/lib/selection_forward.c
index 6fcf4f23..ea2ec6ec 100644
--- a/lib/selection_forward.c
+++ b/lib/selection_forward.c
@@ -51,7 +51,7 @@ void forward_choose_transport(struct kr_query *qry,
addr_len = sizeof(struct in6_addr);
break;
default:
- (void)!kr_assume(false);
+ kr_assert(false);
*transport = NULL;
return;
}
diff --git a/lib/selection_iter.c b/lib/selection_iter.c
index beea36d4..b5b241c1 100644
--- a/lib/selection_iter.c
+++ b/lib/selection_iter.c
@@ -48,7 +48,7 @@ static struct address_state *get_address_state(struct iter_local_state *local_st
trie_val_t *address_state = trie_get_try(local_state->addresses, (char *)address,
transport->address_len);
if (!address_state) {
- (void)!kr_assume(transport->deduplicated);
+ kr_assert(transport->deduplicated);
/* Transport was chosen by a different query. */
return NULL;
}
@@ -227,7 +227,7 @@ static void update_name_state(knot_dname_t *name, enum kr_transport_protocol typ
name_state->aaaa_state = RECORD_TRIED;
break;
default:
- (void)!kr_assume(false);
+ kr_assert(false);
}
}
@@ -281,7 +281,7 @@ void iter_choose_transport(struct kr_query *qry, struct kr_transport **transport
local_state->no_ns_addr_count = 0;
break;
default:
- (void)!kr_assume(false);
+ kr_assert(false);
break;
}
diff --git a/lib/utils.c b/lib/utils.c
index 1812082f..098c5343 100644
--- a/lib/utils.c
+++ b/lib/utils.c
@@ -38,8 +38,8 @@
/* Logging & debugging */
bool kr_verbose_status = false;
-bool kr_dbg_assumption_abort = DBG_ASSUMPTION_ABORT;
-int kr_dbg_assumption_fork = DBG_ASSUMPTION_FORK;
+bool kr_dbg_assertion_abort = DBG_ASSERTION_ABORT;
+int kr_dbg_assertion_fork = DBG_ASSERTION_FORK;
void kr_fail(bool is_fatal, const char *expr, const char *func, const char *file, int line)
{
@@ -47,23 +47,23 @@ void kr_fail(bool is_fatal, const char *expr, const char *func, const char *file
if (is_fatal)
kr_log_critical("requirement \"%s\" failed in %s@%s:%d\n", expr, func, file, line);
else
- kr_log_error("assumption \"%s\" failed in %s@%s:%d\n", expr, func, file, line);
+ kr_log_error("assertion \"%s\" failed in %s@%s:%d\n", expr, func, file, line);
- if (is_fatal || (kr_dbg_assumption_abort && !kr_dbg_assumption_fork))
+ if (is_fatal || (kr_dbg_assertion_abort && !kr_dbg_assertion_fork))
abort();
- else if (!kr_dbg_assumption_abort || !kr_dbg_assumption_fork)
+ else if (!kr_dbg_assertion_abort || !kr_dbg_assertion_fork)
goto recover;
// We want to fork and abort the child, unless rate-limited.
static uint64_t limited_until = 0;
const uint64_t now = kr_now();
if (now < limited_until)
goto recover;
- if (kr_dbg_assumption_fork > 0) {
+ if (kr_dbg_assertion_fork > 0) {
// Add jitter +- 25%; in other words: 75% + uniform(0,50%).
// Motivation: if a persistent problem starts happening, desynchronize
// coredumps from different instances as they're not cheap.
- limited_until = now + kr_dbg_assumption_fork * 3 / 4
- + kr_dbg_assumption_fork * kr_rand_bytes(1) / 256 / 2;
+ limited_until = now + kr_dbg_assertion_fork * 3 / 4
+ + kr_dbg_assertion_fork * kr_rand_bytes(1) / 256 / 2;
}
if (fork() == 0)
abort();
@@ -207,7 +207,7 @@ char* kr_strcatdup(unsigned n, ...)
char * kr_absolutize_path(const char *dirname, const char *fname)
{
- if (!kr_assume(dirname && fname)) {
+ if (kr_fails_assert(dirname && fname)) {
errno = EINVAL;
return NULL;
}
@@ -264,7 +264,7 @@ static int pkt_recycle(knot_pkt_t *pkt, bool keep_question)
if (keep_question) {
base_size += knot_pkt_question_size(pkt);
}
- if (!kr_assume(base_size <= sizeof(buf))) return kr_error(EINVAL);
+ if (kr_fails_assert(base_size <= sizeof(buf))) return kr_error(EINVAL);
memcpy(buf, pkt->wire, base_size);
/* Clear the packet and its auxiliary structures */
@@ -315,7 +315,7 @@ int kr_pkt_put(knot_pkt_t *pkt, const knot_dname_t *name, uint32_t ttl,
void kr_pkt_make_auth_header(knot_pkt_t *pkt)
{
- if (!kr_assume(pkt && pkt->wire)) return;
+ if (kr_fails_assert(pkt && pkt->wire)) return;
knot_wire_clear_ad(pkt->wire);
knot_wire_set_aa(pkt->wire);
}
@@ -504,7 +504,7 @@ struct sockaddr * kr_straddr_socket(const char *addr, int port, knot_mm_t *pool)
return (struct sockaddr *)res;
}
default:
- (void)!kr_assume(false);
+ kr_assert(false);
return NULL;
}
}
@@ -544,7 +544,7 @@ int kr_straddr_subnet(void *dst, const char *addr)
int kr_straddr_split(const char *instr, char ipaddr[static restrict (INET6_ADDRSTRLEN + 1)],
uint16_t *port)
{
- if (!kr_assume(instr && ipaddr && port)) return kr_error(EINVAL);
+ if (kr_fails_assert(instr && ipaddr && port)) return kr_error(EINVAL);
/* Find where port number starts. */
const char *p_start = strchr(instr, '@');
if (!p_start)
@@ -676,7 +676,7 @@ static inline bool rrsets_match(const knot_rrset_t *rr1, const knot_rrset_t *rr2
*/
static int to_wire_ensure_unique(ranked_rr_array_t *array, size_t index)
{
- if (!kr_assume(array && index < array->len)) return kr_error(EINVAL);
+ if (kr_fails_assert(array && index < array->len)) return kr_error(EINVAL);
const struct ranked_rr_array_entry *e0 = array->at[index];
if (!e0->to_wire) {
@@ -708,7 +708,7 @@ int kr_ranked_rrarray_add(ranked_rr_array_t *array, const knot_rrset_t *rr,
{
/* From normal packet parser we always get RRs one by one,
* but cache and prefil modules (also) feed us larger RRsets. */
- (void)!kr_assume(rr->rrs.count >= 1);
+ kr_assert(rr->rrs.count >= 1);
/* Check if another rrset with the same
* rclass/type/owner combination exists within current query
* and merge if needed */
@@ -727,7 +727,7 @@ int kr_ranked_rrarray_add(ranked_rr_array_t *array, const knot_rrset_t *rr,
continue;
}
/* Found the entry to merge with. Check consistency and merge. */
- if (!kr_assume(stashed->rank == rank && !stashed->cached && stashed->in_progress))
+ if (kr_fails_assert(stashed->rank == rank && !stashed->cached && stashed->in_progress))
return kr_error(EEXIST);
/* It may happen that an RRset is first considered useful
@@ -787,7 +787,7 @@ int kr_ranked_rrarray_add(ranked_rr_array_t *array, const knot_rrset_t *rr,
return kr_error(ENOMEM);
}
rr_new->rrs = rr->rrs;
- if (!kr_assume(rr_new->additional == NULL)) return kr_error(EINVAL);
+ if (kr_fails_assert(rr_new->additional == NULL)) return kr_error(EINVAL);
entry->qry_uid = qry_uid;
entry->rr = rr_new;
@@ -873,7 +873,7 @@ int kr_ranked_rrarray_finalize(ranked_rr_array_t *array, uint32_t qry_uid, knot_
raw_it += size;
}
}
- if (!kr_assume(raw_it == (uint8_t *)rds->rdata + rds->size))
+ if (kr_fails_assert(raw_it == (uint8_t *)rds->rdata + rds->size))
return kr_error(EINVAL);
}
stashed->in_progress = false;
@@ -1116,7 +1116,7 @@ void kr_uv_free_cb(uv_handle_t* handle)
const char *kr_strptime_diff(const char *format, const char *time1_str,
const char *time0_str, double *diff) {
- if (!kr_assume(format && time1_str && time0_str && diff)) return NULL;
+ if (kr_fails_assert(format && time1_str && time0_str && diff)) return NULL;
struct tm time1_tm;
time_t time1_u;
@@ -1146,7 +1146,7 @@ const char *kr_strptime_diff(const char *format, const char *time1_str,
int knot_dname_lf2wire(knot_dname_t * const dst, uint8_t len, const uint8_t *lf)
{
knot_dname_t *d = dst; /* moving "cursor" as we write it out */
- if (!kr_assume(d && (len == 0 || lf))) return kr_error(EINVAL);
+ if (kr_fails_assert(d && (len == 0 || lf))) return kr_error(EINVAL);
/* we allow the final zero byte to be omitted */
if (!len) {
goto finish;
@@ -1163,7 +1163,7 @@ int knot_dname_lf2wire(knot_dname_t * const dst, uint8_t len, const uint8_t *lf)
--i;
const int label_start = i + 1; /* index of the first byte of the current label */
const int label_len = label_end - label_start;
- (void)!kr_assume(label_len >= 0);
+ kr_assert(label_len >= 0);
if (label_len > 63 || label_len <= 0)
return kr_error(EILSEQ);
/* write the label */
@@ -1217,7 +1217,7 @@ void kr_rnd_buffered(void *data, uint size)
void kr_rrset_init(knot_rrset_t *rrset, knot_dname_t *owner,
uint16_t type, uint16_t rclass, uint32_t ttl)
{
- if (!kr_assume(rrset)) return;
+ if (kr_fails_assert(rrset)) return;
knot_rrset_init(rrset, owner, type, rclass, ttl);
}
uint16_t kr_pkt_has_dnssec(const knot_pkt_t *pkt)
diff --git a/lib/utils.h b/lib/utils.h
index 01b5c050..b8bfe172 100644
--- a/lib/utils.h
+++ b/lib/utils.h
@@ -46,29 +46,36 @@ typedef void (*trace_log_f)(const struct kr_request *request, const char *msg);
#define kr_log_critical(...) kr_log_error(__VA_ARGS__)
#define kr_log_deprecate(...) fprintf(stderr, "deprecation WARNING: " __VA_ARGS__)
-/** Assert() but always, regardless of -DNDEBUG. See also kr_assume(). */
+/** Assert() but always, regardless of -DNDEBUG. See also kr_assert(). */
#define kr_require(expression) do { if (!(expression)) { \
kr_fail(true, #expression, __func__, __FILE__, __LINE__); \
__builtin_unreachable(); /* aid code analysis */ \
} } while (false)
-/** Check an assumption that's recoverable. Return the expression.
+/** Check an assertion that's recoverable. Return true if it fails and needs handling.
*
* If the check fails, optionally fork()+abort() to generate coredump
* and continue running in parent process. Return value must be handled to
* ensure safe recovery from error. Use kr_require() for unrecoverable checks.
- * The errno variable is not mangled, e.g. you can: if (!kr_assume(...)) return errno;
+ * The errno variable is not mangled, e.g. you can: if (kr_fails_assert(...)) return errno;
*/
-#define kr_assume(expression) kr_assume_func((expression), #expression, \
- __func__, __FILE__, __LINE__)
+#define kr_fails_assert(expression) !kr_assert_func((expression), #expression, \
+ __func__, __FILE__, __LINE__)
-/** Whether kr_assume() checks should abort. */
-KR_EXPORT extern bool kr_dbg_assumption_abort;
+/** Kresd assertion without a return value.
+ *
+ * These can be turned on or off, for mandatory unrecoverable checks, use kr_require().
+ * For recoverable checks, use kr_fails_assert().
+ */
+#define kr_assert(expression) (void)!kr_fails_assert((expression))
+
+/** Whether kr_assert() and kr_fails_assert() checks should abort. */
+KR_EXPORT extern bool kr_dbg_assertion_abort;
-/** How often kr_assume() should fork the process before issuing abort (if configured).
+/** How often kr_assert() should fork the process before issuing abort (if configured).
*
* This can be useful for debugging rare edge-cases in production.
- * if (kr_debug_assumption_abort && kr_debug_assumption_fork), it is
+ * if (kr_dbg_assertion_abort && kr_dbg_assertion_fork), it is
* possible to both obtain a coredump (from forked child) and recover from the
* non-fatal error in the parent process.
*
@@ -77,15 +84,15 @@ KR_EXPORT extern bool kr_dbg_assumption_abort;
* (in milliseconds, each instance separately, randomized +-25%)
* < 0: no rate-limiting (not recommended)
*/
-KR_EXPORT extern int kr_dbg_assumption_fork;
+KR_EXPORT extern int kr_dbg_assertion_fork;
-/** Use kr_require() and kr_assume() instead of directly this function. */
+/** Use kr_require(), kr_assert() or kr_fails_assert() instead of using this function directly. */
KR_EXPORT KR_COLD void kr_fail(bool is_fatal, const char* expr, const char *func,
const char *file, int line);
-/** Use kr_require() and kr_assume() instead of directly this function. */
+/** Use kr_require(), kr_assert() or kr_fails_assert() instead of using this function directly. */
__attribute__ ((warn_unused_result))
-static inline bool kr_assume_func(bool result, const char *expr, const char *func,
+static inline bool kr_assert_func(bool result, const char *expr, const char *func,
const char *file, int line)
{
if (!result)
@@ -347,7 +354,7 @@ int kr_ntop_str(int family, const void *src, uint16_t port, char *buf, size_t *b
*/
static inline char *kr_straddr(const struct sockaddr *addr)
{
- if (!kr_assume(addr)) return NULL;
+ if (kr_fails_assert(addr)) return NULL;
/* We are the sinle-threaded application */
static char str[INET6_ADDRSTRLEN + 1 + 5 + 1];
size_t len = sizeof(str);
@@ -528,7 +535,7 @@ static inline int kr_dname_lf(uint8_t *dst, const knot_dname_t *src, bool add_wi
return kr_error(EINVAL);
}
int len = right_aligned_dname_start[0];
- if (!kr_assume(right_aligned_dname_start + 1 + len - KNOT_DNAME_MAXLEN == right_aligned_dst))
+ if (kr_fails_assert(right_aligned_dname_start + 1 + len - KNOT_DNAME_MAXLEN == right_aligned_dst))
return kr_error(EINVAL);
memcpy(dst + 1, right_aligned_dname_start + 1, len);
if (add_wildcard) {
diff --git a/lib/zonecut.c b/lib/zonecut.c
index cb879ede..4577c634 100644
--- a/lib/zonecut.c
+++ b/lib/zonecut.c
@@ -57,7 +57,7 @@ int kr_zonecut_init(struct kr_zonecut *cut, const knot_dname_t *name, knot_mm_t
/** Completely free a pack_t. */
static inline void free_addr_set(pack_t *pack, knot_mm_t *pool)
{
- if (!kr_assume(pack)) {
+ if (kr_fails_assert(pack)) {
/* promised we don't store NULL packs */
return;
}
@@ -163,12 +163,12 @@ int kr_zonecut_copy_trust(struct kr_zonecut *dst, const struct kr_zonecut *src)
int kr_zonecut_add(struct kr_zonecut *cut, const knot_dname_t *ns, const void *data, int len)
{
- if (!kr_assume(cut && ns && cut->nsset && (!data || len > 0)))
+ if (kr_fails_assert(cut && ns && cut->nsset && (!data || len > 0)))
return kr_error(EINVAL);
/* Disabled; add_reverse_pair() misuses this for domain name in rdata. */
if (false && data && len != sizeof(struct in_addr)
&& len != sizeof(struct in6_addr)) {
- (void)!kr_assume(!EINVAL);
+ kr_assert(!EINVAL);
return kr_error(EINVAL);
}
@@ -216,7 +216,7 @@ int kr_zonecut_del(struct kr_zonecut *cut, const knot_dname_t *ns, const void *d
if (pack->len == 0) {
free_addr_set(pack, cut->pool);
ret = trie_del(cut->nsset, (const char *)ns, knot_dname_size(ns), NULL);
- if (!kr_assume(ret == 0)) /* only KNOT_ENOENT and that *can't* happen */
+ if (kr_fails_assert(ret == 0)) /* only KNOT_ENOENT and that *can't* happen */
return kr_error(ret);
return kr_ok();
}
@@ -235,7 +235,7 @@ int kr_zonecut_del_all(struct kr_zonecut *cut, const knot_dname_t *ns)
int ret = trie_del(cut->nsset, (const char *)ns, knot_dname_size(ns),
(trie_val_t *)&pack);
if (ret) { /* deletion failed */
- (void)!kr_assume(ret == KNOT_ENOENT);
+ kr_assert(ret == KNOT_ENOENT);
return kr_error(ENOENT);
}
free_addr_set(pack, cut->pool);
@@ -261,7 +261,7 @@ static int has_address(trie_val_t *v, void *baton_)
bool kr_zonecut_is_empty(struct kr_zonecut *cut)
{
- if (!kr_assume(cut && cut->nsset))
+ if (kr_fails_assert(cut && cut->nsset))
return true;
return !trie_apply(cut->nsset, has_address, NULL);
}
@@ -295,7 +295,7 @@ static addrset_info_t fetch_addr(pack_t *addrs, const knot_dname_t *ns, uint16_t
rdlen = 16;
break;
default:
- (void)!kr_assume(!EINVAL);
+ kr_assert(!EINVAL);
return AI_UNKNOWN;
}
@@ -337,7 +337,7 @@ static addrset_info_t fetch_addr(pack_t *addrs, const knot_dname_t *ns, uint16_t
++usable_cnt;
ret = pack_obj_push(addrs, rd->data, rd->len);
- (void)!kr_assume(!ret); /* didn't fit because of incorrectly reserved memory */
+ kr_assert(!ret); /* didn't fit because of incorrectly reserved memory */
/* LATER: for now we lose quite some information here,
* as keeping it would need substantial changes on other places,
* and it turned out to be premature optimization (most likely).
@@ -394,7 +394,7 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
pack_t **pack = (pack_t **)trie_get_ins(cut->nsset,
(const char *)ns_name, ns_size);
if (!pack) return kr_error(ENOMEM);
- (void)!kr_assume(!*pack); /* not critical, really */
+ kr_assert(!*pack); /* not critical, really */
*pack = mm_alloc(cut->pool, sizeof(pack_t));
if (!*pack) return kr_error(ENOMEM);
pack_init(**pack);
@@ -460,7 +460,7 @@ static int fetch_secure_rrset(knot_rrset_t **rr, struct kr_cache *cache,
const knot_dname_t *owner, uint16_t type, knot_mm_t *pool,
const struct kr_query *qry)
{
- if (!kr_assume(rr))
+ if (kr_fails_assert(rr))
return kr_error(EINVAL);
/* peek, check rank and TTL */
struct kr_cache_p peek;
diff --git a/meson.build b/meson.build
index 73b73d96..f405fa33 100644
--- a/meson.build
+++ b/meson.build
@@ -178,11 +178,11 @@ conf_data.set('ENABLE_SENDMMSG', sendmmsg.to_int())
conf_data.set('ENABLE_XDP', xdp.to_int())
conf_data.set('ENABLE_CAP_NG', capng.found().to_int())
conf_data.set('ENABLE_DOH2', nghttp2.found().to_int())
-conf_data.set('DBG_ASSUMPTION_ABORT', get_option('debug').to_int())
+conf_data.set('DBG_ASSERTION_ABORT', get_option('debug').to_int())
if get_option('debug')
- conf_data.set('DBG_ASSUMPTION_FORK', '0')
+ conf_data.set('DBG_ASSERTION_FORK', '0')
else
- conf_data.set('DBG_ASSUMPTION_FORK', '(5 * 60 * 1000) /* five minutes */')
+ conf_data.set('DBG_ASSERTION_FORK', '(5 * 60 * 1000) /* five minutes */')
endif
kresconfig = configure_file(
diff --git a/modules/cookies/cookiectl.c b/modules/cookies/cookiectl.c
index 5be26c6b..31eab0db 100644
--- a/modules/cookies/cookiectl.c
+++ b/modules/cookies/cookiectl.c
@@ -45,7 +45,7 @@ static void kr_cookie_ctx_init(struct kr_cookie_ctx *ctx)
*/
static bool enabled_ok(const JsonNode *node)
{
- if (!kr_assume(node))
+ if (kr_fails_assert(node))
return false;
return node->tag == JSON_BOOL;
@@ -58,7 +58,7 @@ static bool enabled_ok(const JsonNode *node)
*/
static bool secret_ok(const JsonNode *node)
{
- if (!kr_assume(node))
+ if (kr_fails_assert(node))
return false;
if (node->tag != JSON_STRING) {
@@ -187,10 +187,10 @@ static int hexbyte2int(const char *hexstr)
}
dhi = hexchar2val(dhi);
- if (!kr_assume(dhi != -1))
+ if (kr_fails_assert(dhi != -1))
return -1;
dlo = hexchar2val(dlo);
- if (!kr_assume(dlo != -1))
+ if (kr_fails_assert(dlo != -1))
return -1;
return (dhi << 4) | dlo;
@@ -209,10 +209,10 @@ static int int2hexbyte(char *tgt, int i)
}
int ilo = hexval2char(i & 0x0f);
- if (!kr_assume(ilo != -1))
+ if (kr_fails_assert(ilo != -1))
return -1;
int ihi = hexval2char((i >> 4) & 0x0f);
- if (!kr_assume(ihi != -1))
+ if (kr_fails_assert(ihi != -1))
return -1;
tgt[0] = ihi;
@@ -249,7 +249,7 @@ static struct kr_cookie_secret *new_sq_from_hexstr(const char *hexstr)
free(sq);
return NULL;
}
- if (!kr_assume(0x00 <= num && num <= 0xff)) {
+ if (kr_fails_assert(0x00 <= num && num <= 0xff)) {
free(sq);
return NULL;
}
@@ -283,7 +283,7 @@ static struct kr_cookie_secret *create_secret(const JsonNode *node)
*/
static bool configuration_node_ok(const JsonNode *node)
{
- if (!kr_assume(node))
+ if (kr_fails_assert(node))
return false;
if (!node->key) {
@@ -340,7 +340,7 @@ static char *new_hexstr_from_sq(const struct kr_cookie_secret *sq)
static bool read_secret(JsonNode *root, const char *node_name,
const struct kr_cookie_secret *secret)
{
- if (!kr_assume(root && node_name && secret))
+ if (kr_fails_assert(root && node_name && secret))
return false;
char *secret_str = new_hexstr_from_sq(secret);
@@ -363,7 +363,7 @@ static bool read_secret(JsonNode *root, const char *node_name,
static bool read_available_hashes(JsonNode *root, const char *root_name,
const knot_lookup_t table[])
{
- if (!kr_assume(root && root_name && table))
+ if (kr_fails_assert(root && root_name && table))
return false;
JsonNode *array = json_mkarray();
@@ -399,7 +399,7 @@ static bool is_modified(const struct kr_cookie_comp *running,
struct kr_cookie_secret *secr,
const knot_lookup_t *alg_lookup)
{
- if (!kr_assume(running))
+ if (kr_fails_assert(running))
return false;
if (alg_lookup && alg_lookup->id >= 0) {
@@ -409,7 +409,7 @@ static bool is_modified(const struct kr_cookie_comp *running,
}
if (secr) {
- if (!kr_assume(secr->size > 0))
+ if (kr_fails_assert(secr->size > 0))
return false;
if (running->secr->size != secr->size ||
0 != memcmp(running->secr->data, secr->data,
@@ -427,7 +427,7 @@ static bool is_modified(const struct kr_cookie_comp *running,
static bool obtain_secret(JsonNode *root_node, struct kr_cookie_secret **secret,
const char *name)
{
- if (!kr_assume(secret && name))
+ if (kr_fails_assert(secret && name))
return false;
const JsonNode *node;
@@ -448,7 +448,7 @@ static void update_running(struct kr_cookie_settings *running,
struct kr_cookie_secret **secret,
const knot_lookup_t *alg_lookup)
{
- if (!kr_assume(running && secret) || !kr_assume(*secret || alg_lookup))
+ if (kr_fails_assert(running && secret) || kr_fails_assert(*secret || alg_lookup))
return;
running->recent.alg_id = -1;
@@ -457,7 +457,7 @@ static void update_running(struct kr_cookie_settings *running,
running->recent.alg_id = running->current.alg_id;
if (alg_lookup) {
- if (!kr_assume(alg_lookup->id >= 0))
+ if (kr_fails_assert(alg_lookup->id >= 0))
return;
running->current.alg_id = alg_lookup->id;
}
@@ -483,7 +483,7 @@ static void apply_changes(struct kr_cookie_settings *running,
const knot_lookup_t *alg_lookup,
const JsonNode *enabled)
{
- if (!kr_assume(running && secret))
+ if (kr_fails_assert(running && secret))
return;
if (is_modified(&running->current, *secret, alg_lookup)) {
@@ -491,7 +491,7 @@ static void apply_changes(struct kr_cookie_settings *running,
}
if (enabled) {
- (void)!kr_assume(enabled->tag == JSON_BOOL);
+ kr_assert(enabled->tag == JSON_BOOL);
running->enabled = enabled->bool_;
}
}
@@ -508,7 +508,7 @@ static void apply_changes(struct kr_cookie_settings *running,
*/
static bool config_apply_json(struct kr_cookie_ctx *ctx, JsonNode *root_node)
{
- if (!kr_assume(ctx && root_node))
+ if (kr_fails_assert(ctx && root_node))
return;
/*
diff --git a/modules/cookies/cookiemonster.c b/modules/cookies/cookiemonster.c
index f443a4d2..3cb06cb5 100644
--- a/modules/cookies/cookiemonster.c
+++ b/modules/cookies/cookiemonster.c
@@ -56,14 +56,14 @@ static int srvr_sockaddr_cc_check(const struct sockaddr *srvr_sa,
const uint8_t *cc, uint16_t cc_len,
const struct kr_cookie_settings *clnt_sett)
{
- if (!kr_assume(cc && cc_len > 0 && clnt_sett))
+ if (kr_fails_assert(cc && cc_len > 0 && clnt_sett))
return -2;
if (!srvr_sa) {
return -2;
}
- if (!kr_assume(clnt_sett->current.secr))
+ if (kr_fails_assert(clnt_sett->current.secr))
return -2;
/* The address must correspond with the client cookie. */
@@ -108,7 +108,7 @@ static int srvr_sockaddr_cc_check(const struct sockaddr *srvr_sa,
static const uint8_t *get_cookie_opt(kr_cookie_lru_t *cache,
const struct sockaddr *sa)
{
- if (!kr_assume(cache && sa))
+ if (kr_fails_assert(cache && sa))
return NULL;
const uint8_t *cached_cookie_opt = kr_cookie_lru_get(cache, sa);
@@ -134,7 +134,7 @@ static const uint8_t *get_cookie_opt(kr_cookie_lru_t *cache,
static bool is_cookie_cached(kr_cookie_lru_t *cache, const struct sockaddr *sa,
const uint8_t *cookie_opt)
{
- if (!kr_assume(cache && sa && cookie_opt))
+ if (kr_fails_assert(cache && sa && cookie_opt))
return false;
const uint8_t *cached_opt = get_cookie_opt(cache, sa);
@@ -162,7 +162,7 @@ static bool check_cookie_content_and_cache(const struct kr_cookie_settings *clnt
uint8_t *pkt_cookie_opt,
kr_cookie_lru_t *cache)
{
- if (!kr_assume(clnt_sett && req && pkt_cookie_opt && cache))
+ if (kr_fails_assert(clnt_sett && req && pkt_cookie_opt && cache))
return false;
const uint8_t *pkt_cookie_data = knot_edns_opt_get_data(pkt_cookie_opt);
@@ -180,7 +180,7 @@ static bool check_cookie_content_and_cache(const struct kr_cookie_settings *clnt
"got malformed DNS cookie or server cookie missing");
return false;
}
- if (!kr_assume(pkt_cc_len == KNOT_OPT_COOKIE_CLNT))
+ if (kr_fails_assert(pkt_cc_len == KNOT_OPT_COOKIE_CLNT))
return false;
/* Check server address against received client cookie. */
@@ -191,7 +191,7 @@ static bool check_cookie_content_and_cache(const struct kr_cookie_settings *clnt
VERBOSE_MSG(NULL, "%s\n", "could not match received cookie");
return false;
}
- if (!kr_assume(srvr_sockaddr))
+ if (kr_fails_assert(srvr_sockaddr))
return false;
/* Don't cache received cookies that don't match the current secret. */
@@ -307,7 +307,7 @@ static inline uint8_t *req_cookie_option(struct kr_request *req)
static int invalid_sc_status(int state, bool sc_present, bool ignore_badcookie,
const struct kr_request *req, knot_pkt_t *answer)
{
- if (!kr_assume(req && answer))
+ if (kr_fails_assert(req && answer))
return KR_STATE_FAIL;
const knot_pkt_t *pkt = req->qsource.packet;
diff --git a/modules/cookies/cookies.c b/modules/cookies/cookies.c
index d3a72f20..3ad82a16 100644
--- a/modules/cookies/cookies.c
+++ b/modules/cookies/cookies.c
@@ -17,7 +17,7 @@ static char *cookies_config(void *env, struct kr_module *module,
const char *args)
{
struct kr_cookie_ctx *cookie_ctx = module->data;
- if (!kr_assume(cookie_ctx))
+ if (kr_fails_assert(cookie_ctx))
return NULL;
/* Apply configuration, if any. */
diff --git a/modules/dnstap/dnstap.c b/modules/dnstap/dnstap.c
index 73e8df7d..f813653b 100644
--- a/modules/dnstap/dnstap.c
+++ b/modules/dnstap/dnstap.c
@@ -110,7 +110,7 @@ static void set_address(const struct sockaddr *sockaddr,
/** Fill a tcp_info or return kr_error(). */
static int get_tcp_info(const struct kr_request *req, struct tcp_info *info)
{
- if(!kr_assume(req && info))
+ if(kr_fails_assert(req && info))
return kr_error(EINVAL);
if (!req->qsource.dst_addr || !req->qsource.flags.tcp) /* not TCP-based */
return -abs(ENOENT);
@@ -383,10 +383,10 @@ static struct fstrm_writer* dnstap_unix_writer(const char *path) {
static int find_string(const JsonNode *node, char **val, size_t len) {
if (!node || !node->key)
return kr_error(EINVAL);
- if (!kr_assume(node->tag == JSON_STRING))
+ if (kr_fails_assert(node->tag == JSON_STRING))
return kr_error(EINVAL);
*val = strndup(node->string_, len);
- if (!kr_assume(*val != NULL))
+ if (kr_fails_assert(*val != NULL))
return kr_error(errno);
return kr_ok();
}
@@ -395,7 +395,7 @@ static int find_string(const JsonNode *node, char **val, size_t len) {
static bool find_bool(const JsonNode *node) {
if (!node || !node->key)
return false;
- if (!kr_assume(node->tag == JSON_BOOL))
+ if (kr_fails_assert(node->tag == JSON_BOOL))
return false;
return node->bool_;
}
diff --git a/modules/hints/hints.c b/modules/hints/hints.c
index 0d841941..10890e18 100644
--- a/modules/hints/hints.c
+++ b/modules/hints/hints.c
@@ -199,7 +199,7 @@ static const knot_dname_t * raw_addr2reverse(const uint8_t *raw_addr, int family
ssize_t free_space = reverse_addr + sizeof(reverse_addr) - ra_it;
int written = snprintf(ra_it, free_space, "%x.%x.",
raw_addr[i] & 0x0f, raw_addr[i] >> 4);
- if (!kr_assume(written < free_space))
+ if (kr_fails_assert(written < free_space))
return NULL;
ra_it += written;
}
@@ -461,7 +461,7 @@ static char* pack_hints(struct kr_zonecut *hints);
static char* hint_get(void *env, struct kr_module *module, const char *args)
{
struct kr_zonecut *hints = &((struct hints_data *) module->data)->hints;
- if (!kr_assume(hints))
+ if (kr_fails_assert(hints))
return NULL;
if (!args) {
diff --git a/modules/stats/stats.c b/modules/stats/stats.c
index c34c5abc..5deb0932 100644
--- a/modules/stats/stats.c
+++ b/modules/stats/stats.c
@@ -132,7 +132,7 @@ static void collect_sample(struct stat_data *data, struct kr_rplan *rplan)
* TODO: redesign the sampling approach. */
if (kr_rand_coin(1, FREQUENT_PSAMPLE)) {
int key_len = collect_key(key, qry->sname, qry->stype);
- if (!kr_assume(key_len >= 0))
+ if (kr_fails_assert(key_len >= 0))
continue;
unsigned *count = lru_get_new(data->queries.frequent, key, key_len, NULL);
if (count)
diff --git a/utils/cache_gc/db.c b/utils/cache_gc/db.c
index d164ea7b..3b4a75be 100644
--- a/utils/cache_gc/db.c
+++ b/utils/cache_gc/db.c
@@ -76,7 +76,7 @@ int kr_gc_key_consistent(knot_db_val_t key)
} else {
/* find the first double zero in the key */
for (i = 2; kd[i - 1] || kd[i - 2]; ++i) {
- if (!kr_assume(i < key.len))
+ if (kr_fails_assert(i < key.len))
return kr_error(EINVAL);
}
}
@@ -85,7 +85,7 @@ int kr_gc_key_consistent(knot_db_val_t key)
case 'E':
(void)0; // C can't have a variable definition following a label
uint16_t type;
- if (!kr_assume(i + 1 + sizeof(type) <= key.len))
+ if (kr_fails_assert(i + 1 + sizeof(type) <= key.len))
return kr_error(EINVAL);
memcpy(&type, kd + i + 1, sizeof(type));
return type;
@@ -96,7 +96,7 @@ int kr_gc_key_consistent(knot_db_val_t key)
case 'S': // the rtt_state entries are considered inconsistent, at least for now
return -1;
default:
- (void)!kr_assume(!EINVAL);
+ kr_assert(!EINVAL);
return kr_error(EINVAL);
}
}
diff --git a/utils/cache_gc/kr_cache_gc.c b/utils/cache_gc/kr_cache_gc.c
index a35c393b..baf8644f 100644
--- a/utils/cache_gc/kr_cache_gc.c
+++ b/utils/cache_gc/kr_cache_gc.c
@@ -151,7 +151,7 @@ struct kr_cache_gc_state {
void kr_cache_gc_free_state(kr_cache_gc_state_t **state)
{
- if (!kr_assume(state))
+ if (kr_fails_assert(state))
return;
if (!*state) { // not open
return;
@@ -165,7 +165,7 @@ int kr_cache_gc(kr_cache_gc_cfg_t *cfg, kr_cache_gc_state_t **state)
{
// The whole function works in four "big phases":
//// 1. find out whether we should even do analysis and deletion.
- if (!kr_assume(cfg && state))
+ if (kr_fails_assert(cfg && state))
return KNOT_EINVAL;
int ret;
// Ensure that we have open and "healthy" cache.
diff --git a/utils/cache_gc/main.c b/utils/cache_gc/main.c
index 5a0c4bd4..4656073a 100644
--- a/utils/cache_gc/main.c
+++ b/utils/cache_gc/main.c
@@ -26,7 +26,7 @@ static void got_killed(int signum)
case 3:
abort();
default:
- (void)!kr_assume(false);
+ kr_assert(false);
}
}
@@ -122,7 +122,7 @@ int main(int argc, char *argv[])
print_help();
return 1;
default:
- (void)!kr_assume(false);
+ kr_assert(false);
}
}