summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
-rw-r--r--  daemon/lua/kres-gen-30.lua  | 15
-rw-r--r--  daemon/lua/kres-gen-31.lua  | 15
-rw-r--r--  daemon/lua/kres-gen-32.lua  | 15
-rwxr-xr-x  daemon/lua/kres-gen.sh      |  4
-rw-r--r--  lib/cache/api.c             | 22
-rw-r--r--  lib/layer/iterate.c         |  4
-rw-r--r--  lib/meson.build             |  1
-rw-r--r--  lib/resolve-produce.c       | 44
-rw-r--r--  lib/resolve.h               |  3
-rw-r--r--  lib/rplan.h                 | 19
-rw-r--r--  lib/rules/api.c             | 42
-rw-r--r--  lib/rules/api.h             | 25
-rw-r--r--  lib/rules/forward.c         | 168
-rw-r--r--  lib/rules/impl.h            |  5
-rw-r--r--  modules/policy/policy.lua   | 52
15 files changed, 386 insertions(+), 48 deletions(-)
diff --git a/daemon/lua/kres-gen-30.lua b/daemon/lua/kres-gen-30.lua
index 09138eaf..f92fa0b6 100644
--- a/daemon/lua/kres-gen-30.lua
+++ b/daemon/lua/kres-gen-30.lua
@@ -209,6 +209,12 @@ struct kr_rule_zonefile_config {
const char *origin;
uint32_t ttl;
};
+struct kr_rule_fwd_flags {
+ _Bool is_auth : 1;
+ _Bool is_tcp : 1;
+ _Bool is_nods : 1;
+};
+typedef struct kr_rule_fwd_flags kr_rule_fwd_flags_t;
struct kr_extended_error {
int32_t info_code;
const char *extra_text;
@@ -333,6 +339,13 @@ struct kr_server_selection {
};
typedef int kr_log_level_t;
enum kr_log_group {LOG_GRP_UNKNOWN = -1, LOG_GRP_SYSTEM = 1, LOG_GRP_CACHE, LOG_GRP_IO, LOG_GRP_NETWORK, LOG_GRP_TA, LOG_GRP_TLS, LOG_GRP_GNUTLS, LOG_GRP_TLSCLIENT, LOG_GRP_XDP, LOG_GRP_DOH, LOG_GRP_DNSSEC, LOG_GRP_HINT, LOG_GRP_PLAN, LOG_GRP_ITERATOR, LOG_GRP_VALIDATOR, LOG_GRP_RESOLVER, LOG_GRP_SELECTION, LOG_GRP_ZCUT, LOG_GRP_COOKIES, LOG_GRP_STATISTICS, LOG_GRP_REBIND, LOG_GRP_WORKER, LOG_GRP_POLICY, LOG_GRP_TASENTINEL, LOG_GRP_TASIGNALING, LOG_GRP_TAUPDATE, LOG_GRP_DAF, LOG_GRP_DETECTTIMEJUMP, LOG_GRP_DETECTTIMESKEW, LOG_GRP_GRAPHITE, LOG_GRP_PREFILL, LOG_GRP_PRIMING, LOG_GRP_SRVSTALE, LOG_GRP_WATCHDOG, LOG_GRP_NSID, LOG_GRP_DNSTAP, LOG_GRP_TESTS, LOG_GRP_DOTAUTH, LOG_GRP_HTTP, LOG_GRP_CONTROL, LOG_GRP_MODULE, LOG_GRP_DEVEL, LOG_GRP_RENUMBER, LOG_GRP_EDE, LOG_GRP_RULES, LOG_GRP_REQDBG};
+struct kr_query_data_src {
+ _Bool initialized;
+ _Bool all_set;
+ uint8_t rule_depth;
+ kr_rule_fwd_flags_t flags;
+ knot_db_val_t targets_ptr;
+};
kr_layer_t kr_layer_t_static;
_Bool kr_dbg_assertion_abort;
@@ -359,6 +372,7 @@ struct kr_query {
struct timeval timestamp;
struct kr_zonecut zone_cut;
struct kr_layer_pickle *deferred;
+ struct kr_query_data_src data_src;
int8_t cname_depth;
struct kr_query *cname_parent;
struct kr_request *request;
@@ -481,6 +495,7 @@ int kr_rule_tag_add(const char *, kr_rule_tags_t *);
int kr_rule_local_data_emptyzone(const knot_dname_t *, kr_rule_tags_t);
int kr_rule_local_data_nxdomain(const knot_dname_t *, kr_rule_tags_t);
int kr_rule_zonefile(const struct kr_rule_zonefile_config *);
+int kr_rule_forward(const knot_dname_t *, kr_rule_fwd_flags_t, const struct sockaddr **);
typedef struct {
int sock_type;
_Bool tls;
diff --git a/daemon/lua/kres-gen-31.lua b/daemon/lua/kres-gen-31.lua
index 5fc6eaba..4bc38f34 100644
--- a/daemon/lua/kres-gen-31.lua
+++ b/daemon/lua/kres-gen-31.lua
@@ -209,6 +209,12 @@ struct kr_rule_zonefile_config {
const char *origin;
uint32_t ttl;
};
+struct kr_rule_fwd_flags {
+ _Bool is_auth : 1;
+ _Bool is_tcp : 1;
+ _Bool is_nods : 1;
+};
+typedef struct kr_rule_fwd_flags kr_rule_fwd_flags_t;
struct kr_extended_error {
int32_t info_code;
const char *extra_text;
@@ -333,6 +339,13 @@ struct kr_server_selection {
};
typedef int kr_log_level_t;
enum kr_log_group {LOG_GRP_UNKNOWN = -1, LOG_GRP_SYSTEM = 1, LOG_GRP_CACHE, LOG_GRP_IO, LOG_GRP_NETWORK, LOG_GRP_TA, LOG_GRP_TLS, LOG_GRP_GNUTLS, LOG_GRP_TLSCLIENT, LOG_GRP_XDP, LOG_GRP_DOH, LOG_GRP_DNSSEC, LOG_GRP_HINT, LOG_GRP_PLAN, LOG_GRP_ITERATOR, LOG_GRP_VALIDATOR, LOG_GRP_RESOLVER, LOG_GRP_SELECTION, LOG_GRP_ZCUT, LOG_GRP_COOKIES, LOG_GRP_STATISTICS, LOG_GRP_REBIND, LOG_GRP_WORKER, LOG_GRP_POLICY, LOG_GRP_TASENTINEL, LOG_GRP_TASIGNALING, LOG_GRP_TAUPDATE, LOG_GRP_DAF, LOG_GRP_DETECTTIMEJUMP, LOG_GRP_DETECTTIMESKEW, LOG_GRP_GRAPHITE, LOG_GRP_PREFILL, LOG_GRP_PRIMING, LOG_GRP_SRVSTALE, LOG_GRP_WATCHDOG, LOG_GRP_NSID, LOG_GRP_DNSTAP, LOG_GRP_TESTS, LOG_GRP_DOTAUTH, LOG_GRP_HTTP, LOG_GRP_CONTROL, LOG_GRP_MODULE, LOG_GRP_DEVEL, LOG_GRP_RENUMBER, LOG_GRP_EDE, LOG_GRP_RULES, LOG_GRP_REQDBG};
+struct kr_query_data_src {
+ _Bool initialized;
+ _Bool all_set;
+ uint8_t rule_depth;
+ kr_rule_fwd_flags_t flags;
+ knot_db_val_t targets_ptr;
+};
kr_layer_t kr_layer_t_static;
_Bool kr_dbg_assertion_abort;
@@ -359,6 +372,7 @@ struct kr_query {
struct timeval timestamp;
struct kr_zonecut zone_cut;
struct kr_layer_pickle *deferred;
+ struct kr_query_data_src data_src;
int8_t cname_depth;
struct kr_query *cname_parent;
struct kr_request *request;
@@ -481,6 +495,7 @@ int kr_rule_tag_add(const char *, kr_rule_tags_t *);
int kr_rule_local_data_emptyzone(const knot_dname_t *, kr_rule_tags_t);
int kr_rule_local_data_nxdomain(const knot_dname_t *, kr_rule_tags_t);
int kr_rule_zonefile(const struct kr_rule_zonefile_config *);
+int kr_rule_forward(const knot_dname_t *, kr_rule_fwd_flags_t, const struct sockaddr **);
typedef struct {
int sock_type;
_Bool tls;
diff --git a/daemon/lua/kres-gen-32.lua b/daemon/lua/kres-gen-32.lua
index 57a7e54e..69ccfee8 100644
--- a/daemon/lua/kres-gen-32.lua
+++ b/daemon/lua/kres-gen-32.lua
@@ -210,6 +210,12 @@ struct kr_rule_zonefile_config {
const char *origin;
uint32_t ttl;
};
+struct kr_rule_fwd_flags {
+ _Bool is_auth : 1;
+ _Bool is_tcp : 1;
+ _Bool is_nods : 1;
+};
+typedef struct kr_rule_fwd_flags kr_rule_fwd_flags_t;
struct kr_extended_error {
int32_t info_code;
const char *extra_text;
@@ -334,6 +340,13 @@ struct kr_server_selection {
};
typedef int kr_log_level_t;
enum kr_log_group {LOG_GRP_UNKNOWN = -1, LOG_GRP_SYSTEM = 1, LOG_GRP_CACHE, LOG_GRP_IO, LOG_GRP_NETWORK, LOG_GRP_TA, LOG_GRP_TLS, LOG_GRP_GNUTLS, LOG_GRP_TLSCLIENT, LOG_GRP_XDP, LOG_GRP_DOH, LOG_GRP_DNSSEC, LOG_GRP_HINT, LOG_GRP_PLAN, LOG_GRP_ITERATOR, LOG_GRP_VALIDATOR, LOG_GRP_RESOLVER, LOG_GRP_SELECTION, LOG_GRP_ZCUT, LOG_GRP_COOKIES, LOG_GRP_STATISTICS, LOG_GRP_REBIND, LOG_GRP_WORKER, LOG_GRP_POLICY, LOG_GRP_TASENTINEL, LOG_GRP_TASIGNALING, LOG_GRP_TAUPDATE, LOG_GRP_DAF, LOG_GRP_DETECTTIMEJUMP, LOG_GRP_DETECTTIMESKEW, LOG_GRP_GRAPHITE, LOG_GRP_PREFILL, LOG_GRP_PRIMING, LOG_GRP_SRVSTALE, LOG_GRP_WATCHDOG, LOG_GRP_NSID, LOG_GRP_DNSTAP, LOG_GRP_TESTS, LOG_GRP_DOTAUTH, LOG_GRP_HTTP, LOG_GRP_CONTROL, LOG_GRP_MODULE, LOG_GRP_DEVEL, LOG_GRP_RENUMBER, LOG_GRP_EDE, LOG_GRP_RULES, LOG_GRP_REQDBG};
+struct kr_query_data_src {
+ _Bool initialized;
+ _Bool all_set;
+ uint8_t rule_depth;
+ kr_rule_fwd_flags_t flags;
+ knot_db_val_t targets_ptr;
+};
kr_layer_t kr_layer_t_static;
_Bool kr_dbg_assertion_abort;
@@ -360,6 +373,7 @@ struct kr_query {
struct timeval timestamp;
struct kr_zonecut zone_cut;
struct kr_layer_pickle *deferred;
+ struct kr_query_data_src data_src;
int8_t cname_depth;
struct kr_query *cname_parent;
struct kr_request *request;
@@ -482,6 +496,7 @@ int kr_rule_tag_add(const char *, kr_rule_tags_t *);
int kr_rule_local_data_emptyzone(const knot_dname_t *, kr_rule_tags_t);
int kr_rule_local_data_nxdomain(const knot_dname_t *, kr_rule_tags_t);
int kr_rule_zonefile(const struct kr_rule_zonefile_config *);
+int kr_rule_forward(const knot_dname_t *, kr_rule_fwd_flags_t, const struct sockaddr **);
typedef struct {
int sock_type;
_Bool tls;
diff --git a/daemon/lua/kres-gen.sh b/daemon/lua/kres-gen.sh
index 42da9b16..d4ee2772 100755
--- a/daemon/lua/kres-gen.sh
+++ b/daemon/lua/kres-gen.sh
@@ -128,6 +128,8 @@ ${CDEFS} ${LIBKRES} types <<-EOF
struct kr_request_qsource_flags
kr_rule_tags_t
struct kr_rule_zonefile_config
+ struct kr_rule_fwd_flags
+ typedef kr_rule_fwd_flags_t
struct kr_extended_error
struct kr_request
enum kr_rank
@@ -144,6 +146,7 @@ ${CDEFS} ${LIBKRES} types <<-EOF
struct kr_server_selection
kr_log_level_t
enum kr_log_group
+ struct kr_query_data_src
EOF
# static variables; these lines might not be simple to generate
@@ -292,6 +295,7 @@ ${CDEFS} ${LIBKRES} functions <<-EOF
kr_rule_local_data_emptyzone
kr_rule_local_data_nxdomain
kr_rule_zonefile
+ kr_rule_forward
EOF
diff --git a/lib/cache/api.c b/lib/cache/api.c
index 804de6f8..4b4a8c59 100644
--- a/lib/cache/api.c
+++ b/lib/cache/api.c
@@ -315,6 +315,19 @@ int cache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->req;
struct kr_query *qry = req->current_query;
+
+ /* TODO: review when to run this? We want to process rules here
+ * even when some of the cache exit-conditions happen. NO_CACHE in particular. */
+ if (!req->options.PASSTHRU_LEGACY && !qry->flags.CACHE_TRIED) {
+ int ret = kr_rule_local_data_answer(qry, pkt);
+ if (ret < 0)
+ ctx->state = KR_STATE_FAIL;
+ if (ret != 0) {
+ qry->flags.CACHE_TRIED = true;
+ return ctx->state;
+ }
+ }
+
/* We first check various exit-conditions and then call the _real function. */
if (!kr_cache_is_open(&req->ctx->cache)
@@ -338,15 +351,6 @@ int cache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
return ctx->state;
}
- /* TODO: we _might_ want to process rules here even when some of the cache
- * exit-conditions happen, though I don't expect these cases to be important. */
- if (!req->options.PASSTHRU_LEGACY) {
- int ret = kr_rule_local_data_answer(qry, pkt);
- if (ret != -ENOENT) {
- return ret;
- }
- }
-
int ret = peek_nosync(ctx, pkt);
kr_cache_commit(&req->ctx->cache);
return ret;
diff --git a/lib/layer/iterate.c b/lib/layer/iterate.c
index dfb7c876..4eacf86f 100644
--- a/lib/layer/iterate.c
+++ b/lib/layer/iterate.c
@@ -932,6 +932,7 @@ static int begin(kr_layer_t *ctx)
return reset(ctx);
}
+/* LATER: make calls of this function more organized; spaghetti is tough here. */
int kr_make_query(struct kr_query *query, knot_pkt_t *pkt)
{
/* Minimize QNAME (if possible). */
@@ -977,6 +978,7 @@ static int prepare_query(kr_layer_t *ctx, knot_pkt_t *pkt)
return KR_STATE_FAIL;
}
+ // TODO: this logging (and rplan's) is confusing, especially around `uid` values
WITH_VERBOSE(query) {
KR_DNAME_GET_STR(name_str, query->sname);
KR_RRTYPE_GET_STR(type_str, query->stype);
@@ -1083,7 +1085,7 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
VERBOSE_MSG("<= malformed response (parsed %d)\n", (int)pkt->parsed);
query->server_selection.error(query, req->upstream.transport, KR_SELECTION_MALFORMED);
return KR_STATE_FAIL;
- } else if (!is_paired_to_query(pkt, query)) {
+ } else if (!query->flags.CACHED && !is_paired_to_query(pkt, query)) {
WITH_VERBOSE(query) {
const char *ns_str =
req->upstream.transport ? kr_straddr(&req->upstream.transport->address.ip) : "(internal)";
diff --git a/lib/meson.build b/lib/meson.build
index d7d2d0e8..48185e17 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -25,6 +25,7 @@ libkres_src = files([
'log.c',
'rules/api.c',
'rules/defaults.c',
+ 'rules/forward.c',
'rules/zonefile.c',
'module.c',
'resolve.c',
diff --git a/lib/resolve-produce.c b/lib/resolve-produce.c
index ed78f633..72b0709f 100644
--- a/lib/resolve-produce.c
+++ b/lib/resolve-produce.c
@@ -461,10 +461,15 @@ static int trust_chain_check(struct kr_request *request, struct kr_query *qry)
return KR_STATE_PRODUCE;
}
-/** @internal Check current zone cut status and credibility, spawn subrequests if needed. */
+/// Check current zone cut status and credibility, spawn subrequests if needed.
+/// \return KR_STATE_FAIL, KR_STATE_DONE, kr_ok()
+/// TODO: careful review might be nice
static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot_pkt_t *packet)
-/* TODO: using cache on this point in this way just isn't nice; remove in time */
{
+ // Set up nameserver+cut if overridden by policy.
+ int ret = kr_rule_data_src_check(qry, packet);
+ if (ret) return KR_STATE_FAIL;
+
/* Stub mode, just forward and do not solve cut. */
if (qry->flags.STUB) {
return KR_STATE_PRODUCE;
@@ -486,7 +491,7 @@ static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot
* now it's the time to look up closest zone cut from cache. */
struct kr_cache *cache = &request->ctx->cache;
if (!kr_cache_is_open(cache)) {
- int ret = kr_zonecut_set_sbelt(request->ctx, &qry->zone_cut);
+ ret = kr_zonecut_set_sbelt(request->ctx, &qry->zone_cut);
if (ret != 0) {
return KR_STATE_FAIL;
}
@@ -590,6 +595,7 @@ static int ns_resolve_addr(struct kr_query *qry, struct kr_request *param, struc
int kr_resolve_produce(struct kr_request *request, struct kr_transport **transport, knot_pkt_t *packet)
{
+ kr_require(request && transport && packet);
struct kr_rplan *rplan = &request->rplan;
/* No query left for resolution */
@@ -599,11 +605,6 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
struct kr_query *qry = array_tail(rplan->pending);
- /* Initialize server selection */
- if (!qry->server_selection.initialized) {
- kr_server_selection_init(qry);
- }
-
/* If we have deferred answers, resume them. */
if (qry->deferred != NULL) {
/* @todo: Refactoring validator, check trust chain before resuming. */
@@ -633,13 +634,11 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
} else {
/* Caller is interested in always tracking a zone cut, even if the answer is cached
* this is normally not required, and incurs another cache lookups for cached answer. */
- if (qry->flags.ALWAYS_CUT) {
- if (!(qry->flags.STUB)) {
- switch(zone_cut_check(request, qry, packet)) {
- case KR_STATE_FAIL: return KR_STATE_FAIL;
- case KR_STATE_DONE: return KR_STATE_PRODUCE;
- default: break;
- }
+ if (qry->flags.ALWAYS_CUT) { // LATER: maybe the flag doesn't work well anymore
+ switch(zone_cut_check(request, qry, packet)) {
+ case KR_STATE_FAIL: return KR_STATE_FAIL;
+ case KR_STATE_DONE: return KR_STATE_PRODUCE;
+ default: break;
}
}
/* Resolve current query and produce dependent or finish */
@@ -663,7 +662,7 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
ITERATE_LAYERS(request, qry, reset);
return kr_rplan_empty(rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
}
-
+ /* At this point we need to send a query upstream to proceed towards success. */
/* This query has RD=0 or is ANY, stop here. */
if (qry->stype == KNOT_RRTYPE_ANY ||
@@ -673,16 +672,13 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
}
/* Update zone cut, spawn new subrequests. */
- if (!(qry->flags.STUB)) {
- int state = zone_cut_check(request, qry, packet);
- switch(state) {
- case KR_STATE_FAIL: return KR_STATE_FAIL;
- case KR_STATE_DONE: return KR_STATE_PRODUCE;
- default: break;
- }
+ int state = zone_cut_check(request, qry, packet);
+ switch(state) {
+ case KR_STATE_FAIL: return KR_STATE_FAIL;
+ case KR_STATE_DONE: return KR_STATE_PRODUCE;
+ default: break;
}
-
const struct kr_qflags qflg = qry->flags;
const bool retry = qflg.TCP || qflg.BADCOOKIE_AGAIN;
if (!qflg.FORWARD && !qflg.STUB && !retry) { /* Keep NS when requerying/stub/badcookie. */
diff --git a/lib/resolve.h b/lib/resolve.h
index 4b15764a..34e02619 100644
--- a/lib/resolve.h
+++ b/lib/resolve.h
@@ -330,11 +330,12 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo
/**
* Produce either next additional query or finish.
*
- * If the CONSUME is returned then dst, type and packet will be filled with
+ * If the CONSUME is returned then *transport and *packet will be filled with
* appropriate values and caller is responsible to send them and receive answer.
* If it returns any other state, then content of the variables is undefined.
*
* Implemented in its own file ./resolve-produce.c
+ * FIXME: more issues in this doc-comment
*
* @param request request state (in PRODUCE state)
* @param dst [out] possible address of the next nameserver
diff --git a/lib/rplan.h b/lib/rplan.h
index 09dec987..4998bf05 100644
--- a/lib/rplan.h
+++ b/lib/rplan.h
@@ -8,6 +8,7 @@
#include <libknot/dname.h>
#include <libknot/codes.h>
+#include "lib/rules/api.h"
#include "lib/selection.h"
#include "lib/zonecut.h"
@@ -36,7 +37,8 @@ struct kr_qflags {
bool DNSSEC_INSECURE : 1;/**< Query response is DNSSEC insecure. */
bool DNSSEC_CD : 1; /**< Instruction to set CD bit in request. */
bool STUB : 1; /**< Stub resolution, accept received answer as solved. */
- bool ALWAYS_CUT : 1; /**< Always recover zone cut (even if cached). */
+ bool ALWAYS_CUT : 1; /**< Always recover zone cut (even if cached).
+ * This flag might be broken and/or not useful anymore. */
bool DNSSEC_WEXPAND : 1; /**< Query response has wildcard expansion. */
bool PERMISSIVE : 1; /**< Permissive resolver mode. */
bool STRICT : 1; /**< Strict resolver mode. */
@@ -74,8 +76,12 @@ void kr_qflags_clear(struct kr_qflags *fl1, struct kr_qflags fl2);
typedef int32_t (*kr_stale_cb)(int32_t ttl, const knot_dname_t *owner, uint16_t type,
const struct kr_query *qry);
-/**
- * Single query representation.
+/** A part of kr_request's resolution when sname and stype don't change.
+ *
+ * A kr_request can contain multiple kr_query parts. A new one is needed when:
+ * - CNAME jump occurs (or DNAME and similar)
+ * - some other records are needed to proceed, e.g. DS/DNSKEY for validation or NS addresses
+ * - generally, see kr_rplan_push() calls
*/
struct kr_query {
struct kr_query *parent;
@@ -96,6 +102,13 @@ struct kr_query {
struct timeval timestamp; /**< Real time for TTL+DNSSEC checks (.tv_sec only). */
struct kr_zonecut zone_cut;
struct kr_layer_pickle *deferred;
+ struct kr_query_data_src { // named struct to work around a bug in doc generator
+ bool initialized; /// !initialized -> all meaningless and zeroed
+ bool all_set;
+ uint8_t rule_depth; /// the number of labels for the apex
+ kr_rule_fwd_flags_t flags;
+ knot_db_val_t targets_ptr; /// pointer to targets inside rule DB
+ } data_src; /// information about "data source" for this sname+stype (+tags?)
/** Current xNAME depth, set by iterator. 0 = uninitialized, 1 = no CNAME, ...
* See also KR_CNAME_CHAIN_LIMIT. */
diff --git a/lib/rules/api.c b/lib/rules/api.c
index cdf2a434..4d003641 100644
--- a/lib/rules/api.c
+++ b/lib/rules/api.c
@@ -251,11 +251,19 @@ static size_t key_common_subtree(knot_db_val_t k1, knot_db_val_t k2, size_t lf_s
} while (true);
}
-int kr_rule_local_data_answer(struct kr_query *qry, knot_pkt_t *pkt)
+int rule_local_data_answer(struct kr_query *qry, knot_pkt_t *pkt)
{
+ // return shorthands; see doc-comment for kr_rule_local_data_answer()
+ static const int RET_CONT_CACHE = 0;
+ static const int RET_ANSWERED = 1;
+
kr_require(the_rules);
// TODO: implement EDE codes somehow
+ //if (kr_fails_assert(!qry->data_src.initialized)) // low-severity assertion
+ if (qry->data_src.initialized) // TODO: why does it happen?
+ memset(&qry->data_src, 0, sizeof(qry->data_src));
+
const uint16_t rrtype = qry->stype;
// Init the SNAME-based part of key; it's pretty static.
@@ -276,7 +284,8 @@ int kr_rule_local_data_answer(struct kr_query *qry, knot_pkt_t *pkt)
knot_db_val_t key_rsk = { .data = key_rs, .len = sizeof(key_rs) };
ret = ruledb_op(read, &key_rsk, &rulesets, 1);
}
- if (ret != 0) return ret; // including ENOENT: no rulesets -> no rule used
+ if (ret == kr_error(ENOENT)) return RET_CONT_CACHE; // no rulesets -> no rule used
+ if (ret != 0) return kr_error(ret);
const char *rulesets_str = rulesets.data;
// Iterate over all rulesets.
@@ -307,13 +316,14 @@ int kr_rule_local_data_answer(struct kr_query *qry, knot_pkt_t *pkt)
switch (ret) {
case -ENOENT: continue;
case 0: break;
- default: return ret;
+ default: return kr_error(ret);
}
if (!kr_rule_consume_tags(&val, qry->request)) continue;
// We found a rule that applies to the dname+rrtype+req.
- return answer_exact_match(qry, pkt, types[i],
- val.data, val.data + val.len);
+ ret = answer_exact_match(qry, pkt, types[i],
+ val.data, val.data + val.len);
+ return ret ? kr_error(ret) : RET_ANSWERED;
}
/* Find the closest zone-like apex that applies.
@@ -360,10 +370,25 @@ int kr_rule_local_data_answer(struct kr_query *qry, knot_pkt_t *pkt)
while (key_leq.len > lf_start_i && data[--key_leq.len] != '\0') ;
continue;
}
- // Tags OK; get ZLA type and TTL.
+ // Tags OK; get ZLA type and deal with special _FORWARD case
val_zla_type_t ztype;
if (deserialize_fails_assert(&val, &ztype))
return kr_error(EILSEQ);
+ if (ztype == VAL_ZLAT_FORWARD) {
+ knot_dname_t apex_name[KNOT_DNAME_MAXLEN];
+ ret = knot_dname_lf2wire(apex_name, zla_lf.len, zla_lf.data);
+ if (kr_fails_assert(ret > 0)) return kr_error(ret);
+ if (val.len > 0 // zero len -> default flags
+ && deserialize_fails_assert(&val, &qry->data_src.flags)) {
+ return kr_error(EILSEQ);
+ }
+
+ qry->data_src.initialized = true;
+ qry->data_src.targets_ptr = val;
+ qry->data_src.rule_depth = knot_dname_labels(apex_name, NULL);
+ return RET_CONT_CACHE;
+ }
+ // The other types optionally specify TTL.
uint32_t ttl = RULE_TTL_DEFAULT;
if (val.len >= sizeof(ttl)) // allow omitting -> can't kr_assert
deserialize_fails_assert(&val, &ttl);
@@ -381,14 +406,15 @@ int kr_rule_local_data_answer(struct kr_query *qry, knot_pkt_t *pkt)
goto shorten;
return ret;
case VAL_ZLAT_REDIRECT:
- return answer_zla_redirect(qry, pkt, ruleset_name, zla_lf, ttl);
+ ret = answer_zla_redirect(qry, pkt, ruleset_name, zla_lf, ttl);
+ return ret ? kr_error(ret) : RET_ANSWERED;
default:
return kr_error(EILSEQ);
}
} while (true);
}
- return kr_error(ENOENT);
+ return RET_CONT_CACHE;
}
/** SOA RDATA content, used as default in negative answers.
diff --git a/lib/rules/api.h b/lib/rules/api.h
index 7e2dd138..71549d7f 100644
--- a/lib/rules/api.h
+++ b/lib/rules/api.h
@@ -7,6 +7,7 @@
struct kr_query;
struct kr_request;
struct knot_pkt;
+struct sockaddr;
#include <libknot/db/db.h>
typedef uint64_t kr_rule_tags_t;
@@ -20,9 +21,9 @@ int kr_rules_init(void);
KR_EXPORT
void kr_rules_deinit(void);
-/** Try answering the query from local data.
+/** Try answering the query from local data; WIP: otherwise determine data source overrides.
*
- * \return kr_error(): notably -ENOENT or 0
+ * \return kr_error() on errors, >0 if answered, 0 otherwise (also when forwarding)
*
* FIXME: we probably want to ensure AA flags in answer as appropriate.
* Perhaps approach it like AD? Tweak flags in ranked_rr_array_entry
@@ -30,6 +31,9 @@ void kr_rules_deinit(void);
*/
int kr_rule_local_data_answer(struct kr_query *qry, struct knot_pkt *pkt);
+/** Set up nameserver+cut if overridden by policy. \return kr_error() */
+int kr_rule_data_src_check(struct kr_query *qry, struct knot_pkt *pkt);
+
/** Select the view action to perform.
*
* \param selected The action string from kr_view_insert_action()
@@ -125,3 +129,20 @@ struct kr_rule_zonefile_config {
KR_EXPORT
int kr_rule_zonefile(const struct kr_rule_zonefile_config *c);
+
+struct kr_rule_fwd_flags {
+ /// Beware of ABI: this struct is memcpy'd to/from rule DB.
+ bool
+ is_auth : 1,
+ is_tcp : 1, /// forced TCP (e.g. DoT)
+ is_nods : 1; /// disable local DNSSEC validation
+};
+typedef struct kr_rule_fwd_flags kr_rule_fwd_flags_t;
+/** Insert/overwrite a forwarding rule.
+ *
+ * Into the default rule-set ATM.
+ * \param targets NULL-terminated array. */
+KR_EXPORT
+int kr_rule_forward(const knot_dname_t *apex, kr_rule_fwd_flags_t flags,
+ const struct sockaddr * targets[]);
+
diff --git a/lib/rules/forward.c b/lib/rules/forward.c
new file mode 100644
index 00000000..289d13aa
--- /dev/null
+++ b/lib/rules/forward.c
@@ -0,0 +1,168 @@
+/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ */
+
+#include "lib/rules/api.h"
+#include "lib/rules/impl.h"
+
+#include "lib/layer/iterate.h"
+#include "lib/resolve.h"
+
+static void setup_fwd_flags(struct kr_query *qry)
+{
+ if (qry->flags.FORWARD || qry->flags.STUB)
+ return; // someone else has set it unexpectedly - policy?
+ // TODO: disallow or restrict somehow?
+ //if (kr_fails_assert(!qry->flags.FORWARD && !qry->flags.STUB))
+
+ if (!qry->data_src.initialized) {
+ // no VAL_ZLAT_FORWARD -> standard iteration
+ qry->data_src.initialized = true;
+ qry->data_src.rule_depth = 0;
+ qry->data_src.flags.is_auth = true;
+ return;
+ }
+
+ const kr_rule_fwd_flags_t zf = qry->data_src.flags;
+
+ qry->flags.TCP |= zf.is_tcp;
+
+ if (!zf.is_auth && !zf.is_nods) { // mostly like policy.(TLS_)FORWARD
+ qry->flags.FORWARD = true;
+ qry->flags.NO_MINIMIZE = true;
+ // this ^^ probably won't be needed after moving iterator's produce
+ return;
+ }
+
+ if (!zf.is_auth && zf.is_nods) { // mostly like policy.STUB
+ qry->flags.STUB = true;
+ return;
+ }
+
+ if (zf.is_auth) {
+ return;
+ }
+
+ kr_require(false);
+}
+
+// Wrapper around rule_local_data_answer() to finish forwarding-related flags.
+int kr_rule_local_data_answer(struct kr_query *qry, knot_pkt_t *pkt)
+{
+ int ret = rule_local_data_answer(qry, pkt); // the main body of work
+ if (ret < 0)
+ kr_log_debug(RULES, "policy rules failed: %s\n", kr_strerror(ret));
+ // deal with setting up .FORWARD and .STUB, so that cache lookup knows
+ setup_fwd_flags(qry);
+ // unfortunately, changing flags can change this from iterator
+ if (ret == 0 && (qry->flags.FORWARD || qry->flags.STUB))
+ ret = kr_make_query(qry, pkt);
+
+ //kr_assert(qry->data_src.initialized); // TODO: broken by old policy.FORWARD, etc.
+ return ret;
+}
+
+int kr_rule_data_src_check(struct kr_query *qry, struct knot_pkt *pkt)
+{
+ if (qry->data_src.all_set)
+ return kr_ok(); // everything should be in order from before
+
+ if (/*kr_fails_assert!*/(!qry->data_src.initialized)) { // FIXME ci val_ad_qtype_ds
+ // fall back to standard iteration
+ goto fallback;
+ }
+
+ if (!qry->data_src.flags.is_auth && qry->data_src.targets_ptr.data) {
+ struct kr_request *req = qry->request;
+ // In old policy this used to be done by kr_forward_add_target()
+ // For TLS see policy.TLS_FORWARD() and net_tls_client()
+ // The mapping from address+port to parameters are in tls_client_param_t
+ kr_sockaddr_array_t *targets = &req->selection_context.forwarding_targets;
+ const size_t t_bytes = qry->data_src.targets_ptr.len;
+ kr_assert(t_bytes > 0 && t_bytes % sizeof(targets->at[0]) == 0);
+ targets->cap = targets->len = t_bytes / sizeof(targets->at[0]);
+ targets->at = mm_alloc(&req->pool, t_bytes);
+ memcpy(targets->at, qry->data_src.targets_ptr.data, t_bytes);
+ qry->data_src.all_set = true;
+
+ kr_server_selection_init(qry); // this assumes `forwarding_targets` was filled
+ return kr_ok();
+ }
+
+ if (qry->data_src.flags.is_auth) {
+ if (!qry->data_src.targets_ptr.data)
+ goto fallback; // default iteration falls here
+ const knot_dname_t *apex = qry->sname;
+ for (int labels = knot_dname_labels(apex, NULL);
+ labels > qry->data_src.rule_depth;
+ --labels, apex = knot_wire_next_label(apex, NULL));
+ kr_zonecut_set(&qry->zone_cut, apex);
+ knot_db_val_t targets = qry->data_src.targets_ptr;
+ kr_assert(targets.len > 0);
+ while (targets.len > 0) {
+ union kr_sockaddr target;
+ if (deserialize_fails_assert(&targets, &target))
+ goto fallback;
+ int ret = kr_zonecut_add(&qry->zone_cut,
+ (const knot_dname_t *)"\2ns\7invalid",
+ kr_inaddr(&target.ip), kr_inaddr_len(&target.ip));
+ if (kr_fails_assert(ret == 0))
+ goto fallback;
+ }
+ kr_assert(targets.len == 0);
+ qry->flags.AWAIT_CUT = false;
+ qry->data_src.all_set = true;
+ kr_server_selection_init(qry);
+ // unfortunately, zone cut depth might've changed
+ return kr_make_query(qry, pkt);
+ }
+
+ kr_assert(false);
+fallback:
+ qry->data_src.initialized = true;
+ qry->data_src.rule_depth = 0;
+ qry->data_src.all_set = true;
+ kr_server_selection_init(qry);
+ return kr_ok();
+}
+
+int kr_rule_forward(const knot_dname_t *apex, kr_rule_fwd_flags_t flags,
+ const struct sockaddr * targets[])
+{
+ const kr_rule_tags_t tags = KR_RULE_TAGS_ALL;
+ const val_zla_type_t ztype = VAL_ZLAT_FORWARD;
+
+ int count = 0;
+ if (targets) {
+ while (targets[count])
+ ++count;
+ }
+
+ uint8_t key_data[KEY_MAXLEN];
+ knot_db_val_t key = zla_key(apex, key_data);
+
+ const size_t targets_bytes = count * sizeof(union kr_sockaddr);
+ knot_db_val_t val = {
+ .data = NULL,
+ .len = sizeof(tags) + sizeof(ztype) + sizeof(flags) + targets_bytes,
+ };
+ int ret = ruledb_op(write, &key, &val, 1);
+ if (kr_fails_assert(ret >= 0))
+ return kr_error(ret);
+ memcpy(val.data, &tags, sizeof(tags));
+ val.data += sizeof(tags);
+ memcpy(val.data, &ztype, sizeof(ztype));
+ val.data += sizeof(ztype);
+ memcpy(val.data, &flags, sizeof(flags));
+ val.data += sizeof(flags);
+ for (int i = 0; i < count; ++i) {
+ // targets[i] may be shorter than union kr_sockaddr, so we zero-pad
+ // LATER: for is_auth we really drop anything but address (e.g. port!=53)
+ union kr_sockaddr a = { 0 };
+ memcpy(&a, targets[i], kr_sockaddr_len(targets[i]));
+ memcpy(val.data, &a, sizeof(a));
+ val.data += sizeof(a);
+ }
+
+ return ruledb_op(commit);
+}
diff --git a/lib/rules/impl.h b/lib/rules/impl.h
index 398ca864..80eaad8d 100644
--- a/lib/rules/impl.h
+++ b/lib/rules/impl.h
@@ -35,6 +35,9 @@ int local_data_ins(knot_db_val_t key, const knot_rrset_t *rrs,
/** Construct key for a zone-like-apex entry. It's stored in `key_data`. */
knot_db_val_t zla_key(const knot_dname_t *apex, uint8_t key_data[KEY_MAXLEN]);
+/** Almost the whole kr_rule_local_data_answer() */
+int rule_local_data_answer(struct kr_query *qry, knot_pkt_t *pkt);
+
/** The first byte of zone-like apex value is its type. */
typedef uint8_t val_zla_type_t;
enum {
@@ -51,6 +54,8 @@ enum {
VAL_ZLAT_NODATA,
/** Redirect: anything beneath has the same data as apex (except NS+SOA). */
VAL_ZLAT_REDIRECT,
+ /** Forward, i.e. override upstream for this subtree (resolver or auth). */
+ VAL_ZLAT_FORWARD,
};
/** For now see kr_rule_local_data_emptyzone() and friends.
*
diff --git a/modules/policy/policy.lua b/modules/policy/policy.lua
index cc77e487..b4030375 100644
--- a/modules/policy/policy.lua
+++ b/modules/policy/policy.lua
@@ -856,6 +856,58 @@ function policy.TAGS_ASSIGN(names)
return 'policy.tags_assign_bitmap(' .. tostring(bitmap) .. ')'
end
+--[[ Insert a forwarding rule, i.e. override upstream for one DNS subtree.
+
+Throws lua exceptions when detecting something fishy.
+
+\param subtree plain string
+\param options
+ .auth targets are authoritative (false by default = resolver)
+ .tls use DoT (false by default, only for resolvers)
+ .dnssec if overridden to false, don't validate DNSSEC locally
+ - for resolvers we still do *not* send CD=1 upstream,
+ i.e. we trust their DNSSEC validation.
+ - for auths this inserts a negative trust anchor
+ Beware that setting .set_insecure() *later* would override that.
+\param targets same format as policy.TLS_FORWARD()
+--]]
+function policy.rule_forward_add(subtree, options, targets)
+ local port_default = 53
+ if options.tls or false then
+ port_default = 853
+ -- lots of code; easiest to just call it this way; checks and throws
+ policy.TLS_FORWARD(targets)
+ end
+
+ local targets_2 = {}
+ for _, target in ipairs(targets) do
+ -- this also throws on failure
+ local sock = addr2sock(target[1], port_default)
+ if options.auth then
+ local port = ffi.C.kr_inaddr_port(sock)
+ assert(not options.tls and port == port_default)
+ end
+ table.insert(targets_2, sock)
+ end
+ local targets_3 = ffi.new('const struct sockaddr * [?]', #targets_2 + 1, targets_2)
+ targets_3[#targets_2] = nil
+
+ local subtree_dname = todname(subtree)
+ assert(ffi.C.kr_rule_forward(subtree_dname,
+ { is_tcp = options.tls
+ , is_nods = options.dnssec == false
+ , is_auth = options.auth
+ },
+ targets_3
+ ) == 0)
+
+ -- Probably the best way to turn off DNSSEC validation for auth is negative TA.
+ if options.auth and options.dnssec == false then
+ local ntas = kres.context().negative_anchors
+ assert(ffi.C.kr_ta_add(ntas, subtree_dname, kres.type.DS, 0, nil, 0) == 0)
+ end
+end
+
local view_action_buf = ffi.new('knot_db_val_t[1]')