author     Vladimír Čunát <vladimir.cunat@nic.cz>    2024-09-30 15:34:11 +0200
committer  Vladimír Čunát <vladimir.cunat@nic.cz>    2024-09-30 15:34:11 +0200
commit     3b815e8f6989d64ce1facaa24dd0f94c585b819d (patch)
tree       48943e52d37bdb89b313dc6ba5320eb7c19ef140 /lib
parent     fixup! defer: add request and idle timeouts, limit on waiting queries (diff)
parent     Merge branch 'python-constants-module' into 'master' (diff)
Merge branch 'master' into rrl-wip
Diffstat (limited to 'lib')
-rw-r--r--  lib/README.rst           5
-rw-r--r--  lib/cache/api.c          4
-rw-r--r--  lib/cache/peek.c         6
-rw-r--r--  lib/dnssec.c            26
-rw-r--r--  lib/dnssec.h            32
-rw-r--r--  lib/generic/array.h      2
-rw-r--r--  lib/layer/iterate.c     13
-rw-r--r--  lib/layer/validate.c   164
-rw-r--r--  lib/log.h                2
-rw-r--r--  lib/resolve-produce.c   12
-rw-r--r--  lib/resolve.c           17
-rw-r--r--  lib/resolve.h            1
-rw-r--r--  lib/rules/api.c        133
-rw-r--r--  lib/rules/api.h          3
-rw-r--r--  lib/rules/zonefile.c     6
-rw-r--r--  lib/utils.c              4
-rw-r--r--  lib/utils.h              6
17 files changed, 334 insertions, 102 deletions
diff --git a/lib/README.rst b/lib/README.rst
index b631fe7b..f2463d4a 100644
--- a/lib/README.rst
+++ b/lib/README.rst
@@ -4,11 +4,6 @@
Knot Resolver library
*********************
-Requirements
-============
-
-* libknot_ 2.0 (Knot DNS high-performance DNS library.)
-
For users
=========
diff --git a/lib/cache/api.c b/lib/cache/api.c
index 0cd18534..046dae20 100644
--- a/lib/cache/api.c
+++ b/lib/cache/api.c
@@ -237,9 +237,7 @@ int32_t get_new_ttl(const struct entry_h *entry, const struct kr_query *qry,
int res_stale = qry->stale_cb(res, owner, type, qry);
if (res_stale >= 0) {
VERBOSE_MSG(qry, "responding with stale answer\n");
- /* LATER: Perhaps we could use a more specific Stale
- * NXDOMAIN Answer code for applicable responses. */
- kr_request_set_extended_error(qry->request, KNOT_EDNS_EDE_STALE, "6Q6X");
+ qry->request->stale_accounted = true;
return res_stale;
}
}
diff --git a/lib/cache/peek.c b/lib/cache/peek.c
index d12031fc..46a4868c 100644
--- a/lib/cache/peek.c
+++ b/lib/cache/peek.c
@@ -214,6 +214,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
/* Try the NSEC* parameters in order, until success.
* Let's not mix different parameters for NSEC* RRs in a single proof. */
+ bool is_synthesized = false;
for (int i = 0; ;) {
int32_t log_new_ttl = -123456789; /* visually recognizable value */
ret = nsec_p_ttl(el[i], qry->timestamp.tv_sec, &log_new_ttl);
@@ -234,6 +235,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
/**** 2. and 3. inside */
ret = peek_encloser(k, &ans, sname_labels,
lowest_rank, qry, cache);
+ is_synthesized = (ret == 0);
nsec_p_cleanup(&ans.nsec_p);
if (!ret) break;
if (ret < 0) return ctx->state;
@@ -316,6 +318,10 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
qf->CACHED = true;
qf->NO_MINIMIZE = true;
+ if (is_synthesized && qry == req->rplan.initial) {
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_SYNTHESIZED,
+ "2NEP: synthesized from aggressive cache");
+ }
return KR_STATE_DONE;
}
diff --git a/lib/dnssec.c b/lib/dnssec.c
index 77cec796..169ce2bf 100644
--- a/lib/dnssec.c
+++ b/lib/dnssec.c
@@ -63,6 +63,10 @@ static int validate_rrsig_rr(int *flags, int cov_labels,
if (kr_fails_assert(flags && rrsigs && vctx && vctx->zone_name)) {
return kr_error(EINVAL);
}
+ if (knot_rrsig_sig_expiration(rrsigs) < knot_rrsig_sig_inception(rrsigs)) {
+ vctx->rrs_counters.expired_before_inception++;
+ return kr_error(EINVAL);
+ }
/* bullet 5 */
if (knot_rrsig_sig_expiration(rrsigs) < vctx->timestamp) {
vctx->rrs_counters.expired++;
@@ -435,26 +439,32 @@ finish:
return vctx->result;
}
-bool kr_ds_algo_support(const knot_rrset_t *ta)
+int kr_ds_algo_support(const knot_rrset_t *ta)
{
if (kr_fails_assert(ta && ta->type == KNOT_RRTYPE_DS && ta->rclass == KNOT_CLASS_IN))
- return false;
+ return kr_error(EINVAL);
/* Check if at least one DS has a usable algorithm pair. */
+ int ret = kr_error(ENOENT);
knot_rdata_t *rdata_i = ta->rrs.rdata;
for (uint16_t i = 0; i < ta->rrs.count;
++i, rdata_i = knot_rdataset_next(rdata_i)) {
- if (dnssec_algorithm_digest_support(knot_ds_digest_type(rdata_i))
- && dnssec_algorithm_key_support(knot_ds_alg(rdata_i))) {
- return true;
- }
+ if (dnssec_algorithm_digest_support(knot_ds_digest_type(rdata_i))) {
+ if (dnssec_algorithm_key_support(knot_ds_alg(rdata_i)))
+ return kr_ok();
+ else
+ ret = DNSSEC_INVALID_KEY_ALGORITHM;
+ } else
+ ret = DNSSEC_INVALID_DIGEST_ALGORITHM;
}
- return false;
+ return ret;
}
-// Now we instantiate these two as non-inline externally linkable code here (for lua).
+// Now we instantiate these three as non-inline externally linkable code here (for lua).
KR_EXPORT extern inline KR_PURE
bool kr_dnssec_key_sep_flag(const uint8_t *dnskey_rdata);
KR_EXPORT extern inline KR_PURE
+bool kr_dnssec_key_zonekey_flag(const uint8_t *dnskey_rdata);
+KR_EXPORT extern inline KR_PURE
bool kr_dnssec_key_revoked(const uint8_t *dnskey_rdata);
int kr_dnskeys_trusted(kr_rrset_validation_ctx_t *vctx, const knot_rdataset_t *sigs,
diff --git a/lib/dnssec.h b/lib/dnssec.h
index 52465042..b9f854d0 100644
--- a/lib/dnssec.h
+++ b/lib/dnssec.h
@@ -56,8 +56,9 @@ struct kr_rrset_validation_ctx {
const struct kr_query *log_qry; /*!< The query; just for logging purposes. */
struct {
unsigned int matching_name_type; /*!< Name + type matches */
- unsigned int expired;
- unsigned int notyet;
+ unsigned int expired; /*!< Number of expired signatures */
+ unsigned int notyet; /*!< Number of signatures not yet valid (inception > now) */
+ unsigned int expired_before_inception; /*!< Number of signatures already expired before inception time */
unsigned int signer_invalid; /*!< Signer is not zone apex */
unsigned int labels_invalid; /*!< Number of labels in RRSIG */
unsigned int key_invalid; /*!< Algorithm/keytag/key owner */
@@ -78,10 +79,17 @@ typedef struct kr_rrset_validation_ctx kr_rrset_validation_ctx_t;
int kr_rrset_validate(kr_rrset_validation_ctx_t *vctx, knot_rrset_t *covered);
/**
- * Return true iff the RRset contains at least one usable DS. See RFC6840 5.2.
+ * Check whether the RRset contains at least one usable DS.
+ *
+ * See RFC6840 5.2.
+ * @param ta Pointer to TA RRSet.
+ * @return kr_ok() if at least one DS is supported,
+ *         DNSSEC_INVALID_KEY_ALGORITHM if no DS is supported because of its key algorithm,
+ *         DNSSEC_INVALID_DIGEST_ALGORITHM if no DS is supported because of its digest algorithm.
+ * @note Entries are iterated until a supported DS is found, so the error code refers to the last entry checked.
*/
KR_EXPORT KR_PURE
-bool kr_ds_algo_support(const knot_rrset_t *ta);
+int kr_ds_algo_support(const knot_rrset_t *ta);
/**
* Check whether the DNSKEY rrset matches the supplied trust anchor RRSet.
@@ -97,13 +105,20 @@ int kr_dnskeys_trusted(kr_rrset_validation_ctx_t *vctx, const knot_rdataset_t *s
// flags: https://www.iana.org/assignments/dnskey-flags/dnskey-flags.xhtml
// https://datatracker.ietf.org/doc/html/rfc4034#section-2.1
-/** Return true if the DNSKEY has the SEP flag (normally ignored). */
+/** Return true if the DNSKEY has the SEP flag/bit set (normally ignored). */
KR_EXPORT inline KR_PURE
bool kr_dnssec_key_sep_flag(const uint8_t *dnskey_rdata)
{
return dnskey_rdata[1] & 0x01;
}
+/** Return true if the DNSKEY has the Zone Key flag/bit set. */
+KR_EXPORT inline KR_PURE
+bool kr_dnssec_key_zonekey_flag(const uint8_t *dnskey_rdata)
+{
+ return dnskey_rdata[0] & 0x01;
+}
+
/** Return true if the DNSKEY is revoked. */
KR_EXPORT inline KR_PURE
bool kr_dnssec_key_revoked(const uint8_t *dnskey_rdata)
@@ -111,11 +126,14 @@ bool kr_dnssec_key_revoked(const uint8_t *dnskey_rdata)
return dnskey_rdata[1] & 0x80;
}
-/** Return true if the DNSKEY could be used to validate zone records. */
+/**
+ * Return true if the DNSKEY could be used to validate zone records, meaning
+ * it correctly has the Zone Key flag/bit set to 1 and it is not revoked.
+ */
static inline KR_PURE
bool kr_dnssec_key_usable(const uint8_t *dnskey_rdata)
{
- return (dnskey_rdata[0] & 0x01) && !kr_dnssec_key_revoked(dnskey_rdata);
+ return kr_dnssec_key_zonekey_flag(dnskey_rdata) && !kr_dnssec_key_revoked(dnskey_rdata);
}
/** Return DNSKEY tag.
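
To illustrate the flag helpers above: DNSKEY RDATA starts with a 16-bit flags word, so the Zone Key bit (0x0100) sits in the first RDATA byte while SEP (0x0001) and Revoke (0x0080) sit in the second. A minimal sketch with made-up flag bytes (not part of the patch):

static void dnskey_flags_example(void)
{
	const uint8_t ksk[]     = { 0x01, 0x01 };  /* flags = 257: Zone Key + SEP */
	const uint8_t revoked[] = { 0x01, 0x81 };  /* flags = 385: Zone Key + Revoke + SEP */
	kr_assert(kr_dnssec_key_zonekey_flag(ksk) && kr_dnssec_key_sep_flag(ksk));
	kr_assert(kr_dnssec_key_usable(ksk));       /* Zone Key set and not revoked */
	kr_assert(!kr_dnssec_key_usable(revoked));  /* the Revoke bit makes it unusable */
}
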
diff --git a/lib/generic/array.h b/lib/generic/array.h
index 9bea546b..eb1f7bc2 100644
--- a/lib/generic/array.h
+++ b/lib/generic/array.h
@@ -122,7 +122,7 @@ static inline void array_std_free(void *baton, void *p)
* @return element index on success, <0 on failure
*/
#define array_push_mm(array, val, reserve, baton) \
- (int)((array).len < (array).cap ? ((array).at[(array).len] = (val), (array).len++) \
+ (ssize_t)((array).len < (array).cap ? ((array).at[(array).len] = (val), (array).len++) \
: (array_reserve_mm(array, ((array).cap + 1), reserve, baton) < 0 ? -1 \
: ((array).at[(array).len] = (val), (array).len++)))
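
The push result is now cast to ssize_t so that indices are not truncated where array lengths may exceed INT_MAX; a negative value still signals a failed reallocation. A short usage sketch, assuming the usual array_t/array_init/array_push/array_clear wrappers from the same header:

static int collect_sample(void)
{
	array_t(uint64_t) samples;
	array_init(samples);
	ssize_t idx = array_push(samples, 42);  /* array_push() expands to array_push_mm() */
	if (idx < 0)
		return kr_error(ENOMEM);        /* reallocation inside the macro failed */
	array_clear(samples);
	return kr_ok();
}
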
diff --git a/lib/layer/iterate.c b/lib/layer/iterate.c
index 69fe344c..3cc641cd 100644
--- a/lib/layer/iterate.c
+++ b/lib/layer/iterate.c
@@ -922,14 +922,15 @@ static int begin(kr_layer_t *ctx)
}
struct kr_query *qry = ctx->req->current_query;
- /* Avoid any other classes, and avoid any meta-types ~~except for ANY~~. */
- if (qry->sclass != KNOT_CLASS_IN
- || (knot_rrtype_is_metatype(qry->stype)
- /* && qry->stype != KNOT_RRTYPE_ANY hmm ANY seems broken ATM */)) {
+ /* Avoid any other classes, and avoid any meta-types. */
+ if (qry->sclass != KNOT_CLASS_IN || knot_rrtype_is_metatype(qry->stype)) {
knot_pkt_t *ans = kr_request_ensure_answer(ctx->req);
- if (!ans) return ctx->req->state;
+ if (!ans)
+ return ctx->req->state;
+ /* This RCODE is explicitly suggested for meta QTYPEs in RFC 8906 sec.7 */
knot_wire_set_rcode(ans->wire, KNOT_RCODE_NOTIMPL);
- return KR_STATE_FAIL;
+ kr_request_set_extended_error(ctx->req, KNOT_EDNS_EDE_NOTSUP, "57CK");
+ return KR_STATE_DONE;
}
return reset(ctx);
diff --git a/lib/layer/validate.c b/lib/layer/validate.c
index af20b2e4..321b0a25 100644
--- a/lib/layer/validate.c
+++ b/lib/layer/validate.c
@@ -12,6 +12,7 @@
#include <libknot/rrtype/rdname.h>
#include <libknot/rrtype/rrsig.h>
#include <libdnssec/error.h>
+#include <libdnssec/key.h>
#include "lib/dnssec/nsec.h"
#include "lib/dnssec/nsec3.h"
@@ -137,6 +138,7 @@ do_downgrade: // we do this deep inside calls because of having signer name avai
VERBOSE_MSG(qry,
"<= DNSSEC downgraded due to expensive NSEC3: %d iterations, %d salt length\n",
(int)knot_nsec3_iters(rd), (int)knot_nsec3_salt_len(rd));
+ kr_request_set_extended_error(qry->request, KNOT_EDNS_EDE_NSEC3_ITERS, "AUO2");
qry->flags.DNSSEC_WANT = false;
qry->flags.DNSSEC_INSECURE = true;
rank_records(qry, true, KR_RANK_INSECURE, vctx->zone_name);
@@ -242,7 +244,9 @@ static int validate_section(kr_rrset_validation_ctx_t *vctx, struct kr_query *qr
} else {
kr_rank_set(&entry->rank, KR_RANK_BOGUS);
vctx->err_cnt += 1;
- if (vctx->rrs_counters.expired > 0)
+ if (vctx->rrs_counters.expired_before_inception > 0)
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_EXPIRED_INV, "XXAP");
+ else if (vctx->rrs_counters.expired > 0)
kr_request_set_extended_error(req, KNOT_EDNS_EDE_SIG_EXPIRED, "YFJ2");
else if (vctx->rrs_counters.notyet > 0)
kr_request_set_extended_error(req, KNOT_EDNS_EDE_SIG_NOTYET, "UBBS");
@@ -368,7 +372,12 @@ static int validate_keyset(struct kr_request *req, knot_pkt_t *answer, bool has_
}
}
if (sig_index < 0) {
- kr_request_set_extended_error(req, KNOT_EDNS_EDE_RRSIG_MISS, "EZDC");
+ if (!kr_dnssec_key_zonekey_flag(qry->zone_cut.key->rrs.rdata->data)) {
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_DNSKEY_BIT, "YQEH");
+ } else {
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_RRSIG_MISS,
+ "EZDC: no valid RRSIGs for DNSKEY");
+ }
return kr_error(ENOENT);
}
const knot_rdataset_t *sig_rds = &req->answ_selected.at[sig_index]->rr->rrs;
@@ -395,15 +404,49 @@ static int validate_keyset(struct kr_request *req, knot_pkt_t *answer, bool has_
ret == 0 ? KR_RANK_SECURE : KR_RANK_BOGUS);
if (ret != 0) {
- log_bogus_rrsig(&vctx, qry->zone_cut.key, "bogus key");
- knot_rrset_free(qry->zone_cut.key, qry->zone_cut.pool);
- qry->zone_cut.key = NULL;
- if (vctx.rrs_counters.expired > 0)
+ const knot_rdataset_t *ds = &qry->zone_cut.trust_anchor->rrs;
+ int sep_keytag = kr_dnssec_key_tag(KNOT_RRTYPE_DS, ds->rdata->data, ds->rdata->len);
+ int dnskey_keytag = -1;
+ bool have_zone_key_bit = true, dnskey_algo_supported = true;
+ knot_rdata_t *rdata_sep = NULL, *rdata_i = qry->zone_cut.key->rrs.rdata;
+ for (uint8_t i = 0; i < qry->zone_cut.key->rrs.count;
+ ++i, rdata_i = knot_rdataset_next(rdata_i)) {
+ if (dnskey_keytag != sep_keytag) {
+ dnskey_keytag = kr_dnssec_key_tag(KNOT_RRTYPE_DNSKEY, rdata_i->data, rdata_i->len);
+ rdata_sep = rdata_i;
+ }
+
+ if (!kr_dnssec_key_zonekey_flag(rdata_i->data))
+ have_zone_key_bit = false;
+
+ if (!dnssec_algorithm_key_support(knot_dnskey_alg(rdata_i)))
+ dnskey_algo_supported = false;
+ }
+ bool sep_matches_tag_algo = rdata_sep && sep_keytag == dnskey_keytag &&
+ knot_ds_alg(ds->rdata) == knot_dnskey_alg(rdata_sep);
+
+ if (!have_zone_key_bit)
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_DNSKEY_BIT, "CYNG");
+ else if (!sep_matches_tag_algo)
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_DNSKEY_MISS, "NMJZ: no matching SEP");
+ else if (kr_dnssec_key_revoked(rdata_sep->data))
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_DNSKEY_MISS, "DGVI: DNSKEY matching SEP has the Revoke bit set");
+ else if (!dnskey_algo_supported)
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_DNSKEY_ALG, "H6OO");
+ else if (vctx.rrs_counters.matching_name_type == 0 && vctx.rrs_counters.key_invalid > 0)
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_RRSIG_MISS, "7N4Z: no valid RRSIGs for DNSKEY");
+ else if (vctx.rrs_counters.expired_before_inception > 0)
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_EXPIRED_INV, "4UBF");
+ else if (vctx.rrs_counters.expired > 0)
kr_request_set_extended_error(req, KNOT_EDNS_EDE_SIG_EXPIRED, "6GJV");
else if (vctx.rrs_counters.notyet > 0)
kr_request_set_extended_error(req, KNOT_EDNS_EDE_SIG_NOTYET, "4DJQ");
else
kr_request_set_extended_error(req, KNOT_EDNS_EDE_BOGUS, "EXRU");
+
+ log_bogus_rrsig(&vctx, qry->zone_cut.key, "bogus key");
+ knot_rrset_free(qry->zone_cut.key, qry->zone_cut.pool);
+ qry->zone_cut.key = NULL;
return ret;
}
@@ -1137,7 +1180,7 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
count += (knot_pkt_rr(sec, i)->type == KNOT_RRTYPE_NSEC3);
if (count > 8) {
VERBOSE_MSG(qry, "<= too many NSEC3 records in AUTHORITY (%d)\n", count);
- kr_request_set_extended_error(req, 27/*KNOT_EDNS_EDE_NSEC3_ITERS*/,
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_NSEC3_ITERS,
/* It's not about iteration values per se, but close enough. */
"DYRH: too many NSEC3 records");
qry->flags.DNSSEC_BOGUS = true;
@@ -1147,10 +1190,19 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
if (knot_wire_get_aa(pkt->wire) && qtype == KNOT_RRTYPE_DNSKEY) {
const knot_rrset_t *ds = qry->zone_cut.trust_anchor;
- if (ds && !kr_ds_algo_support(ds)) {
- VERBOSE_MSG(qry, ">< all DS entries use unsupported algorithm pairs, going insecure\n");
- /* ^ the message is a bit imprecise to avoid being too verbose */
- kr_request_set_extended_error(req, KNOT_EDNS_EDE_OTHER, "LSLC: unsupported digest/key");
+ ret = ds ? kr_ds_algo_support(ds) : kr_ok();
+ if (ret != kr_ok()) {
+ char *reason = "???";
+ if (ret == DNSSEC_INVALID_KEY_ALGORITHM) {
+ reason = "key";
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_DNSKEY_ALG, "PBAO");
+ } else if (ret == DNSSEC_INVALID_DIGEST_ALGORITHM) {
+ reason = "digest";
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_DS_DIGEST, "DDDV");
+ }
+ VERBOSE_MSG(qry,
+ ">< all DS entries are unsupported (last error: %s algorithm), going insecure\n",
+ reason);
qry->flags.DNSSEC_WANT = false;
qry->flags.DNSSEC_INSECURE = true;
rank_records(qry, true, KR_RANK_INSECURE, qry->zone_cut.name);
@@ -1320,37 +1372,6 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
VERBOSE_MSG(qry, "<= answer valid, OK\n");
return KR_STATE_DONE;
}
-
-/** Hide RRsets which did not validate from clients. */
-static int hide_bogus(kr_layer_t *ctx) {
- if (knot_wire_get_cd(ctx->req->qsource.packet->wire)) {
- return ctx->state;
- }
- /* We don't want to send bogus answers to clients, not even in SERVFAIL
- * answers, but we cannot drop whole sections. If a CNAME chain
- * SERVFAILs somewhere, the steps that were OK should be put into
- * answer.
- *
- * There is one specific issue: currently we follow CNAME *before*
- * we validate it, because... iterator comes before validator.
- * Therefore some rrsets might be added into req->*_selected before
- * we detected failure in validator.
- * TODO: better approach, probably during work on parallel queries.
- */
- const ranked_rr_array_t *sel[] = kr_request_selected(ctx->req);
- for (knot_section_t sect = KNOT_ANSWER; sect <= KNOT_ADDITIONAL; ++sect) {
- for (size_t i = 0; i < sel[sect]->len; ++i) {
- ranked_rr_array_entry_t *e = sel[sect]->at[i];
- e->to_wire = e->to_wire
- && !kr_rank_test(e->rank, KR_RANK_INDET)
- && !kr_rank_test(e->rank, KR_RANK_BOGUS)
- && !kr_rank_test(e->rank, KR_RANK_MISMATCH)
- && !kr_rank_test(e->rank, KR_RANK_MISSING);
- }
- }
- return ctx->state;
-}
-
static int validate_wrapper(kr_layer_t *ctx, knot_pkt_t *pkt) {
// Wrapper for now.
int ret = validate(ctx, pkt);
@@ -1358,25 +1379,64 @@ static int validate_wrapper(kr_layer_t *ctx, knot_pkt_t *pkt) {
struct kr_query *qry = req->current_query;
if (ret & KR_STATE_FAIL && qry->flags.DNSSEC_BOGUS)
qry->server_selection.error(qry, req->upstream.transport, KR_SELECTION_DNSSEC_ERROR);
- if (ret & KR_STATE_DONE && !qry->flags.DNSSEC_BOGUS) {
- /* Don't report extended DNS errors related to validation
- * when it managed to succeed (e.g. by trying different auth). */
- switch (req->extended_error.info_code) {
+ return ret;
+}
+
+/**
+ * Hide RRsets which did not validate from clients and clear Extended
+ * Error if a query failed validation, but later managed to succeed.
+ */
+static int validate_finalize(kr_layer_t *ctx) {
+ if (!knot_wire_get_cd(ctx->req->qsource.packet->wire)) {
+ /* We don't want to send bogus answers to clients, not even in SERVFAIL
+ * answers, but we cannot drop whole sections. If a CNAME chain
+ * SERVFAILs somewhere, the steps that were OK should be put into
+ * answer.
+ *
+ * There is one specific issue: currently we follow CNAME *before*
+ * we validate it, because... iterator comes before validator.
+ * Therefore some rrsets might be added into req->*_selected before
+ * we detected failure in validator.
+ * TODO: better approach, probably during work on parallel queries.
+ */
+ const ranked_rr_array_t *sel[] = kr_request_selected(ctx->req);
+ for (knot_section_t sect = KNOT_ANSWER; sect <= KNOT_ADDITIONAL; ++sect) {
+ for (size_t i = 0; i < sel[sect]->len; ++i) {
+ ranked_rr_array_entry_t *e = sel[sect]->at[i];
+ e->to_wire = e->to_wire
+ && !kr_rank_test(e->rank, KR_RANK_INDET)
+ && !kr_rank_test(e->rank, KR_RANK_BOGUS)
+ && !kr_rank_test(e->rank, KR_RANK_MISMATCH)
+ && !kr_rank_test(e->rank, KR_RANK_MISSING);
+ }
+ }
+ }
+
+ /* Clear DNSSEC-related Extended Error in case the request managed to succeed somehow. */
+ if (ctx->state == KR_STATE_DONE) {
+ switch (ctx->req->extended_error.info_code) {
+ case KNOT_EDNS_EDE_DNSKEY_ALG:
+ case KNOT_EDNS_EDE_DS_DIGEST:
+ case KNOT_EDNS_EDE_NSEC3_ITERS: ;
+ /* These EDEs are meant to result into _INSECURE success. */
+ const struct kr_query *qry = kr_rplan_resolved(&ctx->req->rplan);
+ if (qry->flags.DNSSEC_INSECURE)
+ break;
case KNOT_EDNS_EDE_BOGUS:
case KNOT_EDNS_EDE_NSEC_MISS:
case KNOT_EDNS_EDE_RRSIG_MISS:
case KNOT_EDNS_EDE_SIG_EXPIRED:
+ case KNOT_EDNS_EDE_EXPIRED_INV:
case KNOT_EDNS_EDE_SIG_NOTYET:
- kr_request_set_extended_error(req, KNOT_EDNS_EDE_NONE, NULL);
- break;
- case KNOT_EDNS_EDE_DNSKEY_MISS:
case KNOT_EDNS_EDE_DNSKEY_BIT:
- kr_assert(false); /* These EDE codes aren't used. */
+ case KNOT_EDNS_EDE_DNSKEY_MISS:
+ kr_request_set_extended_error(ctx->req, KNOT_EDNS_EDE_NONE, NULL);
break;
default: break; /* Remaining codes don't indicate hard DNSSEC failure. */
}
}
- return ret;
+
+ return ctx->state;
}
@@ -1385,7 +1445,7 @@ int validate_init(struct kr_module *self)
{
static const kr_layer_api_t layer = {
.consume = &validate_wrapper,
- .answer_finalize = &hide_bogus,
+ .answer_finalize = &validate_finalize,
};
self->layer = &layer;
return kr_ok();
diff --git a/lib/log.h b/lib/log.h
index a3887e57..d3bc9145 100644
--- a/lib/log.h
+++ b/lib/log.h
@@ -109,7 +109,7 @@ enum kr_log_group {
#define LOG_GRP_VALIDATOR_TAG "valdtr" /**< ``valdtr``: operations related to validate layer */
#define LOG_GRP_RESOLVER_TAG "resolv" /**< ``resolv``: operations related to resolving */
#define LOG_GRP_SELECTION_TAG "select" /**< ``select``: operations related to server selection */
-#define LOG_GRP_ZCUT_TAG "zoncut" /**< ``zonecut``: operations related to zone cut */
+#define LOG_GRP_ZCUT_TAG "zoncut" /**< ``zoncut``: operations related to zone cut */
#define LOG_GRP_COOKIES_TAG "cookie" /**< ``cookie``: operations related to cookies */
#define LOG_GRP_STATISTICS_TAG "statis" /**< ``statis``: operations related to statistics */
#define LOG_GRP_REBIND_TAG "rebind" /**< ``rebind``: operations related to rebinding */
diff --git a/lib/resolve-produce.c b/lib/resolve-produce.c
index 563a2ca2..a3a2401e 100644
--- a/lib/resolve-produce.c
+++ b/lib/resolve-produce.c
@@ -697,6 +697,18 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
if (qry->flags.NO_NS_FOUND) {
ITERATE_LAYERS(request, qry, reset);
kr_rplan_pop(rplan, qry);
+
+ /* Construct EDE message. We need it on mempool. */
+ char cut_buf[KR_DNAME_STR_MAXLEN];
+ char *msg = knot_dname_to_str(cut_buf, qry->zone_cut.name, sizeof(cut_buf));
+ if (!kr_fails_assert(msg)) {
+ if (*qry->zone_cut.name != '\0') /* Strip trailing dot. */
+ cut_buf[strlen(cut_buf) - 1] = '\0';
+ msg = kr_strcatdup_pool(&request->pool, 2,
+ "P3CD: delegation ", cut_buf);
+ }
+ kr_request_set_extended_error(request, KNOT_EDNS_EDE_NREACH_AUTH, msg);
+
return KR_STATE_FAIL;
} else {
/* FIXME: This is probably quite inefficient:
diff --git a/lib/resolve.c b/lib/resolve.c
index 4730f105..bc00471b 100644
--- a/lib/resolve.c
+++ b/lib/resolve.c
@@ -738,6 +738,17 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo
qry->flags.NO_NS_FOUND = true;
return KR_STATE_PRODUCE;
}
+
+ /* Construct EDE message. We need it on mempool. */
+ char cut_buf[KR_DNAME_STR_MAXLEN];
+ char *msg = knot_dname_to_str(cut_buf, qry->zone_cut.name, sizeof(cut_buf));
+ if (!kr_fails_assert(msg)) {
+ if (*qry->zone_cut.name != '\0') /* Strip trailing dot. */
+ cut_buf[strlen(cut_buf) - 1] = '\0';
+ msg = kr_strcatdup_pool(&request->pool, 2,
+ "OLX2: delegation ", cut_buf);
+ }
+ kr_request_set_extended_error(request, KNOT_EDNS_EDE_NREACH_AUTH, msg);
return KR_STATE_FAIL;
}
} else {
@@ -972,12 +983,15 @@ knot_mm_t *kr_resolve_pool(struct kr_request *request)
static int ede_priority(int info_code)
{
switch(info_code) {
+ case KNOT_EDNS_EDE_TOO_EARLY:
+ return 910;
case KNOT_EDNS_EDE_DNSKEY_BIT:
case KNOT_EDNS_EDE_DNSKEY_MISS:
case KNOT_EDNS_EDE_SIG_EXPIRED:
case KNOT_EDNS_EDE_SIG_NOTYET:
case KNOT_EDNS_EDE_RRSIG_MISS:
case KNOT_EDNS_EDE_NSEC_MISS:
+ case KNOT_EDNS_EDE_EXPIRED_INV:
return 900; /* Specific DNSSEC failures */
case KNOT_EDNS_EDE_BOGUS:
return 800; /* Generic DNSSEC failure */
@@ -990,6 +1004,7 @@ static int ede_priority(int info_code)
return 600; /* Policy related */
case KNOT_EDNS_EDE_DNSKEY_ALG:
case KNOT_EDNS_EDE_DS_DIGEST:
+ case KNOT_EDNS_EDE_NSEC3_ITERS:
return 500; /* Non-critical DNSSEC issues */
case KNOT_EDNS_EDE_STALE:
case KNOT_EDNS_EDE_STALE_NXD:
@@ -1002,10 +1017,12 @@ static int ede_priority(int info_code)
case KNOT_EDNS_EDE_NREACH_AUTH:
case KNOT_EDNS_EDE_NETWORK:
case KNOT_EDNS_EDE_INV_DATA:
+ case KNOT_EDNS_EDE_SYNTHESIZED:
return 200; /* Assorted codes */
case KNOT_EDNS_EDE_OTHER:
return 100; /* Most generic catch-all error */
case KNOT_EDNS_EDE_NONE:
+ case KNOT_EDNS_EDE_NONCONF_POLICY: /* Defined by an expired Internet Draft */
return 0; /* No error - allow overriding */
default:
kr_assert(false); /* Unknown info_code */
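
ede_priority() ranks Extended DNS Error codes so that a more specific or more severe code is not overwritten by a less important one reported later. A minimal sketch of that intent, using a hypothetical helper rather than the real kr_request_set_extended_error() body (the extended_error field names are assumptions):

static void ede_store(struct kr_request *req, int info_code, const char *extra_text)
{
	/* Keep the code already recorded if it is at least as important. */
	if (ede_priority(info_code) < ede_priority(req->extended_error.info_code))
		return;
	req->extended_error.info_code = info_code;
	req->extended_error.extra_text = extra_text;
}
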
diff --git a/lib/resolve.h b/lib/resolve.h
index 443fef29..cbc20877 100644
--- a/lib/resolve.h
+++ b/lib/resolve.h
@@ -260,6 +260,7 @@ struct kr_request {
ranked_rr_array_t add_selected;
bool answ_validated; /**< internal to validator; beware of caching, etc. */
bool auth_validated; /**< see answ_validated ^^ ; TODO */
+ bool stale_accounted;
/** Overall rank for the request.
*
diff --git a/lib/rules/api.c b/lib/rules/api.c
index 5ecbe29e..53ebbf7e 100644
--- a/lib/rules/api.c
+++ b/lib/rules/api.c
@@ -46,8 +46,12 @@ static int answer_exact_match(struct kr_query *qry, knot_pkt_t *pkt, uint16_t ty
const uint8_t *data, const uint8_t *data_bound);
static int answer_zla_empty(val_zla_type_t type, struct kr_query *qry, knot_pkt_t *pkt,
knot_db_val_t zla_lf, uint32_t ttl);
+static int answer_zla_dname(val_zla_type_t type, struct kr_query *qry, knot_pkt_t *pkt,
+ knot_db_val_t zla_lf, uint32_t ttl, knot_db_val_t *val);
static int answer_zla_redirect(struct kr_query *qry, knot_pkt_t *pkt, const char *ruleset_name,
knot_db_val_t zla_lf, uint32_t ttl);
+static int rule_local_subtree(const knot_dname_t *apex, enum kr_rule_sub_t type,
+ const knot_dname_t *target, uint32_t ttl, kr_rule_tags_t tags);
// LATER: doing tag_names_default() and kr_rule_tag_add() inside a RW transaction would be better.
static int tag_names_default(void)
@@ -418,25 +422,30 @@ int rule_local_data_answer(struct kr_query *qry, knot_pkt_t *pkt)
uint32_t ttl = KR_RULE_TTL_DEFAULT;
if (val.len >= sizeof(ttl)) // allow omitting -> can't kr_assert
deserialize_fails_assert(&val, &ttl);
- if (kr_fails_assert(val.len == 0)) {
- kr_log_error(RULES, "ERROR: unused bytes: %zu\n", val.len);
- return kr_error(EILSEQ);
- }
+
// Finally execute the rule.
switch (ztype) {
case KR_RULE_SUB_EMPTY:
case KR_RULE_SUB_NXDOMAIN:
case KR_RULE_SUB_NODATA:
ret = answer_zla_empty(ztype, qry, pkt, zla_lf, ttl);
- if (ret == kr_error(EAGAIN))
- goto shorten;
- return ret ? ret : RET_ANSWERED;
+ break;
case KR_RULE_SUB_REDIRECT:
ret = answer_zla_redirect(qry, pkt, ruleset_name, zla_lf, ttl);
- return ret ? kr_error(ret) : RET_ANSWERED;
+ break;
+ case KR_RULE_SUB_DNAME:
+ ret = answer_zla_dname(ztype, qry, pkt, zla_lf, ttl, &val);
+ break;
default:
return kr_error(EILSEQ);
}
+ if (kr_fails_assert(val.len == 0)) {
+ kr_log_error(RULES, "ERROR: unused bytes: %zu\n", val.len);
+ return kr_error(EILSEQ);
+ }
+ if (ret == kr_error(EAGAIN))
+ goto shorten;
+ return ret ? kr_error(ret) : RET_ANSWERED;
} while (true);
}
@@ -570,7 +579,17 @@ int local_data_ins(knot_db_val_t key, const knot_rrset_t *rrs,
int ret = ruledb_op(write, &key, &val, 1); // TODO: overwriting on ==tags?
// ENOSPC seems to be the only expectable error.
kr_assert(ret == 0 || ret == kr_error(ENOSPC));
- return ret;
+
+ if (ret || rrs->type != KNOT_RRTYPE_DNAME)
+ return ret;
+ // Now we do special handling for DNAMEs
+ // - we inserted as usual, so that it works with QTYPE == DNAME
+ // - now we insert a ZLA to handle generating CNAMEs
+ // - yes, some edge cases won't work as in real DNS zones (e.g. occlusion)
+ if (kr_fails_assert(rrs->rrs.count))
+ return kr_error(EINVAL);
+ return rule_local_subtree(rrs->owner, KR_RULE_SUB_DNAME,
+ knot_dname_target(rrs->rrs.rdata), rrs->ttl, tags);
}
int kr_rule_local_data_del(const knot_rrset_t *rrs, kr_rule_tags_t tags)
{
@@ -697,6 +716,78 @@ static int answer_zla_empty(val_zla_type_t type, struct kr_query *qry, knot_pkt_
return kr_ok();
}
+static int answer_zla_dname(val_zla_type_t type, struct kr_query *qry, knot_pkt_t *pkt,
+ const knot_db_val_t zla_lf, uint32_t ttl, knot_db_val_t *val)
+{
+ if (kr_fails_assert(type == KR_RULE_SUB_DNAME))
+ return kr_error(EINVAL);
+
+ const knot_dname_t *dname_target = val->data;
+ // Theoretically this check could overread the val->len, but that's OK,
+ // as the policy DB contents wouldn't be directly written by a malicious party.
+ // Moreover, an overread shouldn't cause anything worse than a clean segfault.
+ if (kr_fails_assert(knot_dname_size(dname_target) == val->len))
+ return kr_error(EILSEQ);
+ { // update *val; avoiding void* arithmetics complicates this
+ char *tmp = val->data;
+ tmp += val->len;
+ val->data = tmp;
+
+ val->len = 0;
+ }
+
+ knot_dname_t apex_name[KNOT_DNAME_MAXLEN];
+ int ret = knot_dname_lf2wire(apex_name, zla_lf.len, zla_lf.data);
+ CHECK_RET(ret);
+
+ const bool hit_apex = knot_dname_is_equal(qry->sname, apex_name);
+ if (hit_apex && type == KR_RULE_SUB_DNAME)
+ return kr_error(EAGAIN); // LATER: maybe a type that matches apex
+
+ // Start constructing the (pseudo-)packet.
+ ret = pkt_renew(pkt, qry->sname, qry->stype);
+ CHECK_RET(ret);
+ struct answer_rrset arrset;
+ memset(&arrset, 0, sizeof(arrset));
+
+ arrset.set.rr = knot_rrset_new(qry->sname, KNOT_RRTYPE_CNAME,
+ KNOT_CLASS_IN, ttl, &pkt->mm);
+ if (kr_fails_assert(arrset.set.rr))
+ return kr_error(ENOMEM);
+ const knot_dname_t *cname_target = knot_dname_replace_suffix(qry->sname,
+ knot_dname_labels(apex_name, NULL), dname_target, &pkt->mm);
+ const int rdata_len = knot_dname_size(cname_target);
+ const bool cname_fits = rdata_len <= KNOT_DNAME_MAXLEN;
+ if (cname_fits) {
+ ret = knot_rrset_add_rdata(arrset.set.rr, cname_target,
+ knot_dname_size(cname_target), &pkt->mm);
+ CHECK_RET(ret);
+ }
+
+ arrset.set.rank = KR_RANK_SECURE | KR_RANK_AUTH; // local data has high trust
+ arrset.set.expiring = false;
+
+ if (cname_fits) {
+ knot_wire_set_rcode(pkt->wire, KNOT_RCODE_NOERROR);
+ ret = knot_pkt_begin(pkt, KNOT_ANSWER);
+ CHECK_RET(ret);
+
+ // Put links to the RR into the pkt.
+ ret = pkt_append(pkt, &arrset);
+ CHECK_RET(ret);
+ } else {
+ knot_wire_set_rcode(pkt->wire, KNOT_RCODE_YXDOMAIN);
+ }
+
+ // Finishing touches.
+ qry->flags.EXPIRING = false;
+ qry->flags.CACHED = true;
+ qry->flags.NO_MINIMIZE = true;
+
+ VERBOSE_MSG(qry, "=> satisfied by local data (DNAME)\n");
+ return kr_ok();
+}
+
static int answer_zla_redirect(struct kr_query *qry, knot_pkt_t *pkt, const char *ruleset_name,
const knot_db_val_t zla_lf, uint32_t ttl)
{
@@ -760,6 +851,11 @@ nodata: // Want NODATA answer (or NOERROR if it hits apex SOA).
return kr_ok();
}
+int kr_rule_local_subtree(const knot_dname_t *apex, enum kr_rule_sub_t type,
+ uint32_t ttl, kr_rule_tags_t tags)
+{
+ return rule_local_subtree(apex, type, NULL, ttl, tags);
+}
knot_db_val_t zla_key(const knot_dname_t *apex, uint8_t key_data[KEY_MAXLEN])
{
kr_require(the_rules);
@@ -775,11 +871,16 @@ knot_db_val_t zla_key(const knot_dname_t *apex, uint8_t key_data[KEY_MAXLEN])
key.len = key_data + KEY_DNAME_END_OFFSET - (uint8_t *)key.data;
return key;
}
-int kr_rule_local_subtree(const knot_dname_t *apex, enum kr_rule_sub_t type,
- uint32_t ttl, kr_rule_tags_t tags)
+static int rule_local_subtree(const knot_dname_t *apex, enum kr_rule_sub_t type,
+ const knot_dname_t *target, uint32_t ttl, kr_rule_tags_t tags)
{
// type-check
+ const bool has_target = (type == KR_RULE_SUB_DNAME);
switch (type) {
+ case KR_RULE_SUB_DNAME:
+ if (kr_fails_assert(!!target == has_target))
+ return kr_error(EINVAL);
+ break;
case KR_RULE_SUB_EMPTY:
case KR_RULE_SUB_NXDOMAIN:
case KR_RULE_SUB_NODATA:
@@ -797,8 +898,10 @@ int kr_rule_local_subtree(const knot_dname_t *apex, enum kr_rule_sub_t type,
knot_db_val_t key = zla_key(apex, key_data);
// Prepare the data into a temporary buffer.
- const bool has_ttl = ttl != KR_RULE_TTL_DEFAULT;
- const int val_len = sizeof(tags) + sizeof(ztype) + (has_ttl ? sizeof(ttl) : 0);
+ const int target_len = has_target ? knot_dname_size(target) : 0;
+ const bool has_ttl = ttl != KR_RULE_TTL_DEFAULT || has_target;
+ const int val_len = sizeof(tags) + sizeof(ztype) + (has_ttl ? sizeof(ttl) : 0)
+ + target_len;
uint8_t buf[val_len], *data = buf;
memcpy(data, &tags, sizeof(tags));
data += sizeof(tags);
@@ -808,6 +911,10 @@ int kr_rule_local_subtree(const knot_dname_t *apex, enum kr_rule_sub_t type,
memcpy(data, &ttl, sizeof(ttl));
data += sizeof(ttl);
}
+ if (has_target) {
+ memcpy(data, target, target_len);
+ data += target_len;
+ }
kr_require(data == buf + val_len);
knot_db_val_t val = { .data = buf, .len = val_len };
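
For KR_RULE_SUB_DNAME the serialized rule value thus ends up as: tags, ztype, a TTL (always present for this type), and finally the wire-format target name, which answer_zla_dname() later consumes as the remainder of val. A small read-side sketch under those assumptions (the helper name and the length check are illustrative; the real code asserts an exact match instead):

static const knot_dname_t *zla_dname_target(knot_db_val_t *val)
{
	const knot_dname_t *target = val->data;   /* wire format, root-terminated */
	const size_t len = knot_dname_size(target);
	if (len > val->len)
		return NULL;                      /* malformed rule value */
	val->data = (char *)val->data + len;      /* consume it, like answer_zla_dname() */
	val->len -= len;
	return target;
}
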
diff --git a/lib/rules/api.h b/lib/rules/api.h
index f1737a19..c7d1dd29 100644
--- a/lib/rules/api.h
+++ b/lib/rules/api.h
@@ -156,11 +156,14 @@ enum kr_rule_sub_t {
KR_RULE_SUB_NODATA,
/// Redirect: anything beneath has the same data as apex (except NS+SOA).
KR_RULE_SUB_REDIRECT,
+ /// Act similar to DNAME: rebase everything underneath by generated CNAMEs.
+ KR_RULE_SUB_DNAME,
};
/** Insert a simple sub-tree rule.
*
* - into the default rule-set
* - SOA and NS for generated answers aren't overridable.
+ * - type: you can't use _DNAME via this function; insert it by kr_rule_local_data_ins()
*/
KR_EXPORT
int kr_rule_local_subtree(const knot_dname_t *apex, enum kr_rule_sub_t type,
diff --git a/lib/rules/zonefile.c b/lib/rules/zonefile.c
index d308f375..773ca937 100644
--- a/lib/rules/zonefile.c
+++ b/lib/rules/zonefile.c
@@ -47,8 +47,10 @@ static void rr_scan2trie(zs_scanner_t *s)
rr->ttl = s->r_ttl; // we could also warn here
} else {
rr = *rr_p = mm_alloc(s_data->pool, sizeof(*rr));
- knot_rrset_init(rr, NULL, s->r_type, KNOT_CLASS_IN, s->r_ttl);
- // we don't ^^ need owner so save allocation
+ knot_dname_t *owner = NULL; // we only utilize owner for DNAMEs
+ if (s->r_type == KNOT_RRTYPE_DNAME) // Nit: copy could be done a bit faster
+ owner = knot_dname_copy(s->r_owner, s_data->pool);
+ knot_rrset_init(rr, owner, s->r_type, KNOT_CLASS_IN, s->r_ttl);
}
int ret = knot_rrset_add_rdata(rr, s->r_data, s->r_data_length, s_data->pool);
kr_assert(!ret);
diff --git a/lib/utils.c b/lib/utils.c
index de7c02cb..3af2fd04 100644
--- a/lib/utils.c
+++ b/lib/utils.c
@@ -107,7 +107,7 @@ static inline int u16tostr(uint8_t *dst, uint16_t num)
return 5;
}
-char* kr_strcatdup(unsigned n, ...)
+char* kr_strcatdup_pool(knot_mm_t *pool, unsigned n, ...)
{
if (n < 1) {
return NULL;
@@ -132,7 +132,7 @@ char* kr_strcatdup(unsigned n, ...)
char *result = NULL;
if (total_len > 0) {
if (unlikely(total_len == SIZE_MAX)) return NULL;
- result = malloc(total_len + 1);
+ result = mm_alloc(pool, total_len + 1);
}
if (result) {
char *stream = result;
diff --git a/lib/utils.h b/lib/utils.h
index 9fdc2d48..8c1ef8c1 100644
--- a/lib/utils.h
+++ b/lib/utils.h
@@ -170,9 +170,11 @@ typedef struct kr_http_header_array_entry {
/** Array of HTTP headers for DoH. */
typedef array_t(kr_http_header_array_entry_t) kr_http_header_array_t;
-/** Concatenate N strings. */
+/** Concatenate N strings and put the result into a mempool. */
KR_EXPORT
-char* kr_strcatdup(unsigned n, ...);
+char* kr_strcatdup_pool(knot_mm_t *pool, unsigned n, ...);
+/** Concatenate N strings. */
+#define kr_strcatdup(n, ...) kr_strcatdup_pool(NULL, n, ## __VA_ARGS__)
/** Construct absolute file path, without resolving symlinks.
* \return malloc-ed string or NULL (+errno in that case) */
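
kr_strcatdup_pool() allocates the concatenated string from the given mempool, while the kr_strcatdup() compatibility macro passes a NULL pool, which mm_alloc() turns into a plain malloc() as before. A short usage sketch (the function name is illustrative; it assumes a struct kr_request with its usual pool member):

static void concat_example(struct kr_request *req)
{
	/* Pool-backed: released together with the request's mempool. */
	char *on_pool = kr_strcatdup_pool(&req->pool, 2, "delegation ", "example.com.");
	/* Heap-backed via the macro: still needs an explicit free(). */
	char *on_heap = kr_strcatdup(2, "foo", "bar");
	free(on_heap);
	(void)on_pool;
}
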