summary refs log tree commit diff stats
path: root/lib
diff options
context:
space:
mode:
authorVladimír Čunát <vladimir.cunat@nic.cz>2018-07-02 14:40:58 +0200
committerVladimír Čunát <vladimir.cunat@nic.cz>2018-08-14 10:36:10 +0200
commitb00ee5fa8f6fddffce3dedc2d3eaa7990643eb21 (patch)
treeb001276ae2cc3312c6f9f145c99250f383bf6472 /lib
parentlib/utils: naive refactoring of kr_dname_lf() (diff)
downloadknot-resolver-b00ee5fa8f6fddffce3dedc2d3eaa7990643eb21.tar.xz
knot-resolver-b00ee5fa8f6fddffce3dedc2d3eaa7990643eb21.zip
TTL changes: moved in libknot from rdata to rrset
To work on RRSIG TTLs, libknot >= 2.7.1 is needed.
Diffstat (limited to 'lib')
-rw-r--r--lib/cache/api.c13
-rw-r--r--lib/cache/api.h2
-rw-r--r--lib/cache/entry_pkt.c35
-rw-r--r--lib/cache/entry_rr.c21
-rw-r--r--lib/cache/knot_pkt.c1
-rw-r--r--lib/cache/nsec1.c2
-rw-r--r--lib/dnssec/ta.c4
-rw-r--r--lib/resolve.c2
-rw-r--r--lib/utils.c4
-rw-r--r--lib/zonecut.c12
10 files changed, 39 insertions, 57 deletions
diff --git a/lib/cache/api.c b/lib/cache/api.c
index c7f63262..0fedc22c 100644
--- a/lib/cache/api.c
+++ b/lib/cache/api.c
@@ -514,17 +514,8 @@ static ssize_t stash_rrset(struct kr_cache *cache, const struct kr_query *qry,
if (ret) return kr_ok(); /* some aren't really errors */
assert(val_new_entry.data);
- /* Compute TTL, just in case they weren't equal. */
- uint32_t ttl = -1;
- const knot_rdataset_t *rdatasets[] = { &rr->rrs, rds_sigs, NULL };
- for (int j = 0; rdatasets[j]; ++j) {
- knot_rdata_t *rd = rdatasets[j]->data;
- assert(rdatasets[j]->rr_count);
- for (uint16_t l = 0; l < rdatasets[j]->rr_count; ++l) {
- ttl = MIN(ttl, knot_rdata_ttl(rd));
- rd = kr_rdataset_next(rd);
- }
- } /* TODO: consider expirations of RRSIGs as well, just in case. */
+ const uint32_t ttl = rr->ttl;
+ /* FIXME: consider TTLs and expirations of RRSIGs as well, just in case. */
/* Write the entry itself. */
struct entry_h *eh = val_new_entry.data;
diff --git a/lib/cache/api.h b/lib/cache/api.h
index 35ac4ba7..ed73eb7c 100644
--- a/lib/cache/api.h
+++ b/lib/cache/api.h
@@ -134,6 +134,6 @@ int32_t kr_cache_ttl(const struct kr_cache_p *peek, const struct kr_query *qry,
/*TODO: reorder*/
KR_EXPORT
int kr_cache_materialize(knot_rdataset_t *dst, const struct kr_cache_p *ref,
- uint32_t new_ttl, knot_mm_t *pool);
+ knot_mm_t *pool);
diff --git a/lib/cache/entry_pkt.c b/lib/cache/entry_pkt.c
index 9f061e77..9b2e804a 100644
--- a/lib/cache/entry_pkt.c
+++ b/lib/cache/entry_pkt.c
@@ -38,8 +38,7 @@ static uint32_t packet_ttl(const knot_pkt_t *pkt, bool is_negative)
if (is_negative) {
/* Use SOA minimum TTL for negative answers. */
if (rr->type == KNOT_RRTYPE_SOA) {
- return MIN(knot_rrset_ttl(rr),
- knot_soa_minimum(&rr->rrs));
+ return MIN(rr->ttl, knot_soa_minimum(rr->rrs.rdata));
} else {
continue; /* Use SOA only for negative answers. */
}
@@ -47,13 +46,7 @@ static uint32_t packet_ttl(const knot_pkt_t *pkt, bool is_negative)
if (knot_rrtype_is_metatype(rr->type)) {
continue; /* Skip metatypes. */
}
- /* Find minimum TTL in the record set */
- knot_rdata_t *rd = rr->rrs.data;
- for (uint16_t j = 0; j < rr->rrs.rr_count; ++j) {
- has_ttl = true;
- ttl = MIN(ttl, knot_rdata_ttl(rd));
- rd = kr_rdataset_next(rd);
- }
+ ttl = MIN(ttl, rr->ttl);
}
}
/* If no valid TTL present, go with zero (will get clamped to minimum). */
@@ -200,21 +193,15 @@ int answer_from_pkt(kr_layer_t *ctx, knot_pkt_t *pkt, uint16_t type,
for (knot_section_t i = KNOT_ANSWER; i <= KNOT_ADDITIONAL; ++i) {
const knot_pktsection_t *sec = knot_pkt_section(pkt, i);
for (unsigned k = 0; k < sec->count; ++k) {
- const knot_rrset_t *rr = knot_pkt_rr(sec, k);
- knot_rdata_t *rd = rr->rrs.data;
- for (uint16_t i = 0; i < rr->rrs.rr_count; ++i) {
- /* We need to be careful:
- * due to enforcing minimum TTL on packet,
- * some records may be below that value.
- * We keep those records at TTL 0. */
- uint32_t ttl = knot_rdata_ttl(rd);
- if (drift <= ttl) {
- ttl -= drift;
- } else {
- ttl = 0;
- }
- knot_rdata_set_ttl(rd, ttl);
- rd = kr_rdataset_next(rd);
+ knot_rrset_t *rrs = // vv FIXME??
+ /*const-cast*/(knot_rrset_t *)knot_pkt_rr(sec, k);
+ /* We need to be careful: due to enforcing minimum TTL
+ * on packet, some records may be below that value.
+ * We keep those records at TTL 0. */
+ if (rrs->ttl >= drift) {
+ rrs->ttl -= drift;
+ } else {
+ rrs->ttl = 0;
}
}
}
diff --git a/lib/cache/entry_rr.c b/lib/cache/entry_rr.c
index 5cfe747e..7d3f13b2 100644
--- a/lib/cache/entry_rr.c
+++ b/lib/cache/entry_rr.c
@@ -48,8 +48,9 @@ int rdataset_dematerialize(const knot_rdataset_t *rds, uint8_t * restrict data)
* Return the number of bytes consumed or an error code.
*/
static int rdataset_materialize(knot_rdataset_t * restrict rds, const uint8_t * const data,
- const uint8_t *data_bound, uint32_t ttl, knot_mm_t *pool)
+ const uint8_t *data_bound, knot_mm_t *pool)
{
+ /* FIXME: rdataset_t and cache's rdataset have the same binary format now */
assert(rds && data && data_bound && data_bound > data && !rds->data);
assert(pool); /* not required, but that's our current usage; guard leaks */
const uint8_t *d = data; /* iterates over the cache data */
@@ -74,8 +75,8 @@ static int rdataset_materialize(knot_rdataset_t * restrict rds, const uint8_t *
d += sizeof(len) + len;
rdata_len_sum += len;
}
- /* Each item in knot_rdataset_t needs TTL (4B) + rdlength (2B) + rdata */
- rds->data = mm_alloc(pool, rdata_len_sum + ((size_t)rds->rr_count) * (4 + 2));
+ /* Each item in knot_rdataset_t needs rdlength (2B) + rdata */
+ rds->data = mm_alloc(pool, rdata_len_sum + (size_t)rds->rr_count * 2);
if (!rds->data) {
return kr_error(ENOMEM);
}
@@ -86,20 +87,20 @@ static int rdataset_materialize(knot_rdataset_t * restrict rds, const uint8_t *
uint16_t len;
memcpy(&len, d, sizeof(len));
d += sizeof(len);
- knot_rdata_init(d_out, len, d, ttl);
+ knot_rdata_init(d_out, len, d);
d += len;
//d_out = kr_rdataset_next(d_out);
- d_out += 4 + 2 + len; /* TTL + rdlen + rdata */
+ d_out += 2 + len; /* rdlen + rdata */
}
//VERBOSE_MSG(NULL, "materialized from %d B\n", (int)(d - data));
return d - data;
}
int kr_cache_materialize(knot_rdataset_t *dst, const struct kr_cache_p *ref,
- uint32_t new_ttl, knot_mm_t *pool)
+ knot_mm_t *pool)
{
struct entry_h *eh = ref->raw_data;
- return rdataset_materialize(dst, eh->data, ref->raw_bound, new_ttl, pool);
+ return rdataset_materialize(dst, eh->data, ref->raw_bound, pool);
}
@@ -118,12 +119,12 @@ int entry2answer(struct answer *ans, int id,
}
/* Materialize the base RRset. */
knot_rrset_t *rr = ans->rrsets[id].set.rr
- = knot_rrset_new(owner, type, KNOT_CLASS_IN, ans->mm);
+ = knot_rrset_new(owner, type, KNOT_CLASS_IN, new_ttl, ans->mm);
if (!rr) {
assert(!ENOMEM);
return kr_error(ENOMEM);
}
- int ret = rdataset_materialize(&rr->rrs, eh->data, eh_bound, new_ttl, ans->mm);
+ int ret = rdataset_materialize(&rr->rrs, eh->data, eh_bound, ans->mm);
if (ret < 0) goto fail;
size_t data_off = ret;
ans->rrsets[id].set.rank = eh->rank;
@@ -132,7 +133,7 @@ int entry2answer(struct answer *ans, int id,
bool want_rrsigs = true; /* LATER(optim.): might be omitted in some cases. */
if (want_rrsigs) {
ret = rdataset_materialize(&ans->rrsets[id].sig_rds, eh->data + data_off,
- eh_bound, new_ttl, ans->mm);
+ eh_bound, ans->mm);
if (ret < 0) goto fail;
/* Sanity check: we consumed exactly all data. */
int unused_bytes = eh_bound - (uint8_t *)eh->data - data_off - ret;
diff --git a/lib/cache/knot_pkt.c b/lib/cache/knot_pkt.c
index a7368980..6a889d95 100644
--- a/lib/cache/knot_pkt.c
+++ b/lib/cache/knot_pkt.c
@@ -91,6 +91,7 @@ int pkt_append(knot_pkt_t *pkt, const struct answer_rrset *rrset, uint8_t rank)
pkt->rr[pkt->rrset_count] = (knot_rrset_t){
.owner = knot_dname_copy(rrset->set.rr->owner, &pkt->mm),
/* ^^ well, another copy isn't really needed */
+ .ttl = rrset->set.rr->ttl,
.type = KNOT_RRTYPE_RRSIG,
.rclass = KNOT_CLASS_IN,
.rrs = *rdss[i],
diff --git a/lib/cache/nsec1.c b/lib/cache/nsec1.c
index 0ae05c3d..9898683e 100644
--- a/lib/cache/nsec1.c
+++ b/lib/cache/nsec1.c
@@ -443,7 +443,7 @@ int nsec1_src_synth(struct key *k, struct answer *ans, const knot_dname_t *clenc
assert(nsec_rr);
const uint32_t new_ttl_log =
- kr_verbose_status ? knot_rrset_ttl(nsec_rr) : -1;
+ kr_verbose_status ? nsec_rr->ttl : -1;
uint8_t *bm = NULL;
uint16_t bm_size;
knot_nsec_bitmap(&nsec_rr->rrs, &bm, &bm_size);
diff --git a/lib/dnssec/ta.c b/lib/dnssec/ta.c
index 2489d348..38f71f06 100644
--- a/lib/dnssec/ta.c
+++ b/lib/dnssec/ta.c
@@ -83,11 +83,11 @@ static int insert_ta(map_t *trust_anchors, const knot_dname_t *name,
bool is_new_key = false;
knot_rrset_t *ta_rr = kr_ta_get(trust_anchors, name);
if (!ta_rr) {
- ta_rr = knot_rrset_new(name, KNOT_RRTYPE_DS, KNOT_CLASS_IN, NULL);
+ ta_rr = knot_rrset_new(name, KNOT_RRTYPE_DS, KNOT_CLASS_IN, ttl, NULL);
is_new_key = true;
}
/* Merge-in new key data */
- if (!ta_rr || (rdlen > 0 && knot_rrset_add_rdata(ta_rr, rdata, rdlen, ttl, NULL) != 0)) {
+ if (!ta_rr || (rdlen > 0 && knot_rrset_add_rdata(ta_rr, rdata, rdlen, NULL) != 0)) {
knot_rrset_free(&ta_rr, NULL);
return kr_error(ENOMEM);
}
diff --git a/lib/resolve.c b/lib/resolve.c
index 7cf8e08a..39d7a9e3 100644
--- a/lib/resolve.c
+++ b/lib/resolve.c
@@ -156,7 +156,7 @@ static int invalidate_ns(struct kr_rplan *rplan, struct kr_query *qry)
size_t addr_len = kr_inaddr_len(&qry->ns.addr[0].ip);
/* @warning _NOT_ thread-safe */
static knot_rdata_t rdata_arr[RDATA_ARR_MAX];
- knot_rdata_init(rdata_arr, addr_len, (const uint8_t *)addr, 0);
+ knot_rdata_init(rdata_arr, addr_len, (const uint8_t *)addr);
return kr_zonecut_del(&qry->zone_cut, qry->ns.name, rdata_arr);
} else {
return kr_zonecut_del_all(&qry->zone_cut, qry->ns.name);
diff --git a/lib/utils.c b/lib/utils.c
index 16218a86..fe79cad0 100644
--- a/lib/utils.c
+++ b/lib/utils.c
@@ -308,12 +308,12 @@ int kr_pkt_put(knot_pkt_t *pkt, const knot_dname_t *name, uint32_t ttl,
}
/* Create empty RR */
knot_rrset_t rr;
- knot_rrset_init(&rr, knot_dname_copy(name, &pkt->mm), rtype, rclass);
+ knot_rrset_init(&rr, knot_dname_copy(name, &pkt->mm), rtype, rclass, ttl);
/* Create RDATA
* @warning _NOT_ thread safe.
*/
static knot_rdata_t rdata_arr[RDATA_ARR_MAX];
- knot_rdata_init(rdata_arr, rdlen, rdata, ttl);
+ knot_rdata_init(rdata_arr, rdlen, rdata);
knot_rdataset_add(&rr.rrs, rdata_arr, &pkt->mm);
/* Append RR */
return knot_pkt_put(pkt, 0, &rr, KNOT_PF_FREE);
diff --git a/lib/zonecut.c b/lib/zonecut.c
index ae8425f5..d578132a 100644
--- a/lib/zonecut.c
+++ b/lib/zonecut.c
@@ -298,8 +298,9 @@ static void fetch_addr(struct kr_zonecut *cut, struct kr_cache *cache,
}
knot_rrset_t cached_rr;
- knot_rrset_init(&cached_rr, /*const-cast*/(knot_dname_t *)ns, rrtype, KNOT_CLASS_IN);
- if (kr_cache_materialize(&cached_rr.rrs, &peek, new_ttl, cut->pool) < 0) {
+ knot_rrset_init(&cached_rr, /*const-cast*/(knot_dname_t *)ns, rrtype,
+ KNOT_CLASS_IN, new_ttl);
+ if (kr_cache_materialize(&cached_rr.rrs, &peek, cut->pool) < 0) {
return;
}
knot_rdata_t *rd = cached_rr.rrs.data;
@@ -331,7 +332,7 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut,
}
/* Materialize the rdataset temporarily, for simplicity. */
knot_rdataset_t ns_rds = { 0, NULL };
- ret = kr_cache_materialize(&ns_rds, &peek, new_ttl, cut->pool);
+ ret = kr_cache_materialize(&ns_rds, &peek, cut->pool);
if (ret < 0) {
return ret;
}
@@ -393,8 +394,9 @@ static int fetch_secure_rrset(knot_rrset_t **rr, struct kr_cache *cache,
*rr = NULL;
return kr_error(ENOMEM);
}
- knot_rrset_init(*rr, /*const-cast*/(knot_dname_t *)owner, type, KNOT_CLASS_IN);
- ret = kr_cache_materialize(&(*rr)->rrs, &peek, new_ttl, pool);
+ knot_rrset_init(*rr, /*const-cast*/(knot_dname_t *)owner, type,
+ KNOT_CLASS_IN, new_ttl);
+ ret = kr_cache_materialize(&(*rr)->rrs, &peek, pool);
if (ret < 0) {
knot_rrset_free(rr, pool);
return ret;