summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVladimír Čunát <vladimir.cunat@nic.cz>2016-08-31 17:21:47 +0200
committerVladimír Čunát <vladimir.cunat@nic.cz>2016-11-02 11:14:05 +0100
commit9d5beac575f15fb7b13b270ca406ecad3ec594e3 (patch)
treec5d359fca26ff620ccb019511146b46a2064f553
parentbench: make bench, dataset for lru, cleanup (diff)
downloadknot-resolver-9d5beac575f15fb7b13b270ca406ecad3ec594e3.tar.xz
knot-resolver-9d5beac575f15fb7b13b270ca406ecad3ec594e3.zip
lru: new implementation and also interface
The implementation is now similar to set-associative caches that x86 CPU use. Also the API is changed a bit, leading to slight simplification of our use patterns.
-rw-r--r--Makefile2
-rw-r--r--bench/bench.mk12
-rw-r--r--bench/bench_lru.c85
-rw-r--r--daemon/bindings.c9
-rw-r--r--daemon/engine.c42
-rw-r--r--lib/cookies/lru_cache.c4
-rw-r--r--lib/cookies/lru_cache.h2
-rw-r--r--lib/generic/lru.c210
-rw-r--r--lib/generic/lru.h373
-rw-r--r--lib/lib.mk2
-rw-r--r--lib/nsrep.c13
-rw-r--r--lib/nsrep.h2
-rw-r--r--lib/utils.h6
-rw-r--r--lib/zonecut.c3
-rw-r--r--modules/stats/stats.c64
-rw-r--r--tests/test_lru.c20
16 files changed, 541 insertions, 308 deletions
diff --git a/Makefile b/Makefile
index 3b1896e2..8de51d07 100644
--- a/Makefile
+++ b/Makefile
@@ -5,7 +5,7 @@ include platform.mk
all: info lib daemon modules
install: lib-install daemon-install modules-install etc-install
check: all tests
-clean: contrib-clean lib-clean daemon-clean modules-clean tests-clean doc-clean
+clean: contrib-clean lib-clean daemon-clean modules-clean tests-clean doc-clean bench-clean
doc: doc-html
.PHONY: all install check clean doc info
diff --git a/bench/bench.mk b/bench/bench.mk
index b1defbd9..dfd06ee7 100644
--- a/bench/bench.mk
+++ b/bench/bench.mk
@@ -27,9 +27,9 @@ $(foreach bench,$(bench_BIN),$(eval $(call make_bench,$(bench))))
.PHONY: bench bench-clean
bench-clean: $(foreach bench,$(bench_BIN),$(bench)-clean)
bench: $(foreach bench,$(bench_BIN),bench/$(bench))
- # Test LRU with increasing overfill, misses should increase ~ linearly
- @./bench/bench_lru 22 bench/bench_lru_set1.tsv - 65536 # fill = 1
- @./bench/bench_lru 23 bench/bench_lru_set1.tsv - 32768 # fill = 2
- @./bench/bench_lru 23 bench/bench_lru_set1.tsv - 16384 # fill = 4
- @./bench/bench_lru 23 bench/bench_lru_set1.tsv - 8192 # fill = 8
- @./bench/bench_lru 23 bench/bench_lru_set1.tsv - 4096 # fill = 16 \ No newline at end of file
+ @echo "Test LRU with increasing overfill, misses should increase ~ linearly" >&2
+ @./bench/bench_lru 23 bench/bench_lru_set1.tsv - 65536 # fill ~ 1
+ @./bench/bench_lru 23 bench/bench_lru_set1.tsv - 32768 # fill ~ 2
+ @./bench/bench_lru 23 bench/bench_lru_set1.tsv - 16384 # fill ~ 4
+ @./bench/bench_lru 23 bench/bench_lru_set1.tsv - 8192 # fill ~ 8
+ @./bench/bench_lru 23 bench/bench_lru_set1.tsv - 4096 # fill ~ 16
diff --git a/bench/bench_lru.c b/bench/bench_lru.c
index ec549174..1befb776 100644
--- a/bench/bench_lru.c
+++ b/bench/bench_lru.c
@@ -17,6 +17,7 @@
#include <math.h>
#include <stdlib.h>
#include <stdint.h>
+#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>
@@ -26,17 +27,25 @@
typedef kr_nsrep_lru_t lru_bench_t;
+#define p_out(...) do { \
+ printf(__VA_ARGS__); \
+ fflush(stdout); \
+ } while (0)
+#define p_err(...) fprintf(stderr, __VA_ARGS__)
-static int die(const char *cause) {
+static int die(const char *cause)
+{
fprintf(stderr, "%s: %s\n", cause, strerror(errno));
exit(1);
}
-static void time_get(struct timeval *tv) {
+static void time_get(struct timeval *tv)
+{
if (gettimeofday(tv, NULL))
die("gettimeofday");
}
-static void time_print_diff(struct timeval *tv, size_t op_count) {
+static void time_print_diff(struct timeval *tv, size_t op_count)
+{
struct timeval now;
time_get(&now);
now.tv_sec -= tv->tv_sec;
@@ -48,11 +57,16 @@ static void time_print_diff(struct timeval *tv, size_t op_count) {
size_t speed = round((double)(op_count) / 1000
/ (now.tv_sec + (double)(now.tv_usec)/1000000));
- printf("\t%ld.%06d s, \t %zd kop/s\n", now.tv_sec, (int)now.tv_usec, speed);
+
+ p_out("%ld.%06d", now.tv_sec, (int)now.tv_usec);
+ p_err(" s"); p_out(","); p_err("\t");
+ p_out("%zd", speed);
+ p_err(" kops/s"); p_out(","); p_err("\n");
}
/// initialize seed for random()
-static int ssrandom(char *s) {
+static int ssrandom(char *s)
+{
if (*s == '-') { // initialize from time
struct timeval now;
time_get(&now);
@@ -75,7 +89,8 @@ struct key {
};
/// read lines from a file and reorder them randomly
-static struct key * read_lines(const char *fname, size_t *count) {
+static struct key * read_lines(const char *fname, size_t *count, char **pfree)
+{
// read the file at once
int fd = open(fname, O_RDONLY);
if (fd < 0)
@@ -85,6 +100,7 @@ static struct key * read_lines(const char *fname, size_t *count) {
die("stat");
size_t flen = (size_t)st.st_size;
char *fbuf = malloc(flen + 1);
+ *pfree = fbuf;
if (fbuf == NULL)
die("malloc");
if (read(fd, fbuf, flen) < 0)
@@ -101,7 +117,11 @@ static struct key * read_lines(const char *fname, size_t *count) {
}
*count = lines;
size_t avg_len = (flen + 1) / lines - 1;
- printf("%zu lines read, average length %zu\n", lines, avg_len);
+
+ p_err("lines read: ");
+ p_out("%zu,", lines);
+ p_err("\taverage length ");
+ p_out("%zu,", avg_len);
struct key *result = calloc(lines, sizeof(struct key));
result[0].chars = fbuf;
@@ -137,26 +157,33 @@ static struct key * read_lines(const char *fname, size_t *count) {
#define lru_get_try lru_get
#endif
-static void usage(const char *progname) {
- fprintf(stderr, "usage: %s <log_count> <input> <seed> [lru_size]\n"
- "The seed must be at least 12 characters or \"-\".\n" , progname);
+static void usage(const char *progname)
+{
+ p_err("usage: %s <log_count> <input> <seed> [lru_size]\n", progname);
+ p_err("The seed must be at least 12 characters or \"-\".\n"
+ "Standard output contains csv-formatted lines.\n");
exit(1);
}
-int main(int argc, char ** argv) {
+
+int main(int argc, char ** argv)
+{
if (argc != 4 && argc != 5)
usage(argv[0]);
if (ssrandom(argv[3]) < 0)
usage(argv[0]);
+ p_out("\n");
size_t key_count;
- struct key *keys = read_lines(argv[2], &key_count);
+ char *data_to_free = NULL;
+ struct key *keys = read_lines(argv[2], &key_count, &data_to_free);
size_t run_count;
{
size_t run_log = atoi(argv[1]);
assert(run_log < 64);
run_count = 1ULL << run_log;
- printf("test run length: 2^%zd\n", run_log);
+ p_err("\ntest run length:\t2^");
+ p_out("%zd,", run_log);
}
struct timeval time;
@@ -164,7 +191,7 @@ int main(int argc, char ** argv) {
lru_bench_t *lru;
#ifdef lru_create
- lru_create(&lru, lru_size, NULL);
+ lru_create(&lru, lru_size, NULL, NULL);
#else
lru = malloc(lru_size(lru_bench_t, lru_size));
if (lru)
@@ -172,12 +199,19 @@ int main(int argc, char ** argv) {
#endif
if (!lru)
die("malloc");
- printf("LRU size:\t%d\n", lru_size);
+ p_err("\nLRU capacity:\t");
+ p_out("%d,",
+ #ifdef lru_capacity
+ lru_capacity(lru) // report real capacity, if provided
+ #else
+ lru_size
+ #endif
+ );
size_t miss = 0;
+ p_err("\nload everything:\t");
time_get(&time);
- printf("load everything:");
- for (size_t i = 0, ki = key_count; i < run_count; ++i, --ki) {
+ for (size_t i = 0, ki = key_count - 1; i < run_count; ++i, --ki) {
unsigned *r = lru_get_new(lru, keys[ki].chars, keys[ki].len);
if (!r || *r == 0)
++miss;
@@ -187,12 +221,14 @@ int main(int argc, char ** argv) {
ki = key_count;
}
time_print_diff(&time, run_count);
- printf("LRU misses:\t%zd%%\n", (miss * 100 + 50) / run_count);
+ p_err("LRU misses [%%]:\t");
+ p_out("%zd,",(miss * 100 + 50) / run_count);
+ p_err("\n");
unsigned accum = 0; // compute something to make sure compiler can't remove code
+ p_err("search everything:\t");
time_get(&time);
- printf("search everything:");
- for (size_t i = 0, ki = key_count; i < run_count; ++i, --ki) {
+ for (size_t i = 0, ki = key_count - 1; i < run_count; ++i, --ki) {
unsigned *r = lru_get_try(lru, keys[ki].chars, keys[ki].len);
if (r)
accum += *r;
@@ -200,7 +236,14 @@ int main(int argc, char ** argv) {
ki = key_count;
}
time_print_diff(&time, run_count);
- printf("ignore: %u\n", accum);
+ p_err("ignore: %u\n", accum);
+
+ // free memory, at least with new LRU
+ #ifdef lru_create
+ lru_free(lru);
+ #endif
+ free(keys);
+ free(data_to_free);
return 0;
}
diff --git a/daemon/bindings.c b/daemon/bindings.c
index e80b80b7..08228658 100644
--- a/daemon/bindings.c
+++ b/daemon/bindings.c
@@ -660,12 +660,9 @@ static int cache_clear(lua_State *L)
}
/* Clear reputation tables */
- lru_deinit(engine->resolver.cache_rtt);
- lru_deinit(engine->resolver.cache_rep);
- lru_init(engine->resolver.cache_rtt, LRU_RTT_SIZE);
- lru_init(engine->resolver.cache_rep, LRU_REP_SIZE);
- lru_deinit(engine->resolver.cache_cookie);
- lru_init(engine->resolver.cache_cookie, LRU_COOKIES_SIZE);
+ lru_reset(engine->resolver.cache_rtt);
+ lru_reset(engine->resolver.cache_rep);
+ lru_reset(engine->resolver.cache_cookie);
lua_pushboolean(L, true);
return 1;
}
diff --git a/daemon/engine.c b/daemon/engine.c
index 15757d60..9977f434 100644
--- a/daemon/engine.c
+++ b/daemon/engine.c
@@ -443,18 +443,9 @@ static int init_resolver(struct engine *engine)
kr_zonecut_init(&engine->resolver.root_hints, (const uint8_t *)"", engine->pool);
kr_zonecut_set_sbelt(&engine->resolver, &engine->resolver.root_hints);
/* Open NS rtt + reputation cache */
- engine->resolver.cache_rtt = mm_alloc(engine->pool, lru_size(kr_nsrep_lru_t, LRU_RTT_SIZE));
- if (engine->resolver.cache_rtt) {
- lru_init(engine->resolver.cache_rtt, LRU_RTT_SIZE);
- }
- engine->resolver.cache_rep = mm_alloc(engine->pool, lru_size(kr_nsrep_lru_t, LRU_REP_SIZE));
- if (engine->resolver.cache_rep) {
- lru_init(engine->resolver.cache_rep, LRU_REP_SIZE);
- }
- engine->resolver.cache_cookie = mm_alloc(engine->pool, lru_size(kr_cookie_lru_t, LRU_COOKIES_SIZE));
- if (engine->resolver.cache_cookie) {
- lru_init(engine->resolver.cache_cookie, LRU_COOKIES_SIZE);
- }
+ lru_create(&engine->resolver.cache_rtt, LRU_RTT_SIZE, engine->pool, NULL);
+ lru_create(&engine->resolver.cache_rep, LRU_REP_SIZE, engine->pool, NULL);
+ lru_create(&engine->resolver.cache_cookie, LRU_COOKIES_SIZE, engine->pool, NULL);
/* Load basic modules */
engine_register(engine, "iterate", NULL, NULL);
@@ -507,20 +498,17 @@ static int init_state(struct engine *engine)
return kr_ok();
}
+static enum lru_apply_do update_stat_item(const char *key, uint len,
+ unsigned *rtt, void *baton)
+{
+ return *rtt > KR_NS_LONG ? LRU_APPLY_DO_EVICT : LRU_APPLY_DO_NOTHING;
+}
+/** @internal Walk RTT table, clearing all entries with bad score
+ * to compensate for intermittent network issues or temporary bad behaviour. */
static void update_state(uv_timer_t *handle)
{
struct engine *engine = handle->data;
-
- /* Walk RTT table, clearing all entries with bad score
- * to compensate for intermittent network issues or temporary bad behaviour. */
- kr_nsrep_lru_t *table = engine->resolver.cache_rtt;
- for (size_t i = 0; i < table->size; ++i) {
- if (!table->slots[i].key)
- continue;
- if (table->slots[i].data > KR_NS_LONG) {
- lru_evict(table, i);
- }
- }
+ lru_apply(engine->resolver.cache_rtt, update_stat_item, NULL);
}
int engine_init(struct engine *engine, knot_mm_t *pool)
@@ -573,9 +561,11 @@ void engine_deinit(struct engine *engine)
network_deinit(&engine->net);
kr_zonecut_deinit(&engine->resolver.root_hints);
kr_cache_close(&engine->resolver.cache);
- lru_deinit(engine->resolver.cache_rtt);
- lru_deinit(engine->resolver.cache_rep);
- lru_deinit(engine->resolver.cache_cookie);
+
+ /* The lru keys are currently malloc-ated and need to be freed. */
+ lru_free(engine->resolver.cache_rtt);
+ lru_free(engine->resolver.cache_rep);
+ lru_free(engine->resolver.cache_cookie);
/* Clear IPC pipes */
for (size_t i = 0; i < engine->ipc_set.len; ++i) {
diff --git a/lib/cookies/lru_cache.c b/lib/cookies/lru_cache.c
index 5ac40bc4..feff44ff 100644
--- a/lib/cookies/lru_cache.c
+++ b/lib/cookies/lru_cache.c
@@ -33,7 +33,7 @@ const uint8_t *kr_cookie_lru_get(kr_cookie_lru_t *cache,
return NULL;
}
- struct cookie_opt_data *cached = lru_get(cache, addr, addr_len);
+ struct cookie_opt_data *cached = lru_get_try(cache, addr, addr_len);
return cached ? cached->opt_data : NULL;
}
@@ -61,7 +61,7 @@ int kr_cookie_lru_set(kr_cookie_lru_t *cache, const struct sockaddr *sa,
return kr_error(EINVAL);
}
- struct cookie_opt_data *cached = lru_set(cache, addr, addr_len);
+ struct cookie_opt_data *cached = lru_get_new(cache, addr, addr_len);
if (!cached) {
return kr_error(ENOMEM);
}
diff --git a/lib/cookies/lru_cache.h b/lib/cookies/lru_cache.h
index d5d694ea..257b94e5 100644
--- a/lib/cookies/lru_cache.h
+++ b/lib/cookies/lru_cache.h
@@ -43,7 +43,7 @@ struct cookie_opt_data {
/**
* DNS cookies tracking.
*/
-typedef lru_hash(struct cookie_opt_data) kr_cookie_lru_t;
+typedef lru_t(struct cookie_opt_data) kr_cookie_lru_t;
/**
* @brief Obtain LRU cache entry.
diff --git a/lib/generic/lru.c b/lib/generic/lru.c
new file mode 100644
index 00000000..63a07fed
--- /dev/null
+++ b/lib/generic/lru.c
@@ -0,0 +1,210 @@
+/* Copyright (C) 2016 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "lib/generic/lru.h"
+#include "contrib/murmurhash3/murmurhash3.h"
+
+typedef struct lru_group lru_group_t;
+
+struct lru_item {
+ uint16_t key_len, val_len; /**< Two bytes should be enough for our purposes. */
+ char data[]; /**< Place for both key and value. */
+};
+
+/** @internal Compute offset of value in struct lru_item. */
+static uint val_offset(uint key_len)
+{
+ uint key_end = offsetof(struct lru_item, data) + key_len;
+ // align it to the closest multiple of four
+ return round_power(key_end, 2);
+}
+
+/** @internal Return pointer to value in an item. */
+static void * item_val(struct lru_item *it)
+{
+ return it->data + val_offset(it->key_len) - offsetof(struct lru_item, data);
+}
+
+/** @internal Compute the size of an item. ATM we don't align/pad the end of it. */
+static uint item_size(uint key_len, uint val_len)
+{
+ return val_offset(key_len) + val_len;
+}
+
+/** @internal Free each item. */
+KR_EXPORT void lru_free_items_impl(struct lru *lru)
+{
+ assert(lru);
+ for (size_t i = 0; i < (1 << (size_t)lru->log_groups); ++i) {
+ lru_group_t *g = &lru->groups[i];
+ for (int j = 0; j < LRU_ASSOC; ++j)
+ mm_free(lru->mm, g->items[j]);
+ }
+}
+
+/** @internal See lru_apply. */
+KR_EXPORT void lru_apply_impl(struct lru *lru, lru_apply_fun f, void *baton)
+{
+ bool ok = lru && f;
+ if (!ok) {
+ assert(false);
+ return;
+ }
+ for (size_t i = 0; i < (1 << (size_t)lru->log_groups); ++i) {
+ lru_group_t *g = &lru->groups[i];
+ for (uint j = 0; j < LRU_ASSOC; ++j) {
+ struct lru_item *it = g->items[j];
+ if (!it)
+ continue;
+ enum lru_apply_do ret =
+ f(it->data, it->key_len, item_val(it), baton);
+ switch(ret) {
+ case LRU_APPLY_DO_EVICT: // evict
+ mm_free(lru->mm, it);
+ g->items[j] = NULL;
+ g->counts[j] = 0;
+ g->hashes[j] = 0;
+ break;
+ default:
+ assert(ret == LRU_APPLY_DO_NOTHING);
+ }
+ }
+ }
+}
+
+/** @internal See lru_create. */
+KR_EXPORT struct lru * lru_create_impl(uint max_slots, knot_mm_t *mm_array, knot_mm_t *mm)
+{
+ assert(max_slots);
+ if (!max_slots)
+ return NULL;
+ // let lru->log_groups = ceil(log2(max_slots / (float) assoc))
+ // without trying for efficiency
+ uint group_count = (max_slots - 1) / LRU_ASSOC + 1;
+ uint log_groups = 0;
+ for (uint s = group_count - 1; s; s /= 2)
+ ++log_groups;
+ group_count = 1 << log_groups;
+ assert(max_slots <= group_count * LRU_ASSOC && group_count * LRU_ASSOC < 2 * max_slots);
+
+ size_t size = offsetof(struct lru, groups[group_count]);
+ struct lru *lru = mm_alloc(mm_array, size);
+ if (unlikely(lru == NULL))
+ return NULL;
+ *lru = (struct lru){
+ .mm = mm,
+ .mm_array = mm_array,
+ .log_groups = log_groups,
+ };
+ // zeros are a good init
+ memset(lru->groups, 0, size - offsetof(struct lru, groups));
+ return lru;
+}
+
+/** @internal Decrement all counters within a group. */
+static void group_dec_counts(lru_group_t *g) {
+ g->counts[LRU_TRACKED] = LRU_TRACKED;
+ for (uint i = 0; i < LRU_TRACKED + 1; ++i)
+ if (likely(g->counts[i]))
+ --g->counts[i];
+}
+
+/** @internal Increment a counter within a group. */
+static void group_inc_count(lru_group_t *g, int i) {
+ if (likely(++(g->counts[i])))
+ return;
+ g->counts[i] = -1;
+ // We could've decreased or halved all of them, but let's keep the max.
+}
+
+/** @internal Implementation of both getting and insertion.
+ * Note: val_len is only meaningful if do_insert. */
+KR_EXPORT void * lru_get_impl(struct lru *lru, const char *key, uint key_len,
+ uint val_len, bool do_insert)
+{
+ bool ok = lru && (key || !key_len) && key_len <= UINT16_MAX
+ && (!do_insert || val_len <= UINT16_MAX);
+ if (!ok) {
+ assert(false);
+ return NULL; // reasonable fallback when not debugging
+ }
+ // find the right group
+ uint32_t khash = hash(key, key_len);
+ uint16_t khash_top = khash >> 16;
+ lru_group_t *g = &lru->groups[khash & ((1 << lru->log_groups) - 1)];
+ struct lru_item *it = NULL;
+ uint i;
+ // scan the *stored* elements in the group
+ for (i = 0; i < LRU_ASSOC; ++i) {
+ if (g->hashes[i] == khash_top) {
+ it = g->items[i];
+ if (likely(it && it->key_len == key_len
+ && memcmp(it->data, key, key_len) == 0))
+ goto found; // to reduce huge nesting depth
+ }
+ }
+ // key not found; first try an empty/counted-out place to insert
+ if (do_insert)
+ for (i = 0; i < LRU_ASSOC; ++i)
+ if (g->items[i] == NULL || g->counts[i] == 0)
+ goto insert;
+ // check if we track key's count at least
+ for (i = LRU_ASSOC; i < LRU_TRACKED; ++i) {
+ if (g->hashes[i] == khash_top) {
+ group_inc_count(g, i);
+ if (!do_insert)
+ return NULL;
+ // check if we trumped some stored key
+ for (uint j = 0; j < LRU_ASSOC; ++j)
+ if (unlikely(g->counts[i] > g->counts[j])) {
+ // evict key j, i.e. swap with i
+ --g->counts[i]; // we increment it below
+ SWAP(g->counts[i], g->counts[j]);
+ SWAP(g->hashes[i], g->hashes[j]);
+ i = j;
+ goto insert;
+ }
+ return NULL;
+ }
+ }
+ // not found at all: decrement all counts but only on every LRU_TRACKED occasion
+ if (g->counts[LRU_TRACKED])
+ --g->counts[LRU_TRACKED];
+ else
+ group_dec_counts(g);
+ return NULL;
+insert: // insert into position i (incl. key)
+ assert(i < LRU_ASSOC);
+ g->hashes[i] = khash_top;
+ it = g->items[i];
+ uint new_size = item_size(key_len, val_len);
+ if (it == NULL || new_size != item_size(it->key_len, it->val_len)) {
+ // (re)allocate
+ mm_free(lru->mm, it);
+ it = g->items[i] = mm_alloc(lru->mm, new_size);
+ if (it == NULL)
+ return NULL;
+ }
+ it->key_len = key_len;
+ it->val_len = val_len;
+ memcpy(it->data, key, key_len);
+ memset(item_val(it), 0, val_len); // clear the value
+found: // key and hash OK on g->items[i]; now update stamps
+ assert(i < LRU_ASSOC);
+ group_inc_count(g, i);
+ return item_val(g->items[i]);
+}
+
diff --git a/lib/generic/lru.h b/lib/generic/lru.h
index 8622c96f..e632b61d 100644
--- a/lib/generic/lru.h
+++ b/lib/generic/lru.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+/* Copyright (C) 2016 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -15,48 +15,47 @@
*/
/**
* @file lru.h
- * @brief LRU-like cache.
+ * @brief A lossy cache.
*
- * @note This is a naive LRU implementation with a simple slot stickiness counting.
- * Each write access increases stickiness on success, and decreases on collision.
- * A slot is freed if the stickiness decreases to zero. This makes it less likely,
- * that often-updated entries are jousted out of cache.
+ * @note The implementation tries to keep frequent keys and avoid others,
+ * even if "used recently", so it may refuse to store a key on lru_get_new().
+ * It uses hashing to split the problem pseudo-randomly into smaller groups,
+ * and within each it tries to approximate relative usage counts of several
+ * most frequent keys/hashes. This tracking is done for *more* keys than
+ * those that are actually stored.
*
* # Example usage:
*
* @code{.c}
* // Define new LRU type
- * typedef lru_hash(int) lru_int_t;
+ * typedef lru_t(int) lru_int_t;
*
- * // Create LRU on stack
- * size_t lru_size = lru_size(lru_int_t, 10);
- * lru_int_t lru[lru_size];
- * lru_init(&lru, 5);
+ * // Create LRU
+ * lru_int_t *lru;
+ * lru_create(&lru, 5, NULL, NULL);
*
* // Insert some values
- * *lru_set(&lru, "luke", strlen("luke")) = 42;
- * *lru_set(&lru, "leia", strlen("leia")) = 24;
+ * int *pi = lru_get_new(lru, "luke", strlen("luke"));
+ * if (pi)
+ * *pi = 42;
+ * pi = lru_get_new(lru, "leia", strlen("leia"));
+ * if (pi)
+ * *pi = 24;
*
* // Retrieve values
- * int *ret = lru_get(&lru, "luke", strlen("luke");
- * if (ret) printf("luke dropped out!\n");
- * else printf("luke's number is %d\n", *ret);
- *
- * // Set up eviction function, this is going to get called
- * // on entry eviction (baton refers to baton in 'lru' structure)
- * void on_evict(void *baton, void *data_) {
- * int *data = (int *) data;
- * printf("number %d dropped out!\n", *data);
- * }
+ * int *ret = lru_get_try(lru, "luke", strlen("luke"));
+ * if (!ret) printf("luke dropped out!\n");
+ * else printf("luke's number is %d\n", *ret);
+ *
* char *enemies[] = {"goro", "raiden", "subzero", "scorpion"};
* for (int i = 0; i < 4; ++i) {
- * int *val = lru_set(&lru, enemies[i], strlen(enemies[i]));
+ * int *val = lru_get_new(lru, enemies[i], strlen(enemies[i]));
* if (val)
* *val = i;
* }
*
* // We're done
- * lru_deinit(&lru);
+ * lru_free(lru);
* @endcode
*
* \addtogroup generics
@@ -65,191 +64,185 @@
#pragma once
-#include <stdlib.h>
+#include <assert.h>
#include <stdint.h>
-#include "contrib/murmurhash3/murmurhash3.h"
-
-#define lru_slot_struct \
- char *key; /**< Slot key */ \
- uint16_t len; /**< Slot length */ \
- uint16_t refs; /**< Slot importance (#writes - #collisions) */ \
-/** @brief Slot header. */
-struct lru_slot {
- lru_slot_struct
-};
+#include <stddef.h>
-/** @brief Return boolean true if slot matches key/len pair. */
-static inline int lru_slot_match(struct lru_slot *slot, const char *key, uint32_t len)
-{
- return (slot->len == len) && (memcmp(slot->key, key, len) == 0);
-}
+#include "contrib/ucw/lib.h"
+#include "lib/utils.h"
+#include "libknot/mm_ctx.h"
-#define lru_slot_offset(table) \
- (size_t)((void *)&((table)->slots[0].data) - (void *)&((table)->slots[0]))
-
-/** @brief Callback definitions. */
-typedef void (*lru_free_f)(void *baton, void *ptr);
-
-/** @brief LRU structure base. */
-#define lru_hash_struct \
- uint32_t size; /**< Number of slots */ \
- uint32_t evictions; /**< Number of evictions. */ \
- uint32_t stride; /**< Stride of the 'slots' array */ \
- lru_free_f evict; /**< Eviction function */ \
- void *baton; /**< Passed to eviction function */
-/** @internal Object base of any other lru_hash type. */
-struct lru_hash_base {
- lru_hash_struct
- char slots[];
-};
-
-/** @brief User-defined hashtable. */
-#define lru_hash(type) \
-struct { \
- lru_hash_struct \
- struct { \
- lru_slot_struct \
- type data; \
- } slots[]; \
-}
+/* ================================ Interface ================================ */
-/** Get slot at given index. */
-static inline void *lru_slot_at(struct lru_hash_base *lru, uint32_t id)
-{
- if (id >= lru->size) {
- return NULL;
+/** @brief The type for LRU, parametrized by value type. */
+#define lru_t(type) \
+ union { \
+ type *pdata_t; /* only the *type* information is used */ \
+ struct lru lru; \
}
- return (struct lru_slot *)(lru->slots + (id * lru->stride));
-}
-
-/** Get pointer to slot value. */
-static inline void *lru_slot_val(struct lru_slot *slot, size_t offset)
-{
- return ((char *)slot) + offset;
-}
-
-/** @internal Slot data getter */
-static inline void *lru_slot_get(struct lru_hash_base *lru, const char *key, uint16_t len, size_t offset)
-{
- if (!lru || !key || len == 0) {
- return NULL;
- }
- uint32_t id = hash(key, len) % lru->size;
- struct lru_slot *slot = lru_slot_at(lru, id);
- if (lru_slot_match(slot, key, len)) {
- return lru_slot_val(slot, offset);
- }
- return NULL;
-}
-
-static inline int lru_slot_evict(struct lru_hash_base *lru, uint32_t id, size_t offset)
-{
- struct lru_slot *slot = lru_slot_at(lru, id);
- if (!slot || !slot->key) {
- return -1;
- }
- lru->evictions += 1;
- free(slot->key);
- if (lru->evict) {
- lru->evict(lru->baton, lru_slot_val(slot, offset));
- }
- memset(slot, 0, lru->stride);
- return 0;
-}
-
-/** @internal Slot data setter */
-static inline void *lru_slot_set(struct lru_hash_base *lru, const char *key, uint16_t len, size_t offset)
-{
- if (!lru || !key || len == 0) {
- return NULL;
- }
- uint32_t id = hash(key, len) % lru->size;
- struct lru_slot *slot = lru_slot_at(lru, id);
- if (lru_slot_match(slot, key, len)) {
- slot->refs += 1; /* Increase slot significance */
- } else {
- if (slot->key) {
- slot->refs -= 1; /* Decrease slot significance */
- if (slot->refs > 0) {
- return NULL; /* Couldn't joust former key. */
- }
- if (lru_slot_evict(lru, id, offset) < 0) {
- return NULL;
- }
- }
- memset(slot, 0, lru->stride);
- slot->key = malloc(len);
- if (!slot->key) {
- return NULL;
- }
- memcpy(slot->key, key, len);
- slot->len = len;
- slot->refs = 1;
- }
- return lru_slot_val(slot, offset);
-}
-
-/**
- * @brief Return size of the LRU structure with given number of slots.
- * @param type type of LRU structure
- * @param max_slots number of slots
- */
-#define lru_size(type, max_slots) \
- (sizeof(type) + (max_slots) * sizeof(((type *)NULL)->slots[0]))
/**
- * @brief Initialize hash table.
- * @param table hash table
+ * @brief Allocate and initialize an LRU with default associativity.
+ *
+ * The real limit on the number of slots can be a bit larger but less than double.
+ *
+ * @param ptable pointer to a pointer to the LRU
* @param max_slots number of slots
+ * @param mm_ctx_array memory context to use for the huge array, NULL for default
+ * @param mm_ctx memory context to use for individual key-value pairs, NULL for default
+ *
+ * @note The pointers to memory contexts need to remain valid
+ * during the whole life of the structure (or be NULL).
*/
-#define lru_init(table, max_slots) \
- (memset((table), 0, sizeof(*table) + (max_slots) * sizeof((table)->slots[0])), \
- (table)->stride = sizeof((table)->slots[0]), (table)->size = (max_slots))
+#define lru_create(ptable, max_slots, mm_ctx_array, mm_ctx) do { \
+ (void)(((__typeof__((*(ptable))->pdata_t))0) == (void *)0); /* typecheck lru_t */ \
+ *((struct lru **)(ptable)) = \
+ lru_create_impl((max_slots), (mm_ctx_array), (mm_ctx)); \
+ } while (false)
-/**
- * @brief Free all keys and evict all values.
- * @param table hash table
- */
-#define lru_deinit(table) if (table) { \
- for (uint32_t i = 0; i < (table)->size; ++i) { \
- if ((table)->slots[i].key) { \
- if ((table)->evict) { \
- (table)->evict((table)->baton, &(table)->slots[i].data); \
- } \
- free((table)->slots[i].key); \
- } \
- } \
-}
+/** @brief Free an LRU created by lru_create (it can be NULL). */
+#define lru_free(table) \
+ lru_free_impl(&(table)->lru)
+
+/** @brief Reset an LRU to the empty state (but preserve any settings). */
+#define lru_reset(table) \
+ lru_reset_impl(&(table)->lru)
/**
- * @brief Find key in the hash table and return pointer to it's value.
- * @param table hash table
+ * @brief Find key in the LRU and return pointer to the corresponding value.
+ *
+ * @param table pointer to LRU
* @param key_ lookup key
* @param len_ key length
- * @return pointer to data or NULL
+ * @return pointer to data or NULL if not found
*/
-#define lru_get(table, key_, len_) \
- (__typeof__(&(table)->slots[0].data)) \
- lru_slot_get((struct lru_hash_base *)(table), (key_), (len_), lru_slot_offset(table))
+#define lru_get_try(table, key_, len_) \
+ (__typeof__((table)->pdata_t)) \
+ lru_get_impl(&(table)->lru, (key_), (len_), -1, false)
/**
- * @brief Return pointer to value (create/replace if needed)
- * @param table hash table
+ * @brief Return pointer to value, inserting if needed (zeroed).
+ *
+ * @param table pointer to LRU
* @param key_ lookup key
- * @param len_ key length
- * @return pointer to data or NULL
+ * @param len_ key length
+ * @return pointer to data or NULL (can be NULL even if memory could be allocated!)
*/
-#define lru_set(table, key_, len_) \
- (__typeof__(&(table)->slots[0].data)) \
- lru_slot_set((struct lru_hash_base *)(table), (key_), (len_), lru_slot_offset(table))
+#define lru_get_new(table, key_, len_) \
+ (__typeof__((table)->pdata_t)) \
+ lru_get_impl(&(table)->lru, (key_), (len_), sizeof(*(table)->pdata_t), true)
+
+
+/**
+ * @brief Apply a function to every item in LRU.
+ *
+ * @param table pointer to LRU
+ * @param function enum lru_apply_do (*function)(const char *key, uint len, val_type *val, void *baton)
+ * See enum lru_apply_do for the return type meanings.
+ * @param baton extra pointer passed to each function invocation
+ */
+#define lru_apply(table, function, baton) do { \
+ lru_apply_fun_g(fun_dummy, __typeof__(*(table)->pdata_t)) = 0; \
+ (void)(fun_dummy == (function)); /* produce a warning with incompatible function type */ \
+ lru_apply_impl(&(table)->lru, (lru_apply_fun)(function), (baton)); \
+ } while (false)
+
+/** @brief Possible actions to do with an element. */
+enum lru_apply_do {
+ LRU_APPLY_DO_NOTHING,
+ LRU_APPLY_DO_EVICT,
+ /* maybe more in future */
+};
/**
- * @brief Evict element at index.
- * @param table hash table
- * @param pos_ element position
- * @return 0 if successful, negative integer if failed
+ * @brief Return the real capacity - maximum number of keys holdable within.
+ *
+ * @param table pointer to LRU
*/
-#define lru_evict(table, pos_) \
- lru_slot_evict((struct lru_hash_base *)(table), (pos_), lru_slot_offset(table))
+#define lru_capacity(table) lru_capacity_impl(&(table)->lru)
+
+
+/** @brief Round the value up to a multiple of (1 << power). */
+static inline uint round_power(uint size, uint power)
+{
+ uint res = ((size - 1) & ~((1 << power) - 1)) + (1 << power);
+ assert(__builtin_ctz(res) >= power);
+ assert(size <= res && res < size + (1 << power));
+ return res;
+}
+
+
+/* ======================== Inlined part of implementation ======================== */
+/** @cond internal */
+
+#define lru_apply_fun_g(name, val_type) \
+ enum lru_apply_do (*(name))(const char *key, uint len, val_type *val, void *baton)
+typedef lru_apply_fun_g(lru_apply_fun, void);
+
+#if __GNUC__ >= 4
+ #define CACHE_ALIGNED __attribute__((aligned(64)))
+#else
+ #define CACHE_ALIGNED
+#endif
+
+struct lru;
+void lru_free_items_impl(struct lru *lru);
+struct lru * lru_create_impl(uint max_slots, knot_mm_t *mm_array, knot_mm_t *mm);
+void * lru_get_impl(struct lru *lru, const char *key, uint key_len,
+ uint val_len, bool do_insert);
+void lru_apply_impl(struct lru *lru, lru_apply_fun f, void *baton);
+
+struct lru_item;
+
+#if SIZE_MAX > (1 << 32)
+ /** @internal The number of keys stored within each group. */
+ #define LRU_ASSOC 3
+#else
+ #define LRU_ASSOC 4
+#endif
+/** @internal The number of hashes tracked within each group: 10-1 or 12-1. */
+#define LRU_TRACKED ((64 - sizeof(size_t) * LRU_ASSOC) / 4 - 1)
+
+struct lru_group {
+ uint16_t counts[LRU_TRACKED+1]; /*!< Occurrence counters; the last one is special. */
+ uint16_t hashes[LRU_TRACKED+1]; /*!< Top halves of hashes; the last one is unused. */
+ struct lru_item *items[LRU_ASSOC]; /*!< The full items. */
+} CACHE_ALIGNED;
+
+/* The sizes are chosen so lru_group just fits into a single x86 cache line. */
+static_assert(64 == sizeof(struct lru_group)
+ && 64 == LRU_ASSOC * sizeof(void*) + (LRU_TRACKED+1) * 4,
+ "bad sizing for your sizeof(void*)");
+
+struct lru {
+ struct knot_mm *mm, /**< Memory context to use for keys. */
+ *mm_array; /**< Memory context to use for this structure itself. */
+ uint log_groups; /**< Logarithm of the number of LRU groups. */
+ struct lru_group groups[] CACHE_ALIGNED; /**< The groups of items. */
+};
+
+/** @internal See lru_free. */
+static inline void lru_free_impl(struct lru *lru)
+{
+ if (!lru)
+ return;
+ lru_free_items_impl(lru);
+ mm_free(lru->mm_array, lru);
+}
+
+/** @internal See lru_reset. */
+static inline void lru_reset_impl(struct lru *lru)
+{
+ lru_free_items_impl(lru);
+ memset(lru->groups, 0, sizeof(lru->groups[0]) * (1 << lru->log_groups));
+}
+
+/** @internal See lru_capacity. */
+static inline uint lru_capacity_impl(struct lru *lru)
+{
+ assert(lru);
+ return (1 << lru->log_groups) * LRU_ASSOC;
+}
+
+/** @endcond */
-/** @} */
diff --git a/lib/lib.mk b/lib/lib.mk
index 01857b8e..b8569a3d 100644
--- a/lib/lib.mk
+++ b/lib/lib.mk
@@ -1,4 +1,5 @@
libkres_SOURCES := \
+ lib/generic/lru.c \
lib/generic/map.c \
lib/layer/iterate.c \
lib/layer/validate.c \
@@ -20,6 +21,7 @@ libkres_SOURCES := \
libkres_HEADERS := \
lib/generic/array.h \
+ lib/generic/lru.h \
lib/generic/map.h \
lib/generic/set.h \
lib/layer.h \
diff --git a/lib/nsrep.c b/lib/nsrep.c
index 6b2df27b..cc737f23 100644
--- a/lib/nsrep.c
+++ b/lib/nsrep.c
@@ -98,7 +98,7 @@ static unsigned eval_addr_set(pack_t *addr_set, kr_nsrep_lru_t *rttcache, unsign
}
/* Get RTT for this address (if known) */
if (is_valid) {
- unsigned *cached = rttcache ? lru_get(rttcache, val, len) : NULL;
+ unsigned *cached = rttcache ? lru_get_try(rttcache, val, len) : NULL;
unsigned addr_score = (cached) ? *cached : KR_NS_GLUED;
if (addr_score < score + favour) {
/* Shake down previous contenders */
@@ -124,7 +124,8 @@ static int eval_nsrep(const char *k, void *v, void *baton)
/* Fetch NS reputation */
if (ctx->cache_rep) {
- unsigned *cached = lru_get(ctx->cache_rep, k, knot_dname_size((const uint8_t *)k));
+ unsigned *cached = lru_get_try(ctx->cache_rep, k,
+ knot_dname_size((const uint8_t *)k));
if (cached) {
reputation = *cached;
}
@@ -188,7 +189,9 @@ int kr_nsrep_set(struct kr_query *qry, size_t index, uint8_t *addr, size_t addr_
/* Retrieve RTT from cache */
if (addr && addr_len > 0) {
struct kr_context *ctx = qry->ns.ctx;
- unsigned *score = ctx ? lru_get(ctx->cache_rtt, (const char *)addr, addr_len) : NULL;
+ unsigned *score = ctx
+ ? lru_get_try(ctx->cache_rtt, (const char *)addr, addr_len)
+ : NULL;
if (score) {
qry->ns.score = MIN(qry->ns.score, *score);
}
@@ -255,7 +258,7 @@ int kr_nsrep_update_rtt(struct kr_nsrep *ns, const struct sockaddr *addr,
addr_len = sizeof(struct in6_addr);
}
}
- unsigned *cur = lru_set(cache, addr_in, addr_len);
+ unsigned *cur = lru_get_new(cache, addr_in, addr_len);
if (!cur) {
return kr_error(ENOMEM);
}
@@ -290,7 +293,7 @@ int kr_nsrep_update_rep(struct kr_nsrep *ns, unsigned reputation, kr_nsrep_lru_t
/* Store in the struct */
ns->reputation = reputation;
/* Store reputation in the LRU cache */
- unsigned *cur = lru_set(cache, (const char *)ns->name, knot_dname_size(ns->name));
+ unsigned *cur = lru_get_new(cache, (const char *)ns->name, knot_dname_size(ns->name));
if (!cur) {
return kr_error(ENOMEM);
}
diff --git a/lib/nsrep.h b/lib/nsrep.h
index a52b382e..73d93c2b 100644
--- a/lib/nsrep.h
+++ b/lib/nsrep.h
@@ -62,7 +62,7 @@ enum kr_ns_update_mode {
/**
* NS reputation/QoS tracking.
*/
-typedef lru_hash(unsigned) kr_nsrep_lru_t;
+typedef lru_t(unsigned) kr_nsrep_lru_t;
/* Maximum count of addresses probed in one go (last is left empty) */
#define KR_NSREP_MAXADDR 4
diff --git a/lib/utils.h b/lib/utils.h
index 0eab195c..83c57677 100644
--- a/lib/utils.h
+++ b/lib/utils.h
@@ -16,6 +16,7 @@
#pragma once
+#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <sys/time.h>
@@ -44,6 +45,11 @@ KR_EXPORT void kr_log_debug(const char *fmt, ...);
#define WITH_DEBUG if(0)
#endif
+/* C11 compatibility, but without any implementation so far. */
+#ifndef static_assert
+#define static_assert(cond, msg)
+#endif
+
/** @cond Memory alloc routines */
static inline void *mm_alloc(knot_mm_t *mm, size_t size)
{
diff --git a/lib/zonecut.c b/lib/zonecut.c
index 610327dd..2ec4d684 100644
--- a/lib/zonecut.c
+++ b/lib/zonecut.c
@@ -341,7 +341,8 @@ static int fetch_ns(struct kr_context *ctx, struct kr_zonecut *cut, const knot_d
const knot_dname_t *ns_name = knot_ns_name(&rr_copy.rrs, i);
kr_zonecut_add(cut, ns_name, NULL);
/* Fetch NS reputation and decide whether to prefetch A/AAAA records. */
- unsigned *cached = lru_get(ctx->cache_rep, (const char *)ns_name, knot_dname_size(ns_name));
+ unsigned *cached = lru_get_try(ctx->cache_rep,
+ (const char *)ns_name, knot_dname_size(ns_name));
unsigned reputation = (cached) ? *cached : 0;
if (!(reputation & KR_NS_NOIP4) && !(ctx->options & QUERY_NO_IPV4)) {
fetch_addr(cut, &ctx->cache, ns_name, KNOT_RRTYPE_A, timestamp);
diff --git a/modules/stats/stats.c b/modules/stats/stats.c
index d564e8a3..97a38290 100644
--- a/modules/stats/stats.c
+++ b/modules/stats/stats.c
@@ -76,7 +76,7 @@ static struct const_metric_elm const_metrics[] = {
/** @endcond */
/** @internal LRU hash of most frequent names. */
-typedef lru_hash(unsigned) namehash_t;
+typedef lru_t(unsigned) namehash_t;
typedef array_t(struct sockaddr_in6) addrlist_t;
/** @internal Stats data structure. */
@@ -142,12 +142,12 @@ static void collect_sample(struct stat_data *data, struct kr_rplan *rplan, knot_
}
int key_len = collect_key(key, qry->sname, qry->stype);
if (qry->flags & QUERY_EXPIRING) {
- unsigned *count = lru_set(data->queries.expiring, key, key_len);
+ unsigned *count = lru_get_new(data->queries.expiring, key, key_len);
if (count)
*count += 1;
/* Consider 1 in N for frequent sampling. */
} else if (kr_rand_uint(FREQUENT_PSAMPLE) <= 1) {
- unsigned *count = lru_set(data->queries.frequent, key, key_len);
+ unsigned *count = lru_get_new(data->queries.frequent, key, key_len);
if (count)
*count += 1;
}
@@ -325,6 +325,23 @@ static char* stats_list(void *env, struct kr_module *module, const char *args)
return ret;
}
+/** @internal Helper for dump_list: add a single namehash_t item to JSON. */
+static enum lru_apply_do dump_value(const char *key, uint len, unsigned *val, void *baton)
+{
+ uint16_t key_type = 0;
+ char key_name[KNOT_DNAME_MAXLEN], type_str[16];
+ /* Extract query name, type and counter */
+ memcpy(&key_type, key, sizeof(key_type));
+ knot_dname_to_str(key_name, (uint8_t *)key + sizeof(key_type), sizeof(key_name));
+ knot_rrtype_to_string(key_type, type_str, sizeof(type_str));
+ /* Convert to JSON object */
+ JsonNode *json_val = json_mkobject();
+ json_append_member(json_val, "count", json_mknumber(*val));
+ json_append_member(json_val, "name", json_mkstring(key_name));
+ json_append_member(json_val, "type", json_mkstring(type_str));
+ json_append_element((JsonNode *)baton, json_val);
+ return LRU_APPLY_DO_NOTHING; // keep the item
+}
/**
* List frequent names.
*
@@ -335,25 +352,8 @@ static char* dump_list(void *env, struct kr_module *module, const char *args, na
if (!table) {
return NULL;
}
- uint16_t key_type = 0;
- char key_name[KNOT_DNAME_MAXLEN], type_str[16];
JsonNode *root = json_mkarray();
- for (unsigned i = 0; i < table->size; ++i) {
- struct lru_slot *slot = lru_slot_at((struct lru_hash_base *)table, i);
- if (slot->key) {
- /* Extract query name, type and counter */
- memcpy(&key_type, slot->key, sizeof(key_type));
- knot_dname_to_str(key_name, (uint8_t *)slot->key + sizeof(key_type), sizeof(key_name));
- knot_rrtype_to_string(key_type, type_str, sizeof(type_str));
- unsigned *slot_val = lru_slot_val(slot, lru_slot_offset(table));
- /* Convert to JSON object */
- JsonNode *json_val = json_mkobject();
- json_append_member(json_val, "count", json_mknumber(*slot_val));
- json_append_member(json_val, "name", json_mkstring(key_name));
- json_append_member(json_val, "type", json_mkstring(type_str));
- json_append_element(root, json_val);
- }
- }
+ lru_apply(table, dump_value, root);
char *ret = json_encode(root);
json_delete(root);
return ret;
@@ -368,8 +368,7 @@ static char* dump_frequent(void *env, struct kr_module *module, const char *args
static char* clear_frequent(void *env, struct kr_module *module, const char *args)
{
struct stat_data *data = module->data;
- lru_deinit(data->queries.frequent);
- lru_init(data->queries.frequent, FREQUENT_COUNT);
+ lru_reset(data->queries.frequent);
return NULL;
}
@@ -382,8 +381,7 @@ static char* dump_expiring(void *env, struct kr_module *module, const char *args
static char* clear_expiring(void *env, struct kr_module *module, const char *args)
{
struct stat_data *data = module->data;
- lru_deinit(data->queries.expiring);
- lru_init(data->queries.expiring, FREQUENT_COUNT);
+ lru_reset(data->queries.expiring);
return NULL;
}
@@ -450,14 +448,8 @@ int stats_init(struct kr_module *module)
memset(data, 0, sizeof(*data));
data->map = map_make();
module->data = data;
- data->queries.frequent = malloc(lru_size(namehash_t, FREQUENT_COUNT));
- if (data->queries.frequent) {
- lru_init(data->queries.frequent, FREQUENT_COUNT);
- }
- data->queries.expiring = malloc(lru_size(namehash_t, FREQUENT_COUNT));
- if (data->queries.expiring) {
- lru_init(data->queries.expiring, FREQUENT_COUNT);
- }
+ lru_create(&data->queries.frequent, FREQUENT_COUNT, NULL, NULL);
+ lru_create(&data->queries.expiring, FREQUENT_COUNT, NULL, NULL);
/* Initialize ring buffer of recently visited upstreams */
array_init(data->upstreams.q);
if (array_reserve(data->upstreams.q, UPSTREAMS_COUNT) != 0) {
@@ -476,10 +468,8 @@ int stats_deinit(struct kr_module *module)
struct stat_data *data = module->data;
if (data) {
map_clear(&data->map);
- lru_deinit(data->queries.frequent);
- lru_deinit(data->queries.expiring);
- free(data->queries.frequent);
- free(data->queries.expiring);
+ lru_free(data->queries.frequent);
+ lru_free(data->queries.expiring);
array_clear(data->upstreams.q);
free(data);
}
diff --git a/tests/test_lru.c b/tests/test_lru.c
index 82f82e12..ff8c7649 100644
--- a/tests/test_lru.c
+++ b/tests/test_lru.c
@@ -22,7 +22,7 @@
#include "tests/test.h"
#include "lib/generic/lru.h"
-typedef lru_hash(int) lru_int_t;
+typedef lru_t(int) lru_int_t;
#define HASH_SIZE 1024
#define KEY_LEN(x) (strlen(x) + 1)
@@ -61,10 +61,10 @@ static void test_insert(void **state)
int i;
for (i = 0; i < dict_size; i++) {
- int *data = lru_set(lru, dict[i], KEY_LEN(dict[i]));
+ int *data = lru_get_new(lru, dict[i], KEY_LEN(dict[i]));
assert_non_null(data);
*data = i;
- assert_true(*lru_get(lru, dict[i], KEY_LEN(dict[i])) == i);
+ assert_true(*lru_get_try(lru, dict[i], KEY_LEN(dict[i])) == i);
}
}
@@ -72,7 +72,7 @@ static void test_missing(void **state)
{
lru_int_t *lru = *state;
const char *notin = "not in lru";
- assert_true(lru_get(lru, notin, KEY_LEN(notin)) == NULL);
+ assert_true(lru_get_try(lru, notin, KEY_LEN(notin)) == NULL);
}
static void test_eviction(void **state)
@@ -81,12 +81,12 @@ static void test_eviction(void **state)
char key[16];
for (unsigned i = 0; i < HASH_SIZE; ++i) {
test_randstr(key, sizeof(key));
- int *data = lru_set(lru, key, sizeof(key));
+ int *data = lru_get_new(lru, key, sizeof(key));
if (!data) {
assert_true(0);
}
*data = i;
- if (*lru_get(lru, key, sizeof(key)) != i) {
+ if (*lru_get_try(lru, key, sizeof(key)) != i) {
assert_true(0);
}
}
@@ -94,18 +94,16 @@ static void test_eviction(void **state)
static void test_init(void **state)
{
- lru_int_t *lru = malloc(lru_size(lru_int_t, HASH_SIZE));
+ lru_int_t *lru;
+ lru_create(&lru, HASH_SIZE, NULL, NULL);
assert_non_null(lru);
- lru_init(lru, HASH_SIZE);
- assert_int_equal(lru->size, HASH_SIZE);
*state = lru;
}
static void test_deinit(void **state)
{
lru_int_t *lru = *state;
- lru_deinit(lru);
- free(lru);
+ lru_free(lru);
}
/* Program entry point */