author    Marek Vavruša <marek.vavrusa@nic.cz>  2015-07-19 00:53:25 +0200
committer Marek Vavruša <marek.vavrusa@nic.cz>  2015-07-19 01:23:53 +0200
commit    87bad998218d4d09eaf30840f3077c0a3e6997bd (patch)
tree      f15967cd7c2de5a38bedc4089b6d36565d7382f8
parent    modules/prefetch: frequent sampling, fetch iteration (diff)
modules/predict: cleaned up implementation, doc
-rw-r--r--  doc/modules.rst               |   2
-rw-r--r--  modules/modules.mk            |   2
-rw-r--r--  modules/predict/README.rst    |  48
-rw-r--r--  modules/predict/predict.lua   | 166
-rw-r--r--  modules/predict/predict.mk    |   2
-rw-r--r--  modules/prefetch/README.rst   |   9
-rw-r--r--  modules/prefetch/prefetch.lua | 147
-rw-r--r--  modules/prefetch/prefetch.mk  |   2
-rw-r--r--  modules/stats/README.rst      |  21
9 files changed, 233 insertions(+), 166 deletions(-)
diff --git a/doc/modules.rst b/doc/modules.rst
index 5f6f2bb7..b91583fe 100644
--- a/doc/modules.rst
+++ b/doc/modules.rst
@@ -12,7 +12,7 @@ Implemented modules
.. include:: ../modules/hints/README.rst
.. include:: ../modules/block/README.rst
.. include:: ../modules/stats/README.rst
-.. include:: ../modules/prefetch/README.rst
+.. include:: ../modules/predict/README.rst
.. include:: ../modules/cachectl/README.rst
.. include:: ../modules/graphite/README.rst
.. include:: ../modules/ketcd/README.rst
diff --git a/modules/modules.mk b/modules/modules.mk
index 583e842f..de98b7b9 100644
--- a/modules/modules.mk
+++ b/modules/modules.mk
@@ -17,7 +17,7 @@ ifeq ($(HAS_lua),yes)
modules_TARGETS += ketcd \
graphite \
block \
- prefetch
+ predict
endif
# List of Golang modules
diff --git a/modules/predict/README.rst b/modules/predict/README.rst
new file mode 100644
index 00000000..e24598e8
--- /dev/null
+++ b/modules/predict/README.rst
@@ -0,0 +1,48 @@
+.. _mod-predict:
+
+Prefetching records
+-------------------
+
+The module tracks expiring records (those with less than 5% of their original TTL remaining) and batches them for prefetching.
+This improves latency for frequently used records, as they are fetched in advance.
+
+It can also learn usage patterns and repetitive queries that the server makes. For example, if
+the same query is made every day at 18:00, the resolver expects it to be needed by that time and
+prefetches it ahead of time. This helps minimize perceived latency and keeps the cache hot.
+
+.. tip:: The tracking window and period length determine memory requirements. If your server has a relatively fast query turnover, keep the period low (an hour to start) and the tracking window short (5 minutes). For a slower personal resolver, use a longer tracking window (e.g. 30 minutes) and a longer period (a day), as habitual queries recur daily. Experiment to get the best results.
+
+Example configuration
+^^^^^^^^^^^^^^^^^^^^^
+
+.. warning:: This module requires the 'stats' module to be present and loaded.
+
+.. code-block:: lua
+
+ modules = {
+ predict = {
+ window = 15, -- 15 minutes sampling window
+ period = 6*(60/15) -- track last 6 hours
+ }
+ }
+
+The defaults are a 15-minute window and a 6-hour period.
+
+Exported metrics
+^^^^^^^^^^^^^^^^
+
+To visualize the efficiency of the predictions, the module exports the following statistics.
+
+* ``predict.epoch`` - current prediction epoch (based on time of day and sampling window)
+* ``predict.queue`` - number of queued queries in current window
+* ``predict.learned`` - number of learned queries in current window
+
+
+Properties
+^^^^^^^^^^
+
+.. function:: predict.config({ window = 15, period = 24 })
+
+   Reconfigure the predictor with the given tracking window and period length. Both parameters are optional.
+   The window length is in minutes; the period is the number of windows kept in memory,
+   e.g. with a 15-minute ``window``, a ``period`` of 24 covers 6 hours.
\ No newline at end of file
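For intuition, the window and period arithmetic above maps wall-clock time onto a ring of sampling windows. A minimal standalone sketch of that mapping, mirroring ``current_epoch()`` from the new module (the helper name ``epoch_at`` is illustrative, not part of the commit):

.. code-block:: lua

    -- Which sampling window (epoch) a wall-clock time falls into,
    -- for window = 15 minutes and period = 24 windows (6 hours).
    local window, period = 15, 24

    local function epoch_at(hour, min)
        return (hour * (60 / window) + math.floor(min / window)) % period + 1
    end

    print(epoch_at(0, 0))   -- 1, midnight opens the first window
    print(epoch_at(0, 14))  -- 1, still in the same 15-minute window
    print(epoch_at(0, 15))  -- 2, next window
    print(epoch_at(6, 0))   -- 1, the ring wraps after period * window = 6 hours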
diff --git a/modules/predict/predict.lua b/modules/predict/predict.lua
new file mode 100644
index 00000000..4f674da0
--- /dev/null
+++ b/modules/predict/predict.lua
@@ -0,0 +1,166 @@
+-- Speculative prefetching for repetitive and soon-expiring records to reduce latency.
+-- @module predict
+-- @field queue table of scheduled records
+-- @field queue_len current length of the queue
+-- @field window length of the coalescing window
+local predict = {
+ queue = {},
+ queue_len = 0,
+ batch = 0,
+ epoch = 0,
+ period = 24,
+ window = 15,
+ log = {},
+}
+
+-- Calculate current epoch (i.e. which sampling window the current time falls into)
+local function current_epoch()
+ return (os.date('%H')*(60/predict.window) +
+ math.floor(os.date('%M')/predict.window)) % predict.period + 1
+end
+
+-- Calculate next sample interval with jitter in [1/5, 2/5] of the window
+local function next_event()
+ local jitter = (predict.window * minute) / 5;
+ return math.random(jitter, 2 * jitter)
+end
+
+-- Resolve queued records and flush the queue
+function predict.drain(ev)
+ local deleted = 0
+ for key, val in pairs(predict.queue) do
+ worker.resolve(string.sub(key, 2), string.byte(key), 1, kres.query.NO_EXPIRING)
+ predict.queue[key] = nil
+ deleted = deleted + 1
+ if deleted >= predict.batch then
+ break
+ end
+ end
+ predict.ev_drain = nil
+ if deleted > 0 then
+ predict.ev_drain = event.after((predict.window * 3) * sec, predict.drain)
+ end
+ predict.queue_len = predict.queue_len - deleted
+ stats['predict.queue'] = predict.queue_len
+ collectgarbage()
+ return 0
+end
+
+-- Enqueue queries from set
+local function enqueue(queries)
+ local queued = 0
+ local nr_queries = #queries
+ for i = 1, nr_queries do
+ local entry = queries[i]
+ local key = string.char(entry.type)..entry.name
+ if not predict.queue[key] then
+ predict.queue[key] = 1
+ queued = queued + 1
+ end
+ end
+ return queued
+end
+
+-- Prefetch soon-to-expire records
+function predict.prefetch()
+ local queries = stats.expiring()
+ stats.clear_expiring()
+ return enqueue(queries)
+end
+
+-- Sample current epoch, return number of sampled queries
+function predict.sample(epoch_now)
+ if not epoch_now then return end
+ local queries = stats.frequent()
+ stats.clear_frequent()
+ local queued = 0
+ local current = predict.log[epoch_now]
+ if predict.epoch ~= epoch_now or current == nil then
+ if current ~= nil then
+ queued = enqueue(current)
+ end
+ current = {}
+ end
+ local nr_samples = #queries
+ for i = 1, nr_samples do
+ local entry = queries[i]
+ local key = string.char(entry.type)..entry.name
+ current[key] = entry.count
+ end
+ predict.log[epoch_now] = current
+ return nr_samples, queued
+end
+
+-- Predict queries for the upcoming epoch
+local function generate(epoch_now)
+ local queued = 0
+ local period = predict.period + 1
+ for i = 1, predict.period / 2 - 1 do
+ local current = predict.log[(epoch_now - i) % period]
+ local past = predict.log[(epoch_now - 2*i) % period]
+ if current and past then
+ for k, v in pairs(current) do
+ if past[k] ~= nil and not predict.queue[k] then
+ queued = queued + 1
+ predict.queue[k] = 1
+ end
+ end
+ end
+ end
+ return queued
+end
+
+-- Process current epoch
+function predict.process(ev)
+ -- Start a new epoch, or continue sampling
+ predict.ev_sample = nil
+ local epoch_now = current_epoch()
+ local nr_learned, nr_queued = predict.sample(epoch_now)
+ -- End of epoch, predict next
+ if predict.epoch ~= epoch_now then
+ predict.epoch = epoch_now
+ nr_queued = nr_queued + generate(epoch_now)
+ end
+ -- Prefetch expiring records
+ nr_queued = nr_queued + predict.prefetch()
+ -- Dispatch predicted queries
+ if nr_queued > 0 then
+ predict.queue_len = predict.queue_len + nr_queued
+ predict.batch = predict.queue_len / 5
+ if not predict.ev_drain then
+ predict.ev_drain = event.after(0, predict.drain)
+ end
+ end
+ predict.ev_sample = event.after(next_event(), predict.process)
+ stats['predict.epoch'] = epoch_now
+ stats['predict.queue'] = predict.queue_len
+ stats['predict.learned'] = nr_learned
+ collectgarbage()
+end
+
+function predict.init(module)
+ predict.epoch = current_epoch()
+ predict.ev_sample = event.after(next_event(), predict.process)
+end
+
+function predict.deinit(module)
+ if predict.ev_sample then event.cancel(predict.ev_sample) end
+ if predict.ev_drain then event.cancel(predict.ev_drain) end
+ predict.log = {}
+ collectgarbage()
+end
+
+function predict.config(config)
+ if not stats then
+ error("'stats' module required")
+ end
+ -- Reconfigure
+ if type(config) ~= 'table' then return end
+ if config.window then predict.window = config.window end
+ if config.period then predict.period = config.period end
+ -- Reinitialize to reset timers
+ predict.deinit()
+ predict.init()
+end
+
+return predict
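The heart of the module is ``generate()`` above: a name is queued for the upcoming epoch when it appears in the log at both ``epoch_now - i`` and ``epoch_now - 2*i``, meaning it recurred with a period of ``i`` windows. A standalone sketch of that recurrence test (illustrative only, with plain tables standing in for the module state):

.. code-block:: lua

    local period = 24
    local epoch_now = 12
    local log = {}
    -- Pretend 'example.com' was sampled 4 and 8 windows ago (recurs every 4 windows).
    log[(epoch_now - 4) % (period + 1)] = { ['example.com'] = 10 }
    log[(epoch_now - 8) % (period + 1)] = { ['example.com'] = 12 }

    local predicted = {}
    for i = 1, period / 2 - 1 do
        local current = log[(epoch_now - i) % (period + 1)]
        local past = log[(epoch_now - 2*i) % (period + 1)]
        if current and past then
            for k in pairs(current) do
                if past[k] ~= nil then predicted[k] = true end
            end
        end
    end
    assert(predicted['example.com']) -- seen with period 4, so it is prefetched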
diff --git a/modules/predict/predict.mk b/modules/predict/predict.mk
new file mode 100644
index 00000000..51a1a6bd
--- /dev/null
+++ b/modules/predict/predict.mk
@@ -0,0 +1,2 @@
+predict_SOURCES := predict.lua
+$(call make_lua_module,predict)
diff --git a/modules/prefetch/README.rst b/modules/prefetch/README.rst
deleted file mode 100644
index 8d561902..00000000
--- a/modules/prefetch/README.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-.. _mod-prefetch:
-
-Prefetching records
--------------------
-
-The module tracks expiring records (having less than 5% of original TTL) and batches them for prefetch.
-This improves latency for frequently used records, as they are fetched in advance.
-
-.. todo:: Learn usage patterns from browser history, track usage pattern over time.
\ No newline at end of file
diff --git a/modules/prefetch/prefetch.lua b/modules/prefetch/prefetch.lua
deleted file mode 100644
index bc187158..00000000
--- a/modules/prefetch/prefetch.lua
+++ /dev/null
@@ -1,147 +0,0 @@
--- Speculative prefetching for repetitive and soon-expiring records to reduce latency.
--- @module prefetch
--- @field queue table of scheduled records
--- @field queue_max maximum length of the queue
--- @field window length of the coalescing window
-local prefetch = {
- queue = {},
- queue_len = 0,
- batch = 0,
- epoch = 0,
- period = 24,
- window = 10,
- sample = 0,
- log = {}
-}
-
--- Calculate current epoch (which window fits current time)
-local function current_epoch()
- return (os.date('%H')*(60/prefetch.window) + math.floor(os.date('%M')/prefetch.window)) % prefetch.period + 1
-end
-
--- Calculate next sample with jitter [1/5 +20% of window]
-local function next_event()
- local jitter = (prefetch.window * minute) / 5;
- return math.random(jitter, 1.2 * jitter)
-end
-
--- Resolve queued records and flush the queue
-function prefetch.drain(ev)
- local deleted = 0
- for key, val in pairs(prefetch.queue) do
- worker.resolve(string.sub(key, 2), string.byte(key))
- prefetch.queue[key] = nil
- deleted = deleted + 1
- if deleted >= prefetch.batch then
- break
- end
- end
- if deleted > 0 then
- event.after((prefetch.window * 6) * sec, prefetch.drain)
- end
- prefetch.queue_len = prefetch.queue_len - deleted
- stats['predict.queue'] = prefetch.queue_len
- collectgarbage()
- return 0
-end
-
--- Enqueue queries from set
-local function enqueue(queries)
- local queued = 0
- local nr_queries = #queries
- for i = 1, nr_queries do
- local entry = queries[i]
- local key = string.char(entry.type)..entry.name
- if not prefetch.queue[key] then
- prefetch.queue[key] = 1
- queued = queued + 1
- end
- end
- return queued
-end
-
--- Prefetch soon-to-expire records
-local function refresh()
- local queries = stats.expiring()
- stats.clear_expiring()
- return enqueue(queries)
-end
-
--- Sample current epoch, return number of sampled queries
-local function sample(epoch_now)
- local queries = stats.frequent()
- stats.clear_frequent()
- local queued = 0
- local current = prefetch.log[epoch_now]
- if prefetch.epoch ~= epoch_now or current == nil then
- if current ~= nil then
- queued = enqueue(current)
- end
- current = {}
- end
- local nr_samples = #queries
- for i = 1, nr_samples do
- local entry = queries[i]
- local key = string.char(entry.type)..entry.name
- current[key] = entry.count
- end
- prefetch.log[epoch_now] = current
- prefetch.sample = prefetch.sample + 1
- return nr_samples, queued
-end
-
--- Sample current epoch, return number of sampled queries
-local function predict(epoch_now)
- local queued = 0
- local period = prefetch.period + 1
- for i = 1, prefetch.period / 2 - 1 do
- local current = prefetch.log[(epoch_now - i) % period]
- local past = prefetch.log[(epoch_now - 2*i) % period]
- if current and past then
- for k, v in pairs(current) do
- if past[k] ~= nil and not prefetch.queue[k] then
- queued = queued + 1
- prefetch.queue[k] = 1
- end
- end
- end
- end
- return queued
-end
-
--- Process current epoch
-function prefetch.process(ev)
- -- Start a new epoch, or continue sampling
- local epoch_now = current_epoch()
- local nr_learned, nr_queued = sample(epoch_now)
- -- End of epoch, predict next
- if prefetch.epoch ~= epoch_now then
- prefetch.epoch = epoch_now
- prefetch.sample = 0
- nr_queued = nr_queued + predict(epoch_now)
- end
- -- Prefetch expiring records
- nr_queued = nr_queued + refresh()
- -- Dispatch prefetch requests
- if nr_queued > 0 then
- prefetch.queue_len = prefetch.queue_len + nr_queued
- prefetch.batch = prefetch.queue_len / 5
- event.after(0, prefetch.drain)
- end
- event.after(next_event(), prefetch.process)
- stats['predict.epoch'] = epoch_now
- stats['predict.queue'] = prefetch.queue_len
- stats['predict.learned'] = nr_learned
- collectgarbage()
-end
-
-function prefetch.init(module)
- prefetch.epoch = current_epoch()
- event.after(next_event(), prefetch.process)
-end
-
-function prefetch.deinit(module)
- if prefetch.ev then event.cancel(prefetch.ev) end
-end
-
-return prefetch
diff --git a/modules/prefetch/prefetch.mk b/modules/prefetch/prefetch.mk
deleted file mode 100644
index a4867745..00000000
--- a/modules/prefetch/prefetch.mk
+++ /dev/null
@@ -1,2 +0,0 @@
-prefetch_SOURCES := prefetch.lua
-$(call make_lua_module,prefetch)
diff --git a/modules/stats/README.rst b/modules/stats/README.rst
index f36ecbc8..04fb1061 100644
--- a/modules/stats/README.rst
+++ b/modules/stats/README.rst
@@ -29,7 +29,7 @@ in new ones.
5
-- Fetch most common queries
- > stats.queries()
+ > stats.frequent()
[1] => {
[type] => 2
[count] => 4
@@ -37,7 +37,7 @@ in new ones.
}
-- Fetch most common queries (sorted by frequency)
- > table.sort(stats.queries(), function (a, b) return a.count > b.count end)
+ > table.sort(stats.frequent(), function (a, b) return a.count > b.count end)
Properties
^^^^^^^^^^
@@ -62,19 +62,28 @@ Set nominal value of given metric.
Outputs collected metrics as a JSON dictionary.
-.. function:: stats.queries()
+.. function:: stats.frequent()
Outputs list of most frequent iterative queries as a JSON array. The queries are sampled probabilistically,
-and include subrequests. The list maximum size is 1000 entries, make diffs if you want to track it over time.
+and include subrequests. The list maximum size is 5000 entries; make diffs if you want to track it over time.
-.. function:: stats.queries_clear()
+.. function:: stats.clear_frequent()
Clear the list of most frequent iterative queries.
+.. function:: stats.expiring()
+
+Outputs a list of soon-to-expire records as a JSON array.
+The list maximum size is 5000 entries; make diffs if you want to track it over time.
+
+.. function:: stats.clear_expiring()
+
+Clear the list of soon-to-expire records.
+
Built-in statistics
^^^^^^^^^^^^^^^^^^^
-* ``answer.total`` - total number of answerered queries
+* ``answer.total`` - total number of answered queries
* ``answer.cached`` - number of queries answered from cache
* ``answer.noerror`` - number of **NOERROR** answers
* ``answer.nxdomain`` - number of **NXDOMAIN** answers
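
As a usage sketch of the "make diffs" advice above: snapshot ``stats.frequent()`` periodically and report per-query count deltas. This assumes the kresd Lua environment where ``stats``, ``event`` and ``minute`` are available, as in the predict module; the helper is illustrative:

.. code-block:: lua

    -- Track growth of frequent queries between snapshots.
    local last = {}
    local function snapshot()
        local now = {}
        for _, entry in ipairs(stats.frequent()) do
            local key = string.char(entry.type) .. entry.name
            now[key] = entry.count
            local delta = entry.count - (last[key] or 0)
            if delta > 0 then
                print(string.format('%s (type %d): +%d', entry.name, entry.type, delta))
            end
        end
        last = now
        event.after(5 * minute, snapshot) -- reschedule, as predict.process does
    end
    event.after(5 * minute, snapshot)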