author     Vladimír Čunát <vladimir.cunat@nic.cz>  2024-09-30 15:34:11 +0200
committer  Vladimír Čunát <vladimir.cunat@nic.cz>  2024-09-30 15:34:11 +0200
commit     3b815e8f6989d64ce1facaa24dd0f94c585b819d (patch)
tree       48943e52d37bdb89b313dc6ba5320eb7c19ef140
parent     fixup! defer: add request and idle timeouts, limit on waiting queries (diff)
parent     Merge branch 'python-constants-module' into 'master' (diff)
Merge branch 'master' into rrl-wip
-rw-r--r--  .gitignore  12
-rw-r--r--  .gitlab-ci.manager.yml (renamed from manager/.gitlab-ci.yml)  9
-rw-r--r--  .gitlab-ci.yml  17
-rw-r--r--  .python-version  5
-rw-r--r--  ARCHITECTURE.md (renamed from manager/ARCHITECTURE.md)  0
-rw-r--r--  Dockerfile  2
-rw-r--r--  ERROR_HANDLING.md (renamed from manager/ERROR_HANDLING.md)  0
-rw-r--r--  NEWS  27
-rw-r--r--  README.md  64
-rw-r--r--  bench/bench_lru.c  14
-rw-r--r--  bench/meson.build  2
-rw-r--r--  build_c_extensions.py (renamed from manager/build_c_extensions.py)  4
-rwxr-xr-x  ci/gh_actions.py  2
-rw-r--r--  ci/images/manager/Dockerfile  21
-rwxr-xr-x  ci/respdiff/run-respdiff-tests.sh  2
-rw-r--r--  daemon/engine.h  1
-rw-r--r--  daemon/io.c  2
-rw-r--r--  daemon/layered-protocols.rst  445
-rw-r--r--  daemon/lua/kres-gen-33.lua  6
-rwxr-xr-x  daemon/lua/kres-gen.sh  5
-rw-r--r--  daemon/lua/kres.lua  5
-rw-r--r--  daemon/main.c  44
-rw-r--r--  daemon/network.c  1
-rw-r--r--  daemon/network.h  7
-rw-r--r--  daemon/proxyv2.c  48
-rw-r--r--  daemon/proxyv2.test/kresd_config.j2  1
-rw-r--r--  daemon/session2.c  40
-rw-r--r--  daemon/session2.h  9
-rw-r--r--  daemon/tls.c  69
-rw-r--r--  daemon/tls.h  6
-rw-r--r--  daemon/tls_session_ticket-srv.c  6
-rw-r--r--  daemon/worker.c  16
-rw-r--r--  distro/config/apkg.toml  2
-rw-r--r--  distro/pkg/arch/PKGBUILD  18
-rw-r--r--  distro/pkg/deb/knot-resolver6.install  2
-rwxr-xr-x  distro/pkg/deb/rules  16
-rw-r--r--  distro/pkg/rpm/knot-resolver.spec  29
-rw-r--r--  distro/tests/extra/all/control  43
-rw-r--r--  doc/_static/config.schema.json  1703
-rw-r--r--  doc/dev/build.rst  43
-rw-r--r--  doc/dev/index.rst  1
l---------  doc/dev/layered-protocols.rst  1
-rw-r--r--  doc/dev/manager-dev-env.rst  41
-rw-r--r--  doc/kresd.8.in  11
-rw-r--r--  doc/meson.build  4
-rw-r--r--  doc/user/config-local-data.rst  7
-rw-r--r--  doc/user/gettingstarted-install.rst  2
-rw-r--r--  etc/config/.gitignore (renamed from manager/etc/knot-resolver/.gitignore)  2
-rw-r--r--  etc/config/config.dev.yaml (renamed from manager/etc/knot-resolver/config.dev.yaml)  14
-rw-r--r--  etc/config/config.example.docker.yaml (renamed from manager/etc/knot-resolver/config.example.docker.yaml)  0
-rw-r--r--  etc/config/config.example.internal.yaml (renamed from manager/etc/knot-resolver/config.example.internal.yaml)  0
-rw-r--r--  etc/config/config.example.isp.yaml (renamed from manager/etc/knot-resolver/config.example.isp.yaml)  0
-rw-r--r--  etc/config/config.example.personal.yaml (renamed from manager/etc/knot-resolver/config.example.personal.yaml)  0
-rw-r--r--  etc/config/config.yaml (renamed from manager/etc/knot-resolver/config.yaml)  0
-rw-r--r--  lib/README.rst  5
-rw-r--r--  lib/cache/api.c  4
-rw-r--r--  lib/cache/peek.c  6
-rw-r--r--  lib/dnssec.c  26
-rw-r--r--  lib/dnssec.h  32
-rw-r--r--  lib/generic/array.h  2
-rw-r--r--  lib/layer/iterate.c  13
-rw-r--r--  lib/layer/validate.c  164
-rw-r--r--  lib/log.h  2
-rw-r--r--  lib/resolve-produce.c  12
-rw-r--r--  lib/resolve.c  17
-rw-r--r--  lib/resolve.h  1
-rw-r--r--  lib/rules/api.c  133
-rw-r--r--  lib/rules/api.h  3
-rw-r--r--  lib/rules/zonefile.c  6
-rw-r--r--  lib/utils.c  4
-rw-r--r--  lib/utils.h  6
-rw-r--r--  manager/.dockerignore  8
-rw-r--r--  manager/.flake8  3
-rw-r--r--  manager/.gitignore  20
-rw-r--r--  manager/.python-version  5
-rw-r--r--  manager/README.md  7
-rw-r--r--  manager/knot_resolver_manager/__init__.py  1
-rw-r--r--  manager/knot_resolver_manager/__main__.py  15
-rw-r--r--  manager/knot_resolver_manager/cli/__init__.py  5
-rw-r--r--  manager/knot_resolver_manager/cli/__main__.py  4
-rw-r--r--  manager/knot_resolver_manager/cli/main.py  69
-rw-r--r--  manager/knot_resolver_manager/compat/__init__.py  3
-rw-r--r--  manager/knot_resolver_manager/compat/dataclasses.py  68
-rw-r--r--  manager/knot_resolver_manager/datamodel/__init__.py  3
-rw-r--r--  manager/knot_resolver_manager/exceptions.py  28
-rw-r--r--  manager/meson.build  37
-rwxr-xr-x  manager/poe  22
-rw-r--r--  manager/scripts/_env.sh  58
-rwxr-xr-x  manager/scripts/docs  10
-rwxr-xr-x  manager/scripts/examples  11
-rw-r--r--  manager/scripts/install.sh  11
-rw-r--r--  manager/scripts/make-package.sh  71
-rwxr-xr-x  manager/scripts/man  9
-rwxr-xr-x  manager/scripts/meson-configure  11
-rwxr-xr-x  manager/scripts/run  44
-rw-r--r--  manager/setup.py  54
-rw-r--r--  manager/tests/README.md  9
-rw-r--r--  manager/tests/packaging/control  41
-rw-r--r--  manager/tests/unit/__init__.py  5
-rw-r--r--  manager/tests/unit/test_knot_resolver_manager.py  5
-rw-r--r--  manager/tests/unit/utils/test_dataclasses.py  15
-rw-r--r--  meson.build  28
-rw-r--r--  modules/dns64/dns64.lua  2
-rw-r--r--  modules/policy/policy.lua  12
-rw-r--r--  modules/serve_stale/serve_stale.lua  21
-rw-r--r--  modules/stats/README.rst  2
-rw-r--r--  modules/stats/stats.c  8
-rwxr-xr-x  modules/ta_update/ta_update.test.integr/rfc5011/dns2rpl.py  2
-rwxr-xr-x  modules/ta_update/ta_update.test.integr/rfc5011/genkeyszones.sh  2
-rw-r--r--  modules/workarounds/workarounds.lua  2
-rwxr-xr-x  poe  4
-rw-r--r--  pyproject.toml (renamed from manager/pyproject.toml)  58
-rw-r--r--  python/knot_resolver.py.in  10
-rw-r--r--  python/knot_resolver/__init__.py  6
-rw-r--r--  python/knot_resolver/client/__init__.py  5
-rw-r--r--  python/knot_resolver/client/__main__.py  4
-rw-r--r--  python/knot_resolver/client/client.py (renamed from manager/knot_resolver_manager/cli/kresctl.py)  8
-rw-r--r--  python/knot_resolver/client/command.py (renamed from manager/knot_resolver_manager/cli/command.py)  33
-rw-r--r--  python/knot_resolver/client/commands/cache.py (renamed from manager/knot_resolver_manager/cli/cmd/cache.py)  55
-rw-r--r--  python/knot_resolver/client/commands/completion.py (renamed from manager/knot_resolver_manager/cli/cmd/completion.py)  2
-rw-r--r--  python/knot_resolver/client/commands/config.py (renamed from manager/knot_resolver_manager/cli/cmd/config.py)  10
-rw-r--r--  python/knot_resolver/client/commands/convert.py (renamed from manager/knot_resolver_manager/cli/cmd/convert.py)  14
-rw-r--r--  python/knot_resolver/client/commands/help.py (renamed from manager/knot_resolver_manager/cli/cmd/help.py)  2
-rw-r--r--  python/knot_resolver/client/commands/metrics.py (renamed from manager/knot_resolver_manager/cli/cmd/metrics.py)  6
-rw-r--r--  python/knot_resolver/client/commands/reload.py (renamed from manager/knot_resolver_manager/cli/cmd/reload.py)  4
-rw-r--r--  python/knot_resolver/client/commands/schema.py (renamed from manager/knot_resolver_manager/cli/cmd/schema.py)  8
-rw-r--r--  python/knot_resolver/client/commands/stop.py (renamed from manager/knot_resolver_manager/cli/cmd/stop.py)  4
-rw-r--r--  python/knot_resolver/client/commands/validate.py (renamed from manager/knot_resolver_manager/cli/cmd/validate.py)  14
-rw-r--r--  python/knot_resolver/client/main.py  78
-rw-r--r--  python/knot_resolver/constants.py  19
-rw-r--r--  python/knot_resolver/constants.py.in  19
-rw-r--r--  python/knot_resolver/controller/__init__.py (renamed from manager/knot_resolver_manager/kresd_controller/__init__.py)  6
-rw-r--r--  python/knot_resolver/controller/exceptions.py  19
-rw-r--r--  python/knot_resolver/controller/interface.py (renamed from manager/knot_resolver_manager/kresd_controller/interface.py)  10
-rw-r--r--  python/knot_resolver/controller/registered_workers.py (renamed from manager/knot_resolver_manager/kresd_controller/registered_workers.py)  4
-rw-r--r--  python/knot_resolver/controller/supervisord/__init__.py (renamed from manager/knot_resolver_manager/kresd_controller/supervisord/__init__.py)  18
-rw-r--r--  python/knot_resolver/controller/supervisord/config_file.py (renamed from manager/knot_resolver_manager/kresd_controller/supervisord/config_file.py)  43
-rw-r--r--  python/knot_resolver/controller/supervisord/plugin/fast_rpcinterface.py (renamed from manager/knot_resolver_manager/kresd_controller/supervisord/plugin/fast_rpcinterface.py)  0
-rw-r--r--  python/knot_resolver/controller/supervisord/plugin/manager_integration.py (renamed from manager/knot_resolver_manager/kresd_controller/supervisord/plugin/manager_integration.py)  2
-rw-r--r--  python/knot_resolver/controller/supervisord/plugin/notifymodule.c (renamed from manager/knot_resolver_manager/kresd_controller/supervisord/plugin/notifymodule.c)  0
-rw-r--r--  python/knot_resolver/controller/supervisord/plugin/patch_logger.py (renamed from manager/knot_resolver_manager/kresd_controller/supervisord/plugin/patch_logger.py)  3
-rw-r--r--  python/knot_resolver/controller/supervisord/plugin/sd_notify.py (renamed from manager/knot_resolver_manager/kresd_controller/supervisord/plugin/sd_notify.py)  2
-rw-r--r--  python/knot_resolver/controller/supervisord/supervisord.conf.j2 (renamed from manager/knot_resolver_manager/kresd_controller/supervisord/supervisord.conf.j2)  8
-rw-r--r--  python/knot_resolver/datamodel/__init__.py  3
-rw-r--r--  python/knot_resolver/datamodel/cache_schema.py (renamed from manager/knot_resolver_manager/datamodel/cache_schema.py)  25
-rw-r--r--  python/knot_resolver/datamodel/config_schema.py (renamed from manager/knot_resolver_manager/datamodel/config_schema.py)  94
-rw-r--r--  python/knot_resolver/datamodel/design-notes.yml (renamed from manager/knot_resolver_manager/datamodel/design-notes.yml)  0
-rw-r--r--  python/knot_resolver/datamodel/dns64_schema.py (renamed from manager/knot_resolver_manager/datamodel/dns64_schema.py)  4
-rw-r--r--  python/knot_resolver/datamodel/dnssec_schema.py (renamed from manager/knot_resolver_manager/datamodel/dnssec_schema.py)  6
-rw-r--r--  python/knot_resolver/datamodel/forward_schema.py (renamed from manager/knot_resolver_manager/datamodel/forward_schema.py)  10
-rw-r--r--  python/knot_resolver/datamodel/globals.py (renamed from manager/knot_resolver_manager/datamodel/globals.py)  4
-rw-r--r--  python/knot_resolver/datamodel/local_data_schema.py (renamed from manager/knot_resolver_manager/datamodel/local_data_schema.py)  18
-rw-r--r--  python/knot_resolver/datamodel/logging_schema.py (renamed from manager/knot_resolver_manager/datamodel/logging_schema.py)  23
-rw-r--r--  python/knot_resolver/datamodel/lua_schema.py (renamed from manager/knot_resolver_manager/datamodel/lua_schema.py)  6
-rw-r--r--  python/knot_resolver/datamodel/management_schema.py (renamed from manager/knot_resolver_manager/datamodel/management_schema.py)  6
-rw-r--r--  python/knot_resolver/datamodel/monitoring_schema.py (renamed from manager/knot_resolver_manager/datamodel/monitoring_schema.py)  8
-rw-r--r--  python/knot_resolver/datamodel/network_schema.py (renamed from manager/knot_resolver_manager/datamodel/network_schema.py)  22
-rw-r--r--  python/knot_resolver/datamodel/options_schema.py (renamed from manager/knot_resolver_manager/datamodel/options_schema.py)  4
-rw-r--r--  python/knot_resolver/datamodel/policy_schema.py (renamed from manager/knot_resolver_manager/datamodel/policy_schema.py)  8
-rw-r--r--  python/knot_resolver/datamodel/rate_limiting_schema.py (renamed from manager/knot_resolver_manager/datamodel/rate_limiting_schema.py)  0
-rw-r--r--  python/knot_resolver/datamodel/rpz_schema.py (renamed from manager/knot_resolver_manager/datamodel/rpz_schema.py)  6
-rw-r--r--  python/knot_resolver/datamodel/slice_schema.py (renamed from manager/knot_resolver_manager/datamodel/slice_schema.py)  8
-rw-r--r--  python/knot_resolver/datamodel/static_hints_schema.py (renamed from manager/knot_resolver_manager/datamodel/static_hints_schema.py)  8
-rw-r--r--  python/knot_resolver/datamodel/stub_zone_schema.py (renamed from manager/knot_resolver_manager/datamodel/stub_zone_schema.py)  4
-rw-r--r--  python/knot_resolver/datamodel/templates/__init__.py (renamed from manager/knot_resolver_manager/datamodel/templates/__init__.py)  4
-rw-r--r--  python/knot_resolver/datamodel/templates/cache.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/cache.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/dns64.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/dns64.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/dnssec.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/dnssec.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/forward.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/forward.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/local_data.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/local_data.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/logging.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/logging.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/macros/cache_macros.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/macros/cache_macros.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/macros/common_macros.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/macros/common_macros.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/macros/forward_macros.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/macros/forward_macros.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/macros/local_data_macros.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/macros/local_data_macros.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/macros/network_macros.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/macros/network_macros.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/macros/policy_macros.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/macros/policy_macros.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/macros/view_macros.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/macros/view_macros.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/monitoring.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/monitoring.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/network.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/network.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/options.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/options.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/policy-config.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/policy-config.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/rate_limiting.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/rate_limiting.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/static_hints.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/static_hints.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/views.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/views.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/webmgmt.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/webmgmt.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/templates/worker-config.lua.j2 (renamed from manager/knot_resolver_manager/datamodel/templates/worker-config.lua.j2)  0
-rw-r--r--  python/knot_resolver/datamodel/types/__init__.py (renamed from manager/knot_resolver_manager/datamodel/types/__init__.py)  5
-rw-r--r--  python/knot_resolver/datamodel/types/base_types.py (renamed from manager/knot_resolver_manager/datamodel/types/base_types.py)  2
-rw-r--r--  python/knot_resolver/datamodel/types/enums.py (renamed from manager/knot_resolver_manager/datamodel/types/enums.py)  2
-rw-r--r--  python/knot_resolver/datamodel/types/files.py (renamed from manager/knot_resolver_manager/datamodel/types/files.py)  102
-rw-r--r--  python/knot_resolver/datamodel/types/generic_types.py (renamed from manager/knot_resolver_manager/datamodel/types/generic_types.py)  2
-rw-r--r--  python/knot_resolver/datamodel/types/types.py (renamed from manager/knot_resolver_manager/datamodel/types/types.py)  12
-rw-r--r--  python/knot_resolver/datamodel/view_schema.py (renamed from manager/knot_resolver_manager/datamodel/view_schema.py)  8
-rw-r--r--  python/knot_resolver/datamodel/webmgmt_schema.py (renamed from manager/knot_resolver_manager/datamodel/webmgmt_schema.py)  10
-rw-r--r--  python/knot_resolver/exceptions.py  4
-rw-r--r--  python/knot_resolver/manager/__init__.py  0
-rw-r--r--  python/knot_resolver/manager/__main__.py  4
-rw-r--r--  python/knot_resolver/manager/config_store.py (renamed from manager/knot_resolver_manager/config_store.py)  11
-rw-r--r--  python/knot_resolver/manager/constants.py (renamed from manager/knot_resolver_manager/constants.py)  43
-rw-r--r--  python/knot_resolver/manager/exceptions.py  5
-rw-r--r--  python/knot_resolver/manager/logging.py (renamed from manager/knot_resolver_manager/log.py)  11
-rw-r--r--  python/knot_resolver/manager/main.py (renamed from manager/knot_resolver_manager/main.py)  23
-rw-r--r--  python/knot_resolver/manager/manager.py (renamed from manager/knot_resolver_manager/kres_manager.py)  36
-rw-r--r--  python/knot_resolver/manager/metrics/__init__.py  4
-rw-r--r--  python/knot_resolver/manager/metrics/collect.py  38
-rw-r--r--  python/knot_resolver/manager/metrics/prometheus.py (renamed from manager/knot_resolver_manager/statistics.py)  277
-rw-r--r--  python/knot_resolver/manager/server.py (renamed from manager/knot_resolver_manager/server.py)  78
-rw-r--r--  python/knot_resolver/meson.build  17
-rw-r--r--  python/knot_resolver/utils/__init__.py (renamed from manager/knot_resolver_manager/utils/__init__.py)  0
-rw-r--r--  python/knot_resolver/utils/async_utils.py (renamed from manager/knot_resolver_manager/utils/async_utils.py)  2
-rw-r--r--  python/knot_resolver/utils/compat/__init__.py  3
-rw-r--r--  python/knot_resolver/utils/compat/asyncio.py (renamed from manager/knot_resolver_manager/compat/asyncio.py)  0
-rw-r--r--  python/knot_resolver/utils/custom_atexit.py (renamed from manager/knot_resolver_manager/utils/custom_atexit.py)  0
-rw-r--r--  python/knot_resolver/utils/etag.py (renamed from manager/knot_resolver_manager/utils/etag.py)  0
-rw-r--r--  python/knot_resolver/utils/functional.py (renamed from manager/knot_resolver_manager/utils/functional.py)  0
-rw-r--r--  python/knot_resolver/utils/modeling/README.md (renamed from manager/knot_resolver_manager/utils/modeling/README.md)  0
-rw-r--r--  python/knot_resolver/utils/modeling/__init__.py (renamed from manager/knot_resolver_manager/utils/modeling/__init__.py)  0
-rw-r--r--  python/knot_resolver/utils/modeling/base_generic_type_wrapper.py (renamed from manager/knot_resolver_manager/utils/modeling/base_generic_type_wrapper.py)  0
-rw-r--r--  python/knot_resolver/utils/modeling/base_schema.py (renamed from manager/knot_resolver_manager/utils/modeling/base_schema.py)  30
-rw-r--r--  python/knot_resolver/utils/modeling/base_value_type.py (renamed from manager/knot_resolver_manager/utils/modeling/base_value_type.py)  0
-rw-r--r--  python/knot_resolver/utils/modeling/exceptions.py (renamed from manager/knot_resolver_manager/utils/modeling/exceptions.py)  4
-rw-r--r--  python/knot_resolver/utils/modeling/json_pointer.py (renamed from manager/knot_resolver_manager/utils/modeling/json_pointer.py)  0
-rw-r--r--  python/knot_resolver/utils/modeling/parsing.py (renamed from manager/knot_resolver_manager/utils/modeling/parsing.py)  0
-rw-r--r--  python/knot_resolver/utils/modeling/query.py (renamed from manager/knot_resolver_manager/utils/modeling/query.py)  8
-rw-r--r--  python/knot_resolver/utils/modeling/renaming.py (renamed from manager/knot_resolver_manager/utils/modeling/renaming.py)  0
-rw-r--r--  python/knot_resolver/utils/modeling/types.py (renamed from manager/knot_resolver_manager/utils/modeling/types.py)  4
-rw-r--r--  python/knot_resolver/utils/requests.py (renamed from manager/knot_resolver_manager/utils/requests.py)  4
-rw-r--r--  python/knot_resolver/utils/systemd_notify.py (renamed from manager/knot_resolver_manager/utils/systemd_notify.py)  0
-rw-r--r--  python/knot_resolver/utils/which.py (renamed from manager/knot_resolver_manager/utils/which.py)  0
-rw-r--r--  python/meson.build  23
-rw-r--r--  python/setup.py.in  12
-rw-r--r--  scripts/README.md  18
-rwxr-xr-x  scripts/bugreport-journals.py  2
-rwxr-xr-x  scripts/ci/build-in-obs.sh (renamed from scripts/build-in-obs.sh)  6
-rwxr-xr-x  scripts/ci/enable-repo-cznic-labs.sh (renamed from scripts/enable-repo-cznic-labs.sh)  2
-rwxr-xr-x  scripts/ci/make-obs.sh (renamed from scripts/make-obs.sh)  6
-rwxr-xr-x  scripts/coverage_c_combine.sh  26
-rwxr-xr-x  scripts/coverage_env.sh  42
-rwxr-xr-x  scripts/gen-pgp-keyblock.sh  2
-rwxr-xr-x  scripts/lib/get-date.sh (renamed from scripts/get-date.sh)  4
-rwxr-xr-x  scripts/lib/upstream-version.sh (renamed from scripts/upstream-version.sh)  2
-rwxr-xr-x  scripts/luacov_gen_empty.sh  18
-rwxr-xr-x  scripts/luacov_to_info.lua  57
-rwxr-xr-x  scripts/make-archive.sh  2
-rwxr-xr-x  scripts/make-doc.sh  25
-rwxr-xr-x  scripts/map_install_src.lua  168
-rwxr-xr-x  scripts/meson/bench.sh (renamed from scripts/bench.sh)  2
-rwxr-xr-x  scripts/meson/gen-cdefs.sh (renamed from scripts/gen-cdefs.sh)  2
-rwxr-xr-x  scripts/meson/make-doc.sh  20
-rwxr-xr-x  scripts/meson/run-pylint.sh (renamed from scripts/run-pylint.sh)  4
-rwxr-xr-x  scripts/meson/test-config.sh (renamed from scripts/test-config.sh)  2
-rwxr-xr-x  scripts/meson/test-integration-prepare.sh (renamed from scripts/test-integration-prepare.sh)  2
-rwxr-xr-x  scripts/poe-tasks/check (renamed from manager/scripts/codecheck)  35
-rwxr-xr-x  scripts/poe-tasks/clean  22
-rwxr-xr-x  scripts/poe-tasks/configure  14
-rwxr-xr-x  scripts/poe-tasks/doc  13
-rwxr-xr-x  scripts/poe-tasks/doc-schema  13
-rwxr-xr-x  scripts/poe-tasks/examples  11
-rwxr-xr-x  scripts/poe-tasks/format  13
-rwxr-xr-x  scripts/poe-tasks/gen-constantspy  14
-rwxr-xr-x  scripts/poe-tasks/gen-setuppy  8
-rwxr-xr-x  scripts/poe-tasks/kresctl  12
-rwxr-xr-x  scripts/poe-tasks/run  35
-rwxr-xr-x  scripts/poe-tasks/run-debug (renamed from manager/scripts/run-debug)  6
-rwxr-xr-x  scripts/poe-tasks/test  8
-rw-r--r--  scripts/poe-tasks/utils/_env.sh  104
-rwxr-xr-x[-rw-r--r--]  scripts/poe-tasks/utils/create_setup.py (renamed from manager/scripts/create_setup.py)  2
-rwxr-xr-x  scripts/run-scanbuild-with-args.sh  51
-rwxr-xr-x  scripts/update-authors.sh  2
-rwxr-xr-x  scripts/update-root-hints.sh  2
-rw-r--r--  setup.py  60
-rw-r--r--  tests/README.rst  31
-rw-r--r--  tests/config/meson.build  2
-rwxr-xr-x  tests/dnstap/src/dnstap-test/run.sh  2
m---------  tests/integration/deckard  0
-rw-r--r--  tests/integration/meson.build  2
-rw-r--r--  tests/manager/__init__.py  5
-rw-r--r--  tests/manager/datamodel/templates/test_cache_macros.py (renamed from manager/tests/unit/datamodel/templates/test_cache_macros.py)  4
-rw-r--r--  tests/manager/datamodel/templates/test_common_macros.py (renamed from manager/tests/unit/datamodel/templates/test_common_macros.py)  6
-rw-r--r--  tests/manager/datamodel/templates/test_forward_macros.py (renamed from manager/tests/unit/datamodel/templates/test_forward_macros.py)  6
-rw-r--r--  tests/manager/datamodel/templates/test_network_macros.py (renamed from manager/tests/unit/datamodel/templates/test_network_macros.py)  4
-rw-r--r--  tests/manager/datamodel/templates/test_policy_macros.py (renamed from manager/tests/unit/datamodel/templates/test_policy_macros.py)  8
-rw-r--r--  tests/manager/datamodel/templates/test_types_render.py (renamed from manager/tests/unit/datamodel/templates/test_types_render.py)  4
-rw-r--r--  tests/manager/datamodel/templates/test_view_macros.py (renamed from manager/tests/unit/datamodel/templates/test_view_macros.py)  4
-rw-r--r--  tests/manager/datamodel/test_config_schema.py (renamed from manager/tests/unit/datamodel/test_config_schema.py)  8
-rw-r--r--  tests/manager/datamodel/test_forward_schema.py (renamed from manager/tests/unit/datamodel/test_forward_schema.py)  4
-rw-r--r--  tests/manager/datamodel/test_local_data.py (renamed from manager/tests/unit/datamodel/test_local_data.py)  4
-rw-r--r--  tests/manager/datamodel/test_lua_schema.py (renamed from manager/tests/unit/datamodel/test_lua_schema.py)  4
-rw-r--r--  tests/manager/datamodel/test_management_schema.py (renamed from manager/tests/unit/datamodel/test_management_schema.py)  4
-rw-r--r--  tests/manager/datamodel/test_network_schema.py (renamed from manager/tests/unit/datamodel/test_network_schema.py)  6
-rw-r--r--  tests/manager/datamodel/test_policy_schema.py (renamed from manager/tests/unit/datamodel/test_policy_schema.py)  8
-rw-r--r--  tests/manager/datamodel/test_rpz_schema.py (renamed from manager/tests/unit/datamodel/test_rpz_schema.py)  4
-rw-r--r--  tests/manager/datamodel/types/test_base_types.py (renamed from manager/tests/unit/datamodel/types/test_base_types.py)  8
-rw-r--r--  tests/manager/datamodel/types/test_custom_types.py (renamed from manager/tests/unit/datamodel/types/test_custom_types.py)  15
-rw-r--r--  tests/manager/datamodel/types/test_generic_types.py (renamed from manager/tests/unit/datamodel/types/test_generic_types.py)  8
-rw-r--r--  tests/manager/test_config_store.py (renamed from manager/tests/unit/test_config_store.py)  4
-rw-r--r--  tests/manager/test_knot_resolver_manager.py  12
-rw-r--r--  tests/manager/utils/modeling/test_base_schema.py (renamed from manager/tests/unit/utils/modeling/test_base_schema.py)  7
-rw-r--r--  tests/manager/utils/modeling/test_etag.py (renamed from manager/tests/unit/utils/modeling/test_etag.py)  2
-rw-r--r--  tests/manager/utils/modeling/test_json_pointer.py (renamed from manager/tests/unit/utils/modeling/test_json_pointer.py)  2
-rw-r--r--  tests/manager/utils/modeling/test_query.py (renamed from manager/tests/unit/utils/modeling/test_query.py)  2
-rw-r--r--  tests/manager/utils/modeling/test_renaming.py (renamed from manager/tests/unit/utils/modeling/test_renaming.py)  2
-rw-r--r--  tests/manager/utils/modeling/test_types.py (renamed from manager/tests/unit/utils/modeling/test_types.py)  7
-rw-r--r--  tests/manager/utils/test_functional.py (renamed from manager/tests/unit/utils/test_functional.py)  2
-rw-r--r--  tests/meson.build  1
-rw-r--r--  tests/packaging/README.md  5
-rwxr-xr-x  tests/packaging/dependencies.py (renamed from manager/tests/packaging/dependencies.py)  4
-rwxr-xr-x  tests/packaging/interactive/cache-clear.sh (renamed from manager/tests/packaging/interactive/cache-clear.sh)  2
-rwxr-xr-x  tests/packaging/interactive/etag.sh (renamed from manager/tests/packaging/interactive/etag.sh)  2
-rwxr-xr-x  tests/packaging/interactive/metrics.sh (renamed from manager/tests/packaging/interactive/metrics.sh)  2
-rwxr-xr-x  tests/packaging/interactive/reload.sh (renamed from manager/tests/packaging/interactive/reload.sh)  2
-rwxr-xr-x  tests/packaging/interactive/schema.sh  15
-rwxr-xr-x  tests/packaging/interactive/workers.sh (renamed from manager/tests/packaging/interactive/workers.sh)  2
-rwxr-xr-x  tests/packaging/knot-resolver.sh (renamed from manager/tests/packaging/knot-resolver.sh)  2
-rwxr-xr-x  tests/packaging/kresctl.sh (renamed from manager/tests/packaging/kresctl.sh)  2
-rwxr-xr-x  tests/packaging/manpage.sh (renamed from manager/tests/packaging/manpage.sh)  0
-rwxr-xr-x  tests/packaging/systemd_service.sh (renamed from manager/tests/packaging/systemd_service.sh)  10
-rw-r--r--  utils/shell-completion/client.bash (renamed from manager/shell-completion/client.bash)  0
-rw-r--r--  utils/shell-completion/client.fish (renamed from manager/shell-completion/client.fish)  0
-rw-r--r--  utils/shell-completion/meson.build (renamed from manager/shell-completion/meson.build)  0
321 files changed, 4334 insertions, 2274 deletions
diff --git a/.gitignore b/.gitignore
index 569b0323..0d813846 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+**/__pycache__/
*.6
*.Plo
*.a
@@ -8,6 +9,7 @@
*.gcno
*.gcov
*.info
+*.junit.xml
*.la
*.lo
*.log
@@ -18,17 +20,20 @@
*.so.*
*.swp
*~
+.coverage
.deps
.dirstamp
.libs
+.mypy_cache
.pytest_cache
-/.build-depend
+/.build*/
/.cache
+/.install_dev
/aclocal.m4
/ar-lib
/autom4te.cache/*
/bench/bench_lru
-/build*
+/build*/
/compile
/compile_commands.json
/config.guess
@@ -44,13 +49,13 @@
/daemon/lua/*.inc
/daemon/lua/trust_anchors.lua
/depcomp
+/dist
/distro/tests/*/.vagrant
/doc/**/.doctrees
/doc/**/doxyxml
/doc/html
/doc/kresd.8
/doc/texinfo
-/doc/_static/config.schema.json
/doc/_static/schema_doc*
/doc/config-schema-body.md
/ephemeral_key.pem
@@ -83,3 +88,4 @@ _obj
kresd.amalg.c
libkres.amalg.c
luacov.*.out
+poetry.lock
diff --git a/manager/.gitlab-ci.yml b/.gitlab-ci.manager.yml
index 369035c8..727e1bf1 100644
--- a/manager/.gitlab-ci.yml
+++ b/.gitlab-ci.manager.yml
@@ -4,7 +4,6 @@ stages:
default:
image: $IMAGE_PREFIX/manager:$IMAGE_TAG
before_script:
- - cd manager
- poetry --version
- poetry env use $PYTHON_INTERPRETER
tags:
@@ -32,19 +31,17 @@ lint:py3.12:
stage: check
script:
- poetry install --only main,dev,test
- # create required directories that are in default config, otherwise unit tests fail
- - mkdir -p /var/cache/knot-resolver
- poe test
# the following command makes sure that the source root of the coverage file is at $gitroot
- - poetry run bash -c "cd ..; coverage combine manager/.coverage; coverage xml"
+ - poetry run bash -c "coverage combine .coverage; coverage xml"
artifacts:
reports:
coverage_report:
coverage_format: cobertura
path: coverage.xml
- junit: manager/unit.junit.xml
+ junit: unit.junit.xml
paths:
- - manager/unit.junit.xml
+ - unit.junit.xml
unit:py3.8:
<<: *unit
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 8531e5cc..bf1b37f2 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -20,7 +20,7 @@ variables:
# IMAGE_TAG is a Git branch/tag name from https://gitlab.nic.cz/knot/knot-resolver-ci
# In general, keep it pointing to a tag - use a branch only for development.
# More info in the knot-resolver-ci repository.
- IMAGE_TAG: 'v20240606'
+ IMAGE_TAG: 'v20240924'
IMAGE_PREFIX: '$CI_REGISTRY/knot/knot-resolver-ci'
image: $IMAGE_PREFIX/debian12-knot_3_3:$IMAGE_TAG
@@ -130,7 +130,7 @@ build-arch:
build-stable:
<<: *build
script:
- - meson build_ci_stable --prefix=$PREFIX -Dmalloc=disabled -Dwerror=true -Dextra_tests=enabled
+ - meson build_ci_stable --prefix=$PREFIX -Dmalloc=disabled -Dwerror=true -Dextra_tests=enabled -Dbench=enabled
- ninja -C build_ci_stable
- ninja -C build_ci_stable install >/dev/null
- ${MESON_TEST} --suite unit --suite config --suite dnstap --no-suite snowflake
@@ -391,7 +391,7 @@ manager:
stage: test
needs: []
trigger:
- include: manager/.gitlab-ci.yml
+ include: .gitlab-ci.manager.yml
strategy: depend
except:
refs:
@@ -579,7 +579,7 @@ obs:trigger: &obs_trigger
- pip install --upgrade pip
- pip install apkg
- scripts/make-obs.sh
- - echo y | scripts/build-in-obs.sh $OBS_REPO
+ - echo y | scripts/ci/build-in-obs.sh $OBS_REPO
obs:release:
<<: *obs_trigger
@@ -606,7 +606,7 @@ obs:odvr:
.enable_repo_build: &enable_repo_build
before_script:
- - ./scripts/enable-repo-cznic-labs.sh knot-dns
+ - ./scripts/ci/enable-repo-cznic-labs.sh knot-dns
.pkg_test: &pkg_test
stage: pkg
@@ -698,9 +698,14 @@ pkg:alma-9:
pkg:arch:
<<: *pkg_test_user
- image: $CI_REGISTRY/packaging/apkg/full/arch
+ image: $CI_REGISTRY/packaging/apkg/test/arch
+ tags:
+ - docker
+ - linux
+ - amd64
before_script:
- pacman -Syy
+ - pip install apkg
# RHEL 8 derivatives would need more work due to *default* python being old
#pkg:rocky-8:
diff --git a/.python-version b/.python-version
new file mode 100644
index 00000000..8234e317
--- /dev/null
+++ b/.python-version
@@ -0,0 +1,5 @@
+3.8.19
+3.9.19
+3.10.14
+3.11.9
+3.12.4
diff --git a/manager/ARCHITECTURE.md b/ARCHITECTURE.md
index 18df7885..18df7885 100644
--- a/manager/ARCHITECTURE.md
+++ b/ARCHITECTURE.md
diff --git a/Dockerfile b/Dockerfile
index 689b17a4..613636e3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -59,7 +59,7 @@ RUN apt-get install -y /pkg/*/*.deb && \
rm -rf /var/lib/apt/lists/* && \
mkdir /config
-COPY manager/etc/knot-resolver/config.example.docker.yaml /config/config.yaml
+COPY etc/config/config.example.docker.yaml /config/config.yaml
LABEL cz.knot-resolver.vendor="CZ.NIC"
LABEL maintainer="knot-resolver-users@lists.nic.cz"
diff --git a/manager/ERROR_HANDLING.md b/ERROR_HANDLING.md
index 770227b8..770227b8 100644
--- a/manager/ERROR_HANDLING.md
+++ b/ERROR_HANDLING.md
diff --git a/NEWS b/NEWS
index d84537d3..8d19f937 100644
--- a/NEWS
+++ b/NEWS
@@ -1,12 +1,37 @@
Knot Resolver 6.0.9 (2024-mm-dd)
================================
+Incompatible changes
+--------------------
+- -f/--forks is removed (#631, !1602)
+
Improvements
------------
- manager: secret for TLS session resumption via ticket (RFC5077) (!1567)
- The manager creates and sets the secret for all running 'kresd' workers. The secret is created automatically if the user does not configure his own secret in the configuration. This means that the workers will be able to resume each other's TLS sessions, regardless of whether the user has configured it to do so.
+ The manager creates and sets the secret for all running 'kresd' workers.
+ The secret is created automatically if the user does not configure their own secret in the configuration.
+ This means that the workers will be able to resume each other's TLS sessions, regardless of whether the user has configured it to do so.
+
+- answer NOTIMPL for meta-types and non-IN RR classes (!1589)
+- views: improve interaction with old-style policies (!1576)
+- stats: add stale answer counter 'answer.stale' (!1591)
+- extended_errors: answer with EDE in more cases (!1585, !1588, !1590, !1592)
+- local-data: make DNAMEs work, i.e. generate CNAMEs (!1609)
+- daemon: use connected UDP sockets by default (#326, !1618)
+
+Bugfixes
+--------
+
+- daemon/proxyv2: fix informing the engine about TCP/TLS from the actual client (!1578)
+- forward: fix wrong pin-sha256 length; also log pins on mismatch (!1601, #813)
+
+Incompatible changes
+--------------------
+- gnutls < 3.4 support is dropped, released over 9 years ago (!1601)
+- libuv < 1.27 support is dropped, released over 5 years ago (!1618)
+
Knot Resolver 6.0.8 (2024-07-23)
================================
diff --git a/README.md b/README.md
index 5a3bb1ac..51a9976d 100644
--- a/README.md
+++ b/README.md
@@ -4,24 +4,23 @@
[![Coverage Status](https://gitlab.nic.cz/knot/knot-resolver/badges/nightly/coverage.svg?x)](https://www.knot-resolver.cz/documentation/latest)
[![Packaging status](https://repology.org/badge/tiny-repos/knot-resolver.svg)](https://repology.org/project/knot-resolver/versions)
-Knot Resolver is a caching full resolver implementation written in C and [LuaJIT][luajit], both a resolver library and a daemon. The core architecture is tiny and efficient, and provides a foundation and
-a state-machine like API for extensions. There are three modules built-in - *iterator*, *validator*, *cache*, and a few more are loaded by default. Most of the [rich features](https://www.knot-resolver.cz/documentation/latest/config-overview.html) are written in Lua(JIT) and C. Batteries are included, but optional.
+Knot Resolver is a full caching DNS resolver implementation. The core architecture is tiny and efficient, written in C and [LuaJIT][luajit], providing a foundation and a state-machine-like API for extension modules. There are three built-in modules - *iterator*, *validator* and *cache* - which provide the main functionality of the resolver. A few other modules are automatically loaded by default to extend the resolver's functionality.
-The LuaJIT modules, support DNS privacy and DNSSEC, and persistent cache with low memory footprint make it a great personal DNS resolver or a research tool to tap into DNS data. TL;DR it's the [OpenResty][openresty] of DNS.
+Since Knot Resolver version 6, it also includes a so-called [manager][manager]. It is a new component written in [Python][python] that hides the complexity of older versions and makes the resolver more user-friendly. New features include, among others, declarative configuration in YAML format and an HTTP API for dynamic changes to the resolver.
-Strong filtering rules, and auto-configuration with etcd make it a great large-scale resolver solution.
+Knot Resolver uses a [different scaling strategy][scaling] than the rest of the DNS resolvers - no threading, shared-nothing architecture (except MVCC cache which can be shared), which allows you to pin workers to available CPU cores and grow by self-replication. You can start and stop additional workers based on the contention without downtime, which is automated by the [manager][manager] by default.
-The server adopts a [different scaling strategy][scaling] than the rest of the DNS recursors - no threading, shared-nothing architecture (except MVCC cache that may be shared) that allows you to pin instances on available CPU cores and grow by self-replication. You can start and stop additional nodes depending on the contention without downtime, which is by default automated by the included [manager][manager].
+The LuaJIT modules, support for DNS privacy and DNSSEC, and persistent cache with low memory footprint make it a great personal DNS resolver or a research tool to tap into DNS data. Strong filtering rules, and auto-configuration with etcd make it a great large-scale resolver solution. It also has strong support for DNS over TCP, in particular TCP Fast-Open, query pipelining and deduplication, and response reordering.
-It also has strong support for DNS over TCP, notably TCP Fast-Open, query pipelining and deduplication, and response reordering.
+For more on using the resolver, see the [User Documentation][doc]. See the [Developer Documentation][doc-dev] for detailed architecture and development.
-### Packages
+## Packages
The latest stable packages for various distributions are available in our
[upstream repository](https://pkg.labs.nic.cz/doc/?project=knot-resolver).
Follow the installation instructions to add this repository to your system.
-Knot Resolver is also available from the following distributions' repositories.
+Knot Resolver is also available from the following distributions' repositories:
* [Fedora and Fedora EPEL](https://src.fedoraproject.org/rpms/knot-resolver)
* [Debian stable](https://packages.debian.org/stable/knot-resolver),
@@ -31,43 +30,50 @@ Knot Resolver is also available from the following distributions' repositories.
* [Arch Linux](https://archlinux.org/packages/extra/x86_64/knot-resolver/)
* [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=knot-resolver)
-### Building from sources
+### Packaging
-Knot Resolver mainly [depends][depends] on Knot DNS libraries, [LuaJIT][luajit], and [libuv][libuv].
-See the [Building project][depends] documentation page for more information.
+The project uses [`apkg`](https://gitlab.nic.cz/packaging/apkg) for packaging.
+See [`distro/README.md`](distro/README.md) for packaging specific instructions.
-### Docker image
+## Building from sources
-This is simple and doesn't require any dependencies or system modifications, just run:
+Knot Resolver mainly depends on [KnotDNS][knot-dns] libraries, [LuaJIT][luajit], [libuv][libuv] and [Python][python].
+
+See the [Building project][build] documentation page for more information.
+
+## Running
+
+By default, Knot Resolver comes with [systemd][systemd] integration and you just need to start its service. It requires no configuration changes to run a server on localhost.
```
-$ docker run -Pit cznic/knot-resolver
+# systemctl start knot-resolver
```
-The images are meant as an easy way to try knot-resolver, and they're not designed for production use.
+See the documentation at [knot-resolver.cz/documentation/latest][doc] for more information.
-### Running
+## Running the Docker image
-The project builds a resolver library in the `lib` directory, and a daemon in the `daemon` directory. It requires no configuration or parameters to run a server on localhost.
+Running the Docker image is simple and doesn't require any dependencies or system modifications, just run:
```
-$ kresd
+$ docker run -Pit cznic/knot-resolver
```
-See the documentation at [knot-resolver.cz/documentation/latest][doc] for more options.
-
-[depends]: https://www.knot-resolver.cz/documentation/latest/dev/build.html
-[doc]: https://www.knot-resolver.cz/documentation/latest/
-[scaling]: https://www.knot-resolver.cz/documentation/latest/config-multiple-workers.html
-[manager]: https://www.knot-resolver.cz/documentation/latest/architecture-manager.html
-[deckard]: https://gitlab.nic.cz/knot/deckard
-[luajit]: https://luajit.org/
-[libuv]: http://libuv.org
-[openresty]: https://openresty.org/
+The images are meant as an easy way to try the resolver, and they're not designed for production use.
-### Contacting us
+## Contacting us
- [GitLab issues](https://gitlab.nic.cz/knot/knot-resolver/issues) (you may authenticate via GitHub)
- [mailing list](https://lists.nic.cz/postorius/lists/knot-resolver-announce.lists.nic.cz/)
- [![Join the chat at https://gitter.im/CZ-NIC/knot-resolver](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/CZ-NIC/knot-resolver?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[build]: https://www.knot-resolver.cz/documentation/latest/dev/build.html
+[doc]: https://www.knot-resolver.cz/documentation/latest/
+[doc-dev]: https://www.knot-resolver.cz/documentation/latest/dev
+[knot-dns]: https://www.knot-dns.cz/
+[luajit]: https://luajit.org/
+[libuv]: http://libuv.org
+[python]: https://www.python.org/
+[systemd]: https://systemd.io/
+[scaling]: https://www.knot-resolver.cz/documentation/latest/config-multiple-workers.html
+[manager]: https://www.knot-resolver.cz/documentation/latest/dev/architecture.html
diff --git a/bench/bench_lru.c b/bench/bench_lru.c
index 06f77c0d..b0060773 100644
--- a/bench/bench_lru.c
+++ b/bench/bench_lru.c
@@ -13,13 +13,13 @@
#include "daemon/engine.h"
#include "lib/selection.h"
-typedef kr_nsrep_lru_t lru_bench_t;
+typedef lru_t(unsigned) lru_bench_t;
#define p_out(...) do { \
printf(__VA_ARGS__); \
- fflush(stdout); \
- } while (0)
-#define p_err(...) fprintf(stderr, __VA_ARGS__)
+ (void)fflush(stdout); \
+} while (0)
+#define p_err(...) ((void)fprintf(stderr, __VA_ARGS__))
#ifndef LRU_RTT_SIZE
#define LRU_RTT_SIZE 65536 /**< NS RTT cache size */
@@ -27,7 +27,7 @@ typedef kr_nsrep_lru_t lru_bench_t;
static int die(const char *cause)
{
- fprintf(stderr, "%s: %s\n", cause, strerror(errno));
+ (void)fprintf(stderr, "%s: %s\n", cause, strerror(errno));
exit(1);
}
@@ -171,7 +171,7 @@ int main(int argc, char ** argv)
struct key *keys = read_lines(argv[2], &key_count, &data_to_free);
size_t run_count;
{
- size_t run_log = atoi(argv[1]);
+ size_t run_log = atoi(argv[1]); // NOLINT: atoi is fine for this tool...
assert(run_log < 64);
run_count = 1ULL << run_log;
p_err("\ntest run length:\t2^");
@@ -179,7 +179,7 @@ int main(int argc, char ** argv)
}
struct timeval time;
- const int lru_size = argc > 4 ? atoi(argv[4]) : LRU_RTT_SIZE;
+ const int lru_size = argc > 4 ? atoi(argv[4]) : LRU_RTT_SIZE; // NOLINT: ditto atoi
lru_bench_t *lru;
#ifdef lru_create
diff --git a/bench/meson.build b/bench/meson.build
index b15dd0f7..a44bf7cc 100644
--- a/bench/meson.build
+++ b/bench/meson.build
@@ -20,5 +20,5 @@ bench_lru = executable(
run_target(
'bench',
- command: '../scripts/bench.sh',
+ command: '../scripts/meson/bench.sh',
)
diff --git a/manager/build_c_extensions.py b/build_c_extensions.py
index 5406433b..77c197a1 100644
--- a/manager/build_c_extensions.py
+++ b/build_c_extensions.py
@@ -8,8 +8,8 @@ def build(setup_kwargs: Dict[Any, Any]) -> None:
{
"ext_modules": [
Extension(
- name="knot_resolver_manager.kresd_controller.supervisord.plugin.notify",
- sources=["knot_resolver_manager/kresd_controller/supervisord/plugin/notifymodule.c"],
+ name="knot_resolver.controller.supervisord.plugin.notify",
+ sources=["python/knot_resolver/controller/supervisord/plugin/notifymodule.c"],
),
]
}
diff --git a/ci/gh_actions.py b/ci/gh_actions.py
index bbeb3b34..b99096f5 100755
--- a/ci/gh_actions.py
+++ b/ci/gh_actions.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-3.0-or-later
import json
import time
diff --git a/ci/images/manager/Dockerfile b/ci/images/manager/Dockerfile
deleted file mode 100644
index 69ed251e..00000000
--- a/ci/images/manager/Dockerfile
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-FROM fedora:38
-LABEL Knot Resolver <knot-resolver@labs.nic.cz>
-
-WORKDIR /root
-CMD ["/bin/bash"]
-ENV PATH="/root/.local/bin:${PATH}"
-
-# Install Python and deps
-RUN dnf install -y\
- python3.8 python3.9 python3.10 python3.10-devel\
- python3.11 python3.11-devel python3.12 python3.12-devel\
- python3-gobject pipx git which diffutils gcc pkg-config\
- cairo-devel gobject-introspection-devel cairo-gobject-devel\
- && dnf clean all
-
-# Install poetry
-RUN pipx install poetry==1.4.2\
- # not exactly required, but helpful
- && pipx install poethepoet
diff --git a/ci/respdiff/run-respdiff-tests.sh b/ci/respdiff/run-respdiff-tests.sh
index 2bfc44d9..6f065038 100755
--- a/ci/respdiff/run-respdiff-tests.sh
+++ b/ci/respdiff/run-respdiff-tests.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
# $1 == udp/tcp/tls, it selects configuration file to use
diff --git a/daemon/engine.h b/daemon/engine.h
index e25590a8..4699b77d 100644
--- a/daemon/engine.h
+++ b/daemon/engine.h
@@ -93,7 +93,6 @@ struct args {
addr_array_t addrs, addrs_tls;
flagged_fd_array_t fds;
int control_fd;
- int forks;
config_array_t config;
const char *rundir;
bool interactive;
diff --git a/daemon/io.c b/daemon/io.c
index 86bd314e..36648907 100644
--- a/daemon/io.c
+++ b/daemon/io.c
@@ -67,7 +67,7 @@ void udp_recv(uv_udp_t *handle, ssize_t nread, const uv_buf_t *buf,
if (s->closing || nread <= 0 || comm_addr->sa_family == AF_UNSPEC)
return;
- if (s->outgoing) {
+ if (!the_network->enable_connect_udp && s->outgoing) {
const struct sockaddr *peer = session2_get_peer(s);
if (kr_fails_assert(peer->sa_family != AF_UNSPEC))
return;
diff --git a/daemon/layered-protocols.rst b/daemon/layered-protocols.rst
new file mode 100644
index 00000000..ffcef923
--- /dev/null
+++ b/daemon/layered-protocols.rst
@@ -0,0 +1,445 @@
+Layered protocols
+=================
+
+Motivation
+----------
+
+One of the bigger changes made in Knot Resolver 6 is the almost complete
+rewrite of its I/O (input/output) system and management of communication
+sessions.
+
+To understand why this rewrite was needed, let us first take a brief
+look at the history of Knot Resolver’s I/O.
+
+In the beginning, the Resolver’s I/O was really quite simple. As it only
+supported DNS over plain UDP and TCP (nowadays collectively called Do53
+after the standardized DNS port), there used to be only two quite
+distinct code paths for communication – one for UDP and one for TCP.
+
+As time went on and privacy became an important concern in the internet
+community, we gained two more standardized transports over which DNS
+could be communicated: TLS and HTTPS. Both of these run atop TCP, with
+HTTPS additionally running on top of TLS. It thus makes sense that all
+three share some of the code relevant to all of them. However, up until
+the rewrite, all three transports were quite entangled in a single big
+mess of code, making the I/O system increasingly harder to maintain as
+the Resolver was gaining more and more I/O-related features (one of the
+more recent ones pertaining to that part of the code being the support for the
+`PROXY protocol <https://github.com/haproxy/haproxy/blob/master/doc/proxy-protocol.txt>`__).
+
+Another aspect that led to the decision to ultimately rewrite the whole
+thing was the plan to add support for *DNS-over-QUIC* (DoQ). QUIC is a
+special kind of beast among communication protocols. It runs on top of
+**UDP**, integrates TLS, and – unlike TCP, where each connection creates
+only a single stream – it can create *multiple independent streams in a
+single connection*. This means that, with only a single TLS handshake
+(which is a very costly part of any connection establishment routine),
+one can create multiple streams of data that do not have to wait for
+each other [1]_, which allows for theoretically very efficient encrypted
+communication. On the other hand, it also means that Knot Resolver was
+increasingly ill-prepared for the future, because there was no way the
+status quo could accommodate such connections.
+
+Enter the rewrite. One of the goals of this effort was to prepare Knot
+Resolver for the eventual implementation of QUIC, as well as to untangle
+its I/O system and make it easier to maintain and reason about in
+general. But before we start rewriting, we first need to
+understand *sessions*.
+
+Sessions, tasks, wire buffers, protocol ceremony
+------------------------------------------------
+
+Knot Resolver has long been using the concept of so-called *sessions*. A
+session is a data structure (``struct session``) generally holding
+information about a connection in the case of TCP, some shared
+information about the listening socket in the case of incoming UDP, or
+information about I/O towards an authoritative DNS server in the case of
+outgoing UDP. This information includes, among other things, a bit field
+of flags, which tell us whether the session is *outgoing* (i.e. towards
+an authoritative server, instead of a client), whether it has been
+*throttled*, whether the connection has been established (or is yet
+waiting to be established), and more. Historically, in Knot Resolver
+<=5, it also contained information about whether TLS and/or HTTPS was
+being used for a particular session.
+
+Sessions also keep track of so-called *query resolution tasks*
+(``struct qr_task``) – these can be thought of as units of data about a
+query that is being resolved, either *incoming* (i.e. from a client) or
+*outgoing* (i.e. to an authoritative server). As it is not unusual for
+tasks to be relevant to multiple sessions (a client or even multiple
+ones asking the same query, the authoritative servers that are being
+consulted for the right answer), they are reference-counted, and their
+lifetime may at times look quite blurry to the programmer, since we
+refer to them from multiple places (e.g. the sessions, I/O handles,
+timers, etc.). If we get the reference counting wrong, we may either
+free a task’s memory too early, or we may get a dangling task –
+basically a harder-to-catch memory leak. Since there usually is
+*something* pointing to the task, common leak detectors will not be able
+to find such a leak.
+
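+The counting itself is simple; the hard part is pairing the operations
+correctly across all the callbacks that hold a pointer. As a toy
+illustration only (this is *not* the actual ``qr_task`` API, the names
+are made up):
+
+.. code:: c
+
+   #include <stdlib.h>
+
+   struct task { int refs; /* ...query state... */ };
+
+   static struct task *task_new(void)
+   {
+       struct task *t = calloc(1, sizeof(*t));
+       if (t) t->refs = 1; /* the creator holds the first reference */
+       return t;
+   }
+   static void task_ref(struct task *t)   { t->refs++; }
+   static void task_unref(struct task *t) { if (--t->refs == 0) free(t); }
+
+Every place that stores a pointer to the task (a session, an I/O handle,
+a timer) must take a reference and release it later; one missing
+``task_unref()`` keeps the task alive forever, which is exactly the kind
+of leak described above.
+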
+In addition to this, a session also holds a *wire buffer* – this is a
+fixed-length buffer we fill with DNS queries in the binary format
+defined by the DNS standard (called the *wire format*, hence the name
+*wire buffer*). This buffer is kept per-connection for TCP and
+per-endpoint for UDP and (a portion of it) is passed to the ``libuv``
+library for the operating system to write the data into during
+asynchronous I/O operations.
+
+The wire buffer is used for **input** and is controlled by two indices –
+*start* and *end*. These tell us which parts of the wire buffer contain
+valid but as yet unprocessed data. In UDP, we get the whole DNS
+message at once, together with its length, so this mechanism is not as
+important there; but in TCP, we only get the concept of a contiguous
+stream of bytes in the user space. There is no guarantee in how much of
+a DNS message we get on a single receive callback, so it is common that
+DNS messages need to be *pieced together*.
+
+In order to parse DNS messages received over TCP, we need two things:
+the DNS standard-defined 16-bit message length that is prepended to each
+actual DNS message in a stream; and a buffer into which we continuously
+write our bytes until we have the whole message. With the *end* index,
+we can keep track of where in the buffer we are, appending to the end of
+what has already been written. This way we get the whole DNS message
+even if received piecewise.
+
+But what about the *start* index? What is *that* for? Well, we can use
+it to strip protocol “ceremony” from the beginning of the message. This
+may be the 16-bit message length, a PROXY protocol header, or possibly
+other data. This ceremony stripping allows us to eventually pass the
+whole message to the exact same logic that processes UDP DNS messages,
+once we are done with all of it.
+
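+To make the two indices more concrete, here is a minimal sketch of the
+piecing-together logic for a plain TCP stream, assuming a simplified
+buffer structure; the names (``struct wbuf``, ``wbuf_next_msg``) are
+invented for this example and do not correspond to the actual Knot
+Resolver code:
+
+.. code:: c
+
+   #include <stddef.h>
+   #include <stdint.h>
+
+   struct wbuf {
+       uint8_t buf[65536 + 2]; /* fixed-length wire buffer */
+       size_t start;           /* first valid, unprocessed byte */
+       size_t end;             /* one past the last valid byte */
+   };
+
+   /* Returns the length of one complete DNS message and points *msg at
+    * it, or returns 0 when more bytes are needed (the caller keeps
+    * appending newly received data at 'end' and retries). */
+   static size_t wbuf_next_msg(struct wbuf *wb, const uint8_t **msg)
+   {
+       size_t avail = wb->end - wb->start;
+       if (avail < 2)
+           return 0; /* not even the 16-bit length prefix yet */
+       uint16_t len = (uint16_t)((wb->buf[wb->start] << 8) | wb->buf[wb->start + 1]);
+       if (avail < (size_t)len + 2)
+           return 0; /* message still incomplete */
+       *msg = wb->buf + wb->start + 2; /* strip the length "ceremony" */
+       wb->start += (size_t)len + 2;   /* advance past the consumed message */
+       return len;
+   }
+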
+This is however not the whole story of ceremony stripping. As mentioned,
+in TCP there are two more protocols that share this same code path, and
+those are *DNS-over-TLS* (DoT) and *DNS-over-HTTPS* (DoH). For TLS and
+HTTP/2 (only the first one in the case of DoT, and both together in the
+case of DoH), we need to *decode* the buffer and store the results in
+*another* buffer, since the ceremony is not simply prepended to the rest
+of the message, but it basically transforms its whole content.
+
+Now, for **output**, the process is quite similar, just in reverse – we
+prepend the 16-bit message length and encode the resulting bytes using
+HTTP/2 and/or TLS. To save us some copying and memory allocations, we
+actually do not need to use any special wire buffer or other contiguous
+memory area mechanism. Instead, we leverage I/O vectors
+(``struct iovec``) defined by POSIX, through which we basically provide
+the OS with multiple separate buffers and only tell it which order these
+buffers are supposed to be sent in.
+
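+As a rough illustration of the idea only (names invented, error handling
+omitted, and using plain POSIX ``writev()`` rather than the asynchronous
+``libuv`` calls), sending one DNS message over TCP can then look like
+this:
+
+.. code:: c
+
+   #include <arpa/inet.h>
+   #include <stdint.h>
+   #include <sys/uio.h>
+
+   static ssize_t send_dns_over_tcp(int fd, const uint8_t *msg, uint16_t len)
+   {
+       uint16_t prefix = htons(len); /* the prepended 16-bit length */
+       struct iovec iov[2] = {
+           { .iov_base = &prefix,     .iov_len = sizeof(prefix) },
+           { .iov_base = (void *)msg, .iov_len = len },
+       };
+       /* two separate buffers, sent in order, no copying into one area */
+       return writev(fd, iov, 2);
+   }
+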
+Isolation of protocols
+----------------------
+
+Let us now look at Knot Resolver from another perspective. Here is what
+it generally does from a very high-level point of view: it takes a
+client’s *incoming* DNS query message from the I/O, parses it and
+figures out what to do to resolve it (i.e. either takes the answer from
+the cache, or *asks around* in the network of authoritative servers [2]_
+– utilizing the I/O again, but with an *outgoing* DNS query). Then it
+puts together an answer and hands it back over to the I/O towards the
+client. This basic logic is (mostly) the same for all types of I/O – it
+does not matter whether the request came through Do53, DoH, DoT, or DoQ,
+this core part will always do the same thing.
+
+As already indicated, the I/O basically works in two directions:
+
+- it either takes the wire bytes and transforms them into something the
+ main DNS resolver decision-making system can work with (i.e. it
+ strips them of the “ceremony” imposed by the protocols used) – we
+ call this the *unwrap direction*;
+- or it takes the resolved DNS data and transforms it back into the
+ wire format (i.e. adds the imposed “ceremony”) – we call this the
+ *wrap direction*.
+
+If we look at it from the perspective of the OSI model [3]_, in the
+*unwrap direction* we climb *up* the protocol stack; in the *wrap
+direction* we step *down*.
+
+It is also important to note that the code handling each of the
+protocols may for the most part only be concerned with its own domain.
+PROXYv2 may only check the PROXY header and modify transport
+metadata [4]_; TLS may only take care of securing the connection,
+encrypting and decrypting input bytes; HTTP/2 may only take care of
+adding HTTP metadata (headers, methods, etc.) and encoding/decoding the
+data streams; etc. The protocols basically do not have to know much of
+anything about each other, they only see the input bytes without much
+context, and transform them into output bytes.
+
+Since the code around protocol management used to be quite tangled
+together, it required us to jump through hoops in terms of resource
+management: allocating and deallocating additional buffers required for
+decoding in ways that are hard to reason about, and managing the
+aforementioned tasks and their reference-counting, which is very
+error-prone in unmanaged programming languages like C, where the
+counting needs to be done manually.
+
+Asynchronous I/O complicates this even further. Flow control is not
+“straight-through” as with synchronous I/O, which meant that we needed
+to wait for finishing callbacks, the order of which may not always be
+reliably predictable, to free some of the required resources.
+
+All of this and more makes the lifecycles of different resources and/or
+objects rather unclear and hard to think about, leading to bugs that are
+not easy to track down.
+
+To clear things up, we have decided to basically tear out most of the
+existing code around sessions and transport protocols and reimplement it
+using a new system we call *protocol layers*.
+
+Protocol layers
+---------------
+
+.. note::
+
+ For this next part, it may be useful to open up the
+ `Knot Resolver sources <https://gitlab.nic.cz/knot/knot-resolver>`__,
+ find the ``daemon/session2.h`` and ``daemon/session2.c`` files and use them
+ as a reference while reading this post.
+
+In Knot Resolver 6, protocols are organized into what are essentially
+virtual function tables, much like in the object-oriented model of C++
+and other languages. There is a ``struct protolayer_globals`` defining a
+protocol’s interface: mainly pointers to functions that are responsible
+for state management and the actual data transformation, plus some other
+metadata, like the size of a layer’s state struct.
+
+Layers are organized in *sequences* (static arrays of
+``enum protolayer_type``). A sequence is based on what the *high-level
+protocol* is; for example, DNS-over-HTTPS, one of the high-level
+protocols, has a sequence of these five lower-level protocols, in
+*unwrap* order: TCP, PROXYv2, TLS, HTTP, and DNS.
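+
+As an illustration, such a sequence could be declared roughly like the
+following sketch. The exact enumerator names for the TCP, TLS and DNS
+layers are assumptions here – see the doc comment of the
+``PROTOLAYER_TYPE_MAP`` macro in ``daemon/session2.h`` for the real set
+of layers and how the sequences are actually put together.
+
+.. code:: c
+
+    /* Sketch only: a DNS-over-HTTPS layer sequence in *unwrap* order.
+     * PROTOLAYER_TYPE_PROXYV2_STREAM and PROTOLAYER_TYPE_HTTP are real;
+     * the other enumerator names are assumed for illustration. */
+    static const enum protolayer_type doh_sequence[] = {
+        PROTOLAYER_TYPE_TCP,
+        PROTOLAYER_TYPE_PROXYV2_STREAM,
+        PROTOLAYER_TYPE_TLS,
+        PROTOLAYER_TYPE_HTTP,
+        PROTOLAYER_TYPE_DNS,
+    };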
+
+This is then utilized by a layer management system, which takes a
+*payload* – i.e. a chunk of data – and loops over each layer in the
+sequence, passing said payload to the layer’s *unwrap* or *wrap*
+callbacks, depending on whether the payload is being received from the
+network or generated and sent by Knot Resolver, respectively (as
+described above). The ``struct protolayer_globals`` member callbacks
+``unwrap`` and ``wrap`` are responsible for the transformation itself,
+each in the direction to which its name alludes.
+
+Also note that the order of layer traversal is – unsurprisingly –
+reversed between *wrap* and *unwrap* directions.
+
+This is the basic idea of protocol layers – we take a payload and
+process it with a pipeline of layers to be either sent out, or processed
+by Knot Resolver.
+
+The layer management system also permits any layer to interrupt the
+payload processing, switching from synchronous to asynchronous
+operation. Layers may also produce payloads without being prompted by a
+previous layer.
+
+Both of these are necessary because in some layers, like HTTP and TLS,
+input and output payloads are not always in a one-to-one relationship,
+i.e. we may need to receive multiple input payloads for HTTP to produce
+an output payload. Some layers may also need to produce payloads without
+having received *any* input payloads, like when there is an ongoing TLS
+handshake. An upcoming *query prioritization* feature also utilizes the
+interruption mechanism to defer the processing of payloads to a later
+point in time.
+
+Apart from the aforementioned callbacks, layers may define other
+parameters. As mentioned, layers may declare their own custom state
+structs, per-session and/or per-payload, to hold their context in,
+should they need it. There are also callbacks for
+initialization and deinitialization of the layer, again per-session
+and/or per-payload, which are primarily meant to (de)initialize said
+structs, but may well be used for other preparation tasks. There is also
+a simple system in place for handling events that may occur, like
+session closure (both graceful and forced), timeouts, OS buffer
+fill-ups, and more.
+
+Defining a protocol
+~~~~~~~~~~~~~~~~~~~
+
+A globals table for HTTP may look something like this:
+
+.. code:: c
+
+ protolayer_globals[PROTOLAYER_TYPE_HTTP] = (struct protolayer_globals){
+ .sess_size = sizeof(struct pl_http_sess_data),
+ .sess_deinit = pl_http_sess_deinit,
+ .wire_buf_overhead = HTTP_MAX_FRAME_SIZE,
+ .sess_init = pl_http_sess_init,
+ .unwrap = pl_http_unwrap,
+ .wrap = pl_http_wrap,
+ .event_unwrap = pl_http_event_unwrap,
+ .request_init = pl_http_request_init
+ };
+
+Note that this is using the `C99 compound literal syntax
+<https://en.cppreference.com/w/c/language/compound_literal>`__,
+in which unspecified members are set to zero. The interface is designed
+so that all of its parts may be specified on an as-needed basis – all of
+its fields are optional and zeroes are a valid option [5]_. In the case
+illustrated above, HTTP uses almost the full interface, so most members
+in the struct are populated. The PROXYv2 implementations (separate
+variants for UDP and TCP), on the other hand, are quite simple, only
+requiring ``unwrap`` handlers and tiny structs for state:
+
+.. code:: c
+
+ // Note that we use the same state struct for both DGRAM and STREAM, but in
+ // DGRAM it is per-iteration, while in STREAM it is per-session.
+
+ protolayer_globals[PROTOLAYER_TYPE_PROXYV2_DGRAM] = (struct protolayer_globals){
+ .iter_size = sizeof(struct pl_proxyv2_state),
+ .unwrap = pl_proxyv2_dgram_unwrap,
+ };
+
+ protolayer_globals[PROTOLAYER_TYPE_PROXYV2_STREAM] = (struct protolayer_globals){
+ .sess_size = sizeof(struct pl_proxyv2_state),
+ .unwrap = pl_proxyv2_stream_unwrap,
+ };
+
+Transforming payloads
+~~~~~~~~~~~~~~~~~~~~~
+
+Let us now look at the ``wrap`` and ``unwrap`` callbacks. They are both
+of the same type, ``protolayer_iter_cb``, specified by the following C
+declaration:
+
+.. code:: c
+
+ typedef enum protolayer_iter_cb_result (*protolayer_iter_cb)(
+ void *sess_data,
+ void *iter_data,
+ struct protolayer_iter_ctx *ctx);
+
+A function of this type takes two ``void *`` pointers to layer-specific
+state structs, allocated according to the ``sess_size`` and
+``iter_size`` members of ``protolayer_globals`` for the currently
+processed layer. These have a *session* lifetime and a
+so-called *iteration* lifetime, respectively. An *iteration* here is
+what we call the process of going through a sequence of protocol layers,
+transforming a payload one-by-one until either an internal system is
+reached (in the *unwrap* direction), or the I/O is used to transfer said
+payload (in the *wrap* direction). Iteration-lifetime structs are
+allocated and initialized when a new payload is constructed, and are
+freed when its processing ends. Session-lifetime structs are allocated
+and initialized, and then later deinitialized together with each
+session.
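+
+As a concrete example of such state, here is the struct used by the
+PROXYv2 layers (from ``daemon/proxyv2.c``) – registered with a *session*
+lifetime for the stream variant and an *iteration* lifetime for the
+datagram variant:
+
+.. code:: c
+
+    struct pl_proxyv2_state {
+        struct protolayer_data h; /* common layer-data header */
+        /** Storage for data parsed from the PROXY header. */
+        struct proxy_result proxy;
+        /** Stream/TCP: some data has already arrived and we are not
+         * expecting a PROXY header anymore. */
+        bool had_data : 1;
+    };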
+
+A struct pointing to the payload lives in the ``ctx`` parameter of the
+callback. This context lives through the whole *iteration* and contains
+data useful for both the system managing the protocol layers as a whole,
+and the implementations of individual layers, which actually includes
+the memory pointed to by ``iter_data`` (but the pointer is provided both
+as an optimization *and* for convenience). The rules for manipulating
+``struct protolayer_iter_ctx`` so that the whole system behaves in a
+well-defined manner are specified in its comments in the ``session2.h``
+file.
+
+You may have noticed that the callbacks’ return value,
+``enum protolayer_iter_cb_result``, actually has only a single value,
+``PROTOLAYER_ITER_CB_RESULT_MAGIC``, defined as a random number. This
+value is there only for sanity-checking. When implementing a layer, you
+are meant to exit the callbacks with one of the *layer sequence return
+functions*, which dictate how the control flow of the iteration is meant
+to continue:
+
+- ``protolayer_continue`` tells the system to simply pass the current
+ payload on to the next layer, or the I/O if this is the last layer.
+- ``protolayer_break`` tells the system to end the iteration on the
+ current payload, with the specified status code, which is going to be
+ logged in the debug log. The status is meant to be one of the
+ POSIX-defined ``errno`` values.
+- ``protolayer_async`` tells the system to interrupt the iteration on
+ the current payload, to be *continued* and/or *broken* at a later
+ point in time. The planning of this is the responsibility of the
+ layer that called the ``protolayer_async`` function – this gives the
+ layer absolute control of what is going to happen next, but, if not
+ done correctly, leaks will occur.
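+
+To make this concrete, a minimal ``unwrap`` callback using these return
+functions might look like the following sketch (the layer name, state
+struct and validation helper are purely illustrative):
+
+.. code:: c
+
+    static enum protolayer_iter_cb_result pl_example_unwrap(
+            void *sess_data, void *iter_data,
+            struct protolayer_iter_ctx *ctx)
+    {
+        struct pl_example_state *state = iter_data; /* illustrative */
+
+        /* Payloads we cannot make sense of end the iteration here. */
+        if (!example_payload_is_valid(ctx, state)) /* illustrative */
+            return protolayer_break(ctx, kr_error(EINVAL));
+
+        /* ...transform ctx->payload in place... */
+
+        /* Hand the payload over to the next layer in the sequence
+         * (or to the I/O, if this is the last layer). */
+        return protolayer_continue(ctx);
+    }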
+
+This system clearly defines the lifetime of
+``struct protolayer_iter_ctx`` and consequently all of its associated
+resources. The system creates the context when a payload is submitted to
+the pipeline, and destroys it either when ``protolayer_break`` is
+called, or the end of the layer sequence has been reached (including
+processing by the I/O in the *wrap* direction).
+
+When submitting payloads, the submitter is also allowed to define a
+callback for when the iteration has ended. This callback is called for
+**every** way the iteration may end (except for undetected leaks), even
+if it fails immediately, allowing for fine-grained control over
+resources with only a minimal amount of checks needed at the submission
+site.
+
+To implement a payload transform for a protocol, you simply modify the
+provided payload. Note that the memory a payload points to is always
+owned by the system that created it, so if a protocol requires extra
+resources for its transformation, it needs to manage them itself.
+
+The ``struct protolayer_iter_ctx`` provides a convenient ``pool``
+member, using the ``knot_mm_t`` interface from Knot DNS. This can be
+used by layers to allocate additional memory, which will get freed
+automatically at the end of the context’s lifetime. If a layer has any
+special needs regarding resource allocation, it needs to take care of
+them itself (preferably using its state struct) and free all of its
+allocated resources in its deinitialization callbacks.
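+
+A small sketch of how a layer might use the pool follows; it assumes the
+``mm_alloc()`` helper from Knot DNS and that ``pool`` is embedded
+directly in the context, as described above:
+
+.. code:: c
+
+    /* Inside a wrap/unwrap callback: scratch space tied to the
+     * iteration, freed automatically together with the context. */
+    uint8_t *scratch = mm_alloc(&ctx->pool, scratch_len);
+    if (!scratch)
+        return protolayer_break(ctx, kr_error(ENOMEM));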
+
+Events
+~~~~~~
+
+There is one more important aspect to protocol layers. Apart from
+payload transformation, the layers occasionally need to learn about,
+and/or let other layers know of, particular *events* that may occur.
+Events may let layers know that a session is about to close, or is being
+closed “forcefully” [6]_, that something has timed out, that a malformed
+message has been received, etc.
+
+The event system is similar to payload transformation in that it
+iterates over layers in the ``wrap`` and ``unwrap`` directions, but the
+procedure is simplified quite a bit. We never choose which direction we
+start in – we always start in ``unwrap``, then automatically bounce back
+and go in the ``wrap`` direction. Event handling is also never
+asynchronous and there is no special context allocated for event
+iterations.
+
+Each ``event_wrap`` and/or ``event_unwrap`` callback may return either
+``PROTOLAYER_EVENT_CONSUME`` to consume the event, stopping the
+iteration; or ``PROTOLAYER_EVENT_PROPAGATE`` to propagate the event to
+the next layer in sequence. The default (when there is no callback) is
+to propagate; well-behaved layers will also propagate all events that do
+not concern them.
+
+This provides us with a degree of abstraction – e.g. when using
+DNS-over-TLS towards an upstream server (currently only in forwarding),
+from the point of view of TCP a connection may have been established, so
+the I/O system sends a ``CONNECT`` event. This would normally (in plain
+TCP) signal the DNS layer to start sending queries, but TLS still needs
+to perform a secure handshake. So, TLS consumes the ``CONNECT`` event
+received from TCP, performs the handshake, and when it is done, it sends
+its own ``CONNECT`` event to subsequent layers.
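+
+An ``event_unwrap`` implementation for a layer that behaves like the TLS
+example above could look roughly like this sketch (the
+``PROTOLAYER_EVENT_CONNECT`` enumerator name is an assumption based on
+the event described above):
+
+.. code:: c
+
+    static enum protolayer_event_cb_result pl_example_event_unwrap(
+            enum protolayer_event_type event, void **baton,
+            struct session2 *session, void *sess_data)
+    {
+        if (event == PROTOLAYER_EVENT_CONNECT) {
+            /* Consume the event, start our own handshake here, and
+             * emit our own CONNECT towards subsequent layers later. */
+            return PROTOLAYER_EVENT_CONSUME;
+        }
+
+        /* Propagate everything we do not handle ourselves. */
+        return PROTOLAYER_EVENT_PROPAGATE;
+    }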
+
+.. [1]
+ Head-of-line blocking:
+ https://en.wikipedia.org/wiki/Head-of-line_blocking
+
+.. [2]
+ Plus DNSSEC validation, but that does not change this process from
+ the I/O point of view much either.
+
+.. [3]
+   Open Systems Interconnection model – a model commonly used to
+ describe network communications.
+ (`Wikipedia <https://en.wikipedia.org/wiki/OSI_model>`__)
+
+.. [4]
+ The metadata consists of IP addresses of the actual clients that
+ queried the resolver through a proxy using the PROXYv2 protocol – see
+ the relevant
+ `documentation <https://www.knot-resolver.cz/documentation/latest/config-network-server.html#proxyv2-protocol>`__.
+
+.. [5]
+ This neat pattern is sometimes called *ZII*, or *zero is
+ initialization*, `as coined by Casey
+ Muratori <https://www.youtube.com/watch?v=lzdKgeovBN0&t=1684s>`__.
+
+.. [6]
+ The difference between a forceful close and a graceful one is that
+ when closing gracefully, layers may still do some ceremony
+ (i.e. inform the other side that the connection is about to close).
+ With a forceful closure, we just stop communicating.
diff --git a/daemon/lua/kres-gen-33.lua b/daemon/lua/kres-gen-33.lua
index 24ace53d..042e3dfe 100644
--- a/daemon/lua/kres-gen-33.lua
+++ b/daemon/lua/kres-gen-33.lua
@@ -248,6 +248,7 @@ struct kr_request {
ranked_rr_array_t add_selected;
_Bool answ_validated;
_Bool auth_validated;
+ _Bool stale_accounted;
uint8_t rank;
struct kr_rplan rplan;
trace_log_f trace_log;
@@ -349,7 +350,7 @@ struct kr_query_data_src {
kr_rule_fwd_flags_t flags;
knot_db_val_t targets_ptr;
};
-enum kr_rule_sub_t {KR_RULE_SUB_EMPTY = 1, KR_RULE_SUB_NXDOMAIN, KR_RULE_SUB_NODATA, KR_RULE_SUB_REDIRECT};
+enum kr_rule_sub_t {KR_RULE_SUB_EMPTY = 1, KR_RULE_SUB_NXDOMAIN, KR_RULE_SUB_NODATA, KR_RULE_SUB_REDIRECT, KR_RULE_SUB_DNAME};
enum kr_proto {KR_PROTO_INTERNAL, KR_PROTO_UDP53, KR_PROTO_TCP53, KR_PROTO_DOT, KR_PROTO_DOH, KR_PROTO_DOQ, KR_PROTO_COUNT};
typedef unsigned char kr_proto_set;
kr_layer_t kr_layer_t_static;
@@ -489,6 +490,7 @@ int kr_ta_add(trie_t *, const knot_dname_t *, uint16_t, uint32_t, const uint8_t
int kr_ta_del(trie_t *, const knot_dname_t *);
void kr_ta_clear(trie_t *);
_Bool kr_dnssec_key_sep_flag(const uint8_t *);
+_Bool kr_dnssec_key_zonekey_flag(const uint8_t *);
_Bool kr_dnssec_key_revoked(const uint8_t *);
int kr_dnssec_key_tag(uint16_t, const uint8_t *, size_t);
int kr_dnssec_key_match(const uint8_t *, size_t, const uint8_t *, size_t);
@@ -542,7 +544,6 @@ struct args {
addr_array_t addrs_tls;
flagged_fd_array_t fds;
int control_fd;
- int forks;
config_array_t config;
const char *rundir;
_Bool interactive;
@@ -589,6 +590,7 @@ struct network {
int snd;
int rcv;
} listen_tcp_buflens;
+ _Bool enable_connect_udp;
};
struct args *the_args;
struct endpoint {
diff --git a/daemon/lua/kres-gen.sh b/daemon/lua/kres-gen.sh
index ab289b0d..76e9b632 100755
--- a/daemon/lua/kres-gen.sh
+++ b/daemon/lua/kres-gen.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
# Run with "ninja kres-gen" to re-generate $1
@@ -6,7 +6,7 @@ set -o pipefail -o errexit -o nounset
cd "$(dirname ${0})"
OUTNAME="$1"
-CDEFS="../../scripts/gen-cdefs.sh"
+CDEFS="../../scripts/meson/gen-cdefs.sh"
LIBKRES="${MESON_BUILD_ROOT}/lib/libkres.so"
KRESD="${MESON_BUILD_ROOT}/daemon/kresd"
if [ ! -e "$LIBKRES" ]; then
@@ -285,6 +285,7 @@ ${CDEFS} ${LIBKRES} functions <<-EOF
kr_ta_clear
# DNSSEC
kr_dnssec_key_sep_flag
+ kr_dnssec_key_zonekey_flag
kr_dnssec_key_revoked
kr_dnssec_key_tag
kr_dnssec_key_match
diff --git a/daemon/lua/kres.lua b/daemon/lua/kres.lua
index 44434b4d..473d0828 100644
--- a/daemon/lua/kres.lua
+++ b/daemon/lua/kres.lua
@@ -231,6 +231,11 @@ local const_extended_error = {
NREACH_AUTH = 22,
NETWORK = 23,
INV_DATA = 24,
+ EXPIRED_INV = 25,
+ TOO_EARLY = 26,
+ NSEC3_ITERS = 27,
+ NONCONF_POLICY = 28,
+ SYNTHESIZED = 29,
}
-- Constant tables
diff --git a/daemon/main.c b/daemon/main.c
index d18f59f9..227beba4 100644
--- a/daemon/main.c
+++ b/daemon/main.c
@@ -138,24 +138,6 @@ end:
* Server operation.
*/
-static int fork_workers(int forks)
-{
- /* Fork subprocesses if requested */
- while (--forks > 0) {
- int pid = fork();
- if (pid < 0) {
- perror("[system] fork");
- return kr_error(errno);
- }
-
- /* Forked process */
- if (pid == 0) {
- return forks;
- }
- }
- return 0;
-}
-
static void help(int argc, char *argv[])
{
printf("Usage: %s [parameters] [rundir]\n", argv[0]);
@@ -174,7 +156,7 @@ static void help(int argc, char *argv[])
}
/** \return exit code for main() */
-static int run_worker(uv_loop_t *loop, bool leader, struct args *args)
+static int run_worker(uv_loop_t *loop, struct args *args)
{
/* Only some kinds of stdin work with uv_pipe_t.
* Otherwise we would abort() from libuv e.g. with </dev/null */
@@ -227,7 +209,6 @@ static void args_init(struct args *args)
{
memset(args, 0, sizeof(struct args));
/* Zeroed arrays are OK. */
- args->forks = 1;
args->control_fd = -1;
args->interactive = true;
args->quiet = false;
@@ -255,7 +236,6 @@ static int parse_args(int argc, char **argv, struct args *args)
{"addr", required_argument, 0, 'a'},
{"tls", required_argument, 0, 't'},
{"config", required_argument, 0, 'c'},
- {"forks", required_argument, 0, 'f'},
{"noninteractive", no_argument, 0, 'n'},
{"verbose", no_argument, 0, 'v'},
{"quiet", no_argument, 0, 'q'},
@@ -279,20 +259,6 @@ static int parse_args(int argc, char **argv, struct args *args)
kr_require(optarg);
array_push(args->config, optarg);
break;
- case 'f':
- kr_require(optarg);
- args->forks = strtol(optarg, NULL, 10);
- if (args->forks == 1) {
- kr_log_deprecate(SYSTEM, "use --noninteractive instead of --forks=1\n");
- } else {
- kr_log_deprecate(SYSTEM, "support for running multiple --forks will be removed\n");
- }
- if (args->forks <= 0) {
- kr_log_error(SYSTEM, "error '-f' requires a positive"
- " number, not '%s'\n", optarg);
- return EXIT_FAILURE;
- }
- /* fall through */
case 'n':
args->interactive = false;
break;
@@ -516,12 +482,6 @@ int main(int argc, char **argv)
(long)rlim.rlim_cur);
}
- /* Fork subprocesses if requested */
- int fork_id = fork_workers(the_args->forks);
- if (fork_id < 0) {
- return EXIT_FAILURE;
- }
-
kr_crypto_init();
network_init(uv_default_loop(), TCP_BACKLOG_DEFAULT);
@@ -640,7 +600,7 @@ int main(int argc, char **argv)
kr_rules_commit(true);
/* Run the event loop */
- ret = run_worker(loop, fork_id == 0, the_args);
+ ret = run_worker(loop, the_args);
cleanup:/* Cleanup. */
network_unregister();
diff --git a/daemon/network.c b/daemon/network.c
index 59d47809..5551b15a 100644
--- a/daemon/network.c
+++ b/daemon/network.c
@@ -78,6 +78,7 @@ void network_init(uv_loop_t *loop, int tcp_backlog)
the_network->tcp.tls_handshake_timeout = TLS_MAX_HANDSHAKE_TIME;
the_network->tcp.user_timeout = 1000; // 1s should be more than enough
the_network->tcp_backlog = tcp_backlog;
+ the_network->enable_connect_udp = true;
// On Linux, unset means some auto-tuning mechanism also depending on RAM,
// which might be OK default (together with the user_timeout above)
diff --git a/daemon/network.h b/daemon/network.h
index 83d1b6f4..9d50e46d 100644
--- a/daemon/network.h
+++ b/daemon/network.h
@@ -115,6 +115,13 @@ struct network {
struct {
int snd, rcv;
} listen_udp_buflens, listen_tcp_buflens;
+
+ /** Use uv_udp_connect as the transport method for UDP.
+ * Enabling this increases the total number of syscalls, with a variable
+ * impact on the time spent processing them, sometimes resulting in
+ * a slight improvement in syscall processing efficiency.
+ * Note: This does not necessarily lead to overall performance gains. */
+ bool enable_connect_udp;
};
/** Pointer to the singleton network state. NULL if not initialized. */
diff --git a/daemon/proxyv2.c b/daemon/proxyv2.c
index 48eff866..f1af0b57 100644
--- a/daemon/proxyv2.c
+++ b/daemon/proxyv2.c
@@ -308,10 +308,13 @@ static inline bool proxy_header_present(const void* buf, const ssize_t nread)
}
-struct pl_proxyv2_dgram_iter_data {
+struct pl_proxyv2_state {
struct protolayer_data h;
+ /** Storage for data parsed from PROXY header. */
struct proxy_result proxy;
- bool has_proxy;
+ /** Stream/TCP: Some data has already arrived and we are not expecting
+ * PROXY header anymore. */
+ bool had_data : 1;
};
static enum protolayer_iter_cb_result pl_proxyv2_dgram_unwrap(
@@ -324,7 +327,7 @@ static enum protolayer_iter_cb_result pl_proxyv2_dgram_unwrap(
}
struct session2 *s = ctx->session;
- struct pl_proxyv2_dgram_iter_data *udp = iter_data;
+ struct pl_proxyv2_state *proxy_state = iter_data;
char *data = ctx->payload.buffer.buf;
ssize_t data_len = ctx->payload.buffer.len;
@@ -336,7 +339,7 @@ static enum protolayer_iter_cb_result pl_proxyv2_dgram_unwrap(
return protolayer_break(ctx, kr_error(EPERM));
}
- ssize_t trimmed = proxy_process_header(&udp->proxy, data, data_len);
+ ssize_t trimmed = proxy_process_header(&proxy_state->proxy, data, data_len);
if (trimmed == KNOT_EMALF) {
if (kr_log_is_debug(IO, NULL)) {
kr_log_debug(IO, "<= ignoring malformed PROXYv2 UDP "
@@ -353,12 +356,10 @@ static enum protolayer_iter_cb_result pl_proxyv2_dgram_unwrap(
return protolayer_break(ctx, kr_error(EINVAL));
}
- if (udp->proxy.command == PROXY2_CMD_PROXY && udp->proxy.family != AF_UNSPEC) {
- udp->has_proxy = true;
-
- comm->src_addr = &udp->proxy.src_addr.ip;
- comm->dst_addr = &udp->proxy.dst_addr.ip;
- comm->proxy = &udp->proxy;
+ if (proxy_state->proxy.command == PROXY2_CMD_PROXY && proxy_state->proxy.family != AF_UNSPEC) {
+ comm->src_addr = &proxy_state->proxy.src_addr.ip;
+ comm->dst_addr = &proxy_state->proxy.dst_addr.ip;
+ comm->proxy = &proxy_state->proxy;
if (kr_log_is_debug(IO, NULL)) {
kr_log_debug(IO, "<= UDP query from '%s'\n",
@@ -375,19 +376,11 @@ static enum protolayer_iter_cb_result pl_proxyv2_dgram_unwrap(
return protolayer_continue(ctx);
}
-
-struct pl_proxyv2_stream_sess_data {
- struct protolayer_data h;
- struct proxy_result proxy;
- bool had_data : 1;
- bool has_proxy : 1;
-};
-
static enum protolayer_iter_cb_result pl_proxyv2_stream_unwrap(
void *sess_data, void *iter_data, struct protolayer_iter_ctx *ctx)
{
struct session2 *s = ctx->session;
- struct pl_proxyv2_stream_sess_data *tcp = sess_data;
+ struct pl_proxyv2_state *proxy_state = sess_data;
struct sockaddr *peer = session2_get_peer(s);
if (kr_fails_assert(ctx->payload.type == PROTOLAYER_PAYLOAD_WIRE_BUF)) {
@@ -398,7 +391,7 @@ static enum protolayer_iter_cb_result pl_proxyv2_stream_unwrap(
char *data = wire_buf_data(ctx->payload.wire_buf); /* layer's or session's wirebuf */
ssize_t data_len = wire_buf_data_length(ctx->payload.wire_buf);
struct comm_info *comm = ctx->comm;
- if (!s->outgoing && !tcp->had_data && proxy_header_present(data, data_len)) {
+ if (!s->outgoing && !proxy_state->had_data && proxy_header_present(data, data_len)) {
if (!proxy_allowed(comm->src_addr)) {
if (kr_log_is_debug(IO, NULL)) {
kr_log_debug(IO, "<= connection to '%s': PROXYv2 not allowed "
@@ -409,7 +402,7 @@ static enum protolayer_iter_cb_result pl_proxyv2_stream_unwrap(
return protolayer_break(ctx, kr_error(ECONNRESET));
}
- ssize_t trimmed = proxy_process_header(&tcp->proxy, data, data_len);
+ ssize_t trimmed = proxy_process_header(&proxy_state->proxy, data, data_len);
if (trimmed < 0) {
if (kr_log_is_debug(IO, NULL)) {
if (trimmed == KNOT_EMALF) {
@@ -429,9 +422,10 @@ static enum protolayer_iter_cb_result pl_proxyv2_stream_unwrap(
return protolayer_break(ctx, kr_error(ECONNRESET));
}
- if (tcp->proxy.command != PROXY2_CMD_LOCAL && tcp->proxy.family != AF_UNSPEC) {
- comm->src_addr = &tcp->proxy.src_addr.ip;
- comm->dst_addr = &tcp->proxy.dst_addr.ip;
+ if (proxy_state->proxy.command != PROXY2_CMD_LOCAL && proxy_state->proxy.family != AF_UNSPEC) {
+ comm->src_addr = &proxy_state->proxy.src_addr.ip;
+ comm->dst_addr = &proxy_state->proxy.dst_addr.ip;
+ comm->proxy = &proxy_state->proxy;
if (kr_log_is_debug(IO, NULL)) {
kr_log_debug(IO, "<= TCP stream from '%s'\n",
@@ -444,7 +438,7 @@ static enum protolayer_iter_cb_result pl_proxyv2_stream_unwrap(
wire_buf_trim(ctx->payload.wire_buf, trimmed);
}
- tcp->had_data = true;
+ proxy_state->had_data = true;
return protolayer_continue(ctx);
}
@@ -452,12 +446,12 @@ __attribute__((constructor))
static void proxy_protolayers_init(void)
{
protolayer_globals[PROTOLAYER_TYPE_PROXYV2_DGRAM] = (struct protolayer_globals){
- .iter_size = sizeof(struct pl_proxyv2_dgram_iter_data),
+ .iter_size = sizeof(struct pl_proxyv2_state),
.unwrap = pl_proxyv2_dgram_unwrap,
};
protolayer_globals[PROTOLAYER_TYPE_PROXYV2_STREAM] = (struct protolayer_globals){
- .sess_size = sizeof(struct pl_proxyv2_stream_sess_data),
+ .sess_size = sizeof(struct pl_proxyv2_state),
.unwrap = pl_proxyv2_stream_unwrap,
};
}
diff --git a/daemon/proxyv2.test/kresd_config.j2 b/daemon/proxyv2.test/kresd_config.j2
index e7cbf63a..8023d409 100644
--- a/daemon/proxyv2.test/kresd_config.j2
+++ b/daemon/proxyv2.test/kresd_config.j2
@@ -2,7 +2,6 @@
{% raw %}
modules.load('view < policy')
view:addr("127.127.0.0", policy.suffix(policy.DENY_MSG("addr 127.127.0.0 matched com"),{"\3com\0"}))
--- policy.add(policy.all(policy.FORWARD('1.2.3.4')))
-- make sure DNSSEC is turned off for tests
trust_anchors.remove('.')
diff --git a/daemon/session2.c b/daemon/session2.c
index f9be09f2..6761127a 100644
--- a/daemon/session2.c
+++ b/daemon/session2.c
@@ -631,18 +631,21 @@ static int session2_submit(
if (had_comm_param) {
struct comm_addr_storage *addrst = &ctx->comm_addr_storage;
if (comm->src_addr) {
- memcpy(&addrst->src_addr.ip, comm->src_addr,
- kr_sockaddr_len(comm->src_addr));
+ int len = kr_sockaddr_len(comm->src_addr);
+ kr_require(len > 0 && len <= sizeof(union kr_sockaddr));
+ memcpy(&addrst->src_addr, comm->src_addr, len);
ctx->comm_storage.src_addr = &addrst->src_addr.ip;
}
if (comm->comm_addr) {
- memcpy(&addrst->comm_addr.ip, comm->comm_addr,
- kr_sockaddr_len(comm->comm_addr));
+ int len = kr_sockaddr_len(comm->comm_addr);
+ kr_require(len > 0 && len <= sizeof(union kr_sockaddr));
+ memcpy(&addrst->comm_addr, comm->comm_addr, len);
ctx->comm_storage.comm_addr = &addrst->comm_addr.ip;
}
if (comm->dst_addr) {
- memcpy(&addrst->dst_addr.ip, comm->dst_addr,
- kr_sockaddr_len(comm->dst_addr));
+ int len = kr_sockaddr_len(comm->dst_addr);
+ kr_require(len > 0 && len <= sizeof(union kr_sockaddr));
+ memcpy(&addrst->dst_addr, comm->dst_addr, len);
ctx->comm_storage.dst_addr = &addrst->dst_addr.ip;
}
ctx->comm = &ctx->comm_storage;
@@ -1217,11 +1220,12 @@ int session2_unwrap_after(struct session2 *s, enum protolayer_type protocol,
const struct comm_info *comm,
protolayer_finished_cb cb, void *baton)
{
- ssize_t layer_ix = session2_get_protocol(s, protocol) + 1;
- if (layer_ix < 0)
- return layer_ix;
+ ssize_t layer_ix = session2_get_protocol(s, protocol);
+ bool ok = layer_ix >= 0 && layer_ix + 1 < protolayer_grps[s->proto].num_layers;
+ if (kr_fails_assert(ok)) // not found or "last layer"
+ return kr_error(EINVAL);
return session2_submit(s, PROTOLAYER_UNWRAP,
- layer_ix, payload, comm, cb, baton);
+ layer_ix + 1, payload, comm, cb, baton);
}
int session2_wrap(struct session2 *s, struct protolayer_payload payload,
@@ -1238,10 +1242,10 @@ int session2_wrap_after(struct session2 *s, enum protolayer_type protocol,
const struct comm_info *comm,
protolayer_finished_cb cb, void *baton)
{
- ssize_t layer_ix = session2_get_protocol(s, protocol) - 1;
- if (layer_ix < 0)
- return layer_ix;
- return session2_submit(s, PROTOLAYER_WRAP, layer_ix,
+ ssize_t layer_ix = session2_get_protocol(s, protocol);
+ if (kr_fails_assert(layer_ix > 0)) // not found or "last layer"
+ return kr_error(EINVAL);
+ return session2_submit(s, PROTOLAYER_WRAP, layer_ix - 1,
payload, comm, cb, baton);
}
@@ -1471,8 +1475,10 @@ static int session2_transport_pushv(struct session2 *s,
ctx);
return kr_ok();
} else {
- int ret = uv_udp_try_send((uv_udp_t*)handle,
- (uv_buf_t *)iov, iovcnt, comm->comm_addr);
+ int ret = uv_udp_try_send((uv_udp_t*)handle, (uv_buf_t *)iov, iovcnt,
+ the_network->enable_connect_udp ? NULL : comm->comm_addr);
+ if (ret > 0) // equals buffer size, only confuses us
+ ret = 0;
if (ret == UV_EAGAIN) {
ret = kr_error(ENOBUFS);
session2_event(s, PROTOLAYER_EVENT_OS_BUFFER_FULL, NULL);
@@ -1504,6 +1510,8 @@ static int session2_transport_pushv(struct session2 *s,
ret = kr_error(ENOBUFS);
session2_event(s, PROTOLAYER_EVENT_OS_BUFFER_FULL, NULL);
}
+ else if (ret > 0) // iovec_sum was checked, let's not get confused anymore
+ ret = 0;
if (false && ret == UV_EAGAIN) {
uv_write_t *req = malloc(sizeof(*req));
diff --git a/daemon/session2.h b/daemon/session2.h
index 50fb697b..4ea42c30 100644
--- a/daemon/session2.h
+++ b/daemon/session2.h
@@ -2,6 +2,8 @@
* SPDX-License-Identifier: GPL-3.0-or-later
*/
+/* High-level explanation of layered protocols: ./layered-protocols.rst */
+
/* HINT: If you are looking to implement support for a new transport protocol,
* start with the doc comment of the `PROTOLAYER_TYPE_MAP` macro and
* continue from there. */
@@ -582,7 +584,12 @@ enum protolayer_event_cb_result {
*
* When `PROTOLAYER_EVENT_PROPAGATE` is returned, iteration over the sequence
* of layers continues. When `PROTOLAYER_EVENT_CONSUME` is returned, iteration
- * stops. */
+ * stops.
+ *
+ * **IMPORTANT:** A well-behaved layer will **ALWAYS** propagate events it knows
+ * nothing about. Only ever consume events you actually have good reason to
+ * consume (like TLS consumes `CONNECT` from TCP, because it needs to perform
+ * its own handshake first). */
typedef enum protolayer_event_cb_result (*protolayer_event_cb)(
enum protolayer_event_type event, void **baton,
struct session2 *session, void *sess_data);
diff --git a/daemon/tls.c b/daemon/tls.c
index 173cf3c3..231bff2d 100644
--- a/daemon/tls.c
+++ b/daemon/tls.c
@@ -340,7 +340,6 @@ static void tls_close(struct pl_tls_sess_data *tls, struct session2 *session, bo
}
}
-#if TLS_CAN_USE_PINS
/*
DNS-over-TLS Out of band key-pinned authentication profile uses the
same form of pins as HPKP:
@@ -384,11 +383,11 @@ static int get_oob_key_pin(gnutls_x509_crt_t crt, char *outchar, ssize_t outchar
err = kr_base64_encode((uint8_t *)raw_pin, sizeof(raw_pin),
(uint8_t *)outchar, outchar_len);
if (err >= 0 && err < outchar_len) {
- err = GNUTLS_E_SUCCESS;
outchar[err] = '\0'; /* kr_base64_encode() doesn't do it */
+ err = GNUTLS_E_SUCCESS;
} else if (kr_fails_assert(err < 0)) {
- err = kr_error(ENOSPC); /* base64 fits but '\0' doesn't */
outchar[outchar_len - 1] = '\0';
+ err = kr_error(ENOSPC); /* base64 fits but '\0' doesn't */
}
leave:
gnutls_free(datum.data);
@@ -428,12 +427,6 @@ void tls_credentials_log_pins(struct tls_credentials *tls_credentials)
gnutls_free(certs);
}
}
-#else
-void tls_credentials_log_pins(struct tls_credentials *tls_credentials)
-{
- kr_log_debug(TLS, "could not calculate RFC 7858 OOB key-pin; GnuTLS 3.4.0+ required\n");
-}
-#endif
static int str_replace(char **where_ptr, const char *with)
{
@@ -715,6 +708,41 @@ int tls_client_param_remove(tls_client_params_t *params, const struct sockaddr *
return kr_ok();
}
+static void log_all_pins(tls_client_param_t *params)
+{
+ uint8_t buffer[TLS_SHA256_BASE64_BUFLEN + 1];
+ for (int i = 0; i < params->pins.len; i++) {
+ int len = kr_base64_encode(params->pins.at[i], TLS_SHA256_RAW_LEN,
+ buffer, TLS_SHA256_BASE64_BUFLEN);
+ if (!kr_fails_assert(len > 0)) {
+ buffer[len] = '\0';
+ kr_log_error(TLSCLIENT, "pin no. %d: %s\n", i, buffer);
+ }
+ }
+}
+
+static void log_all_certificates(const unsigned int cert_list_size,
+ const gnutls_datum_t *cert_list)
+{
+ for (int i = 0; i < cert_list_size; i++) {
+ gnutls_x509_crt_t cert;
+ if (gnutls_x509_crt_init(&cert) != GNUTLS_E_SUCCESS) {
+ return;
+ }
+ if (gnutls_x509_crt_import(cert, &cert_list[i], GNUTLS_X509_FMT_DER) != GNUTLS_E_SUCCESS) {
+ gnutls_x509_crt_deinit(cert);
+ return;
+ }
+ char cert_pin[TLS_SHA256_BASE64_BUFLEN];
+ if (get_oob_key_pin(cert, cert_pin, sizeof(cert_pin), false) != GNUTLS_E_SUCCESS) {
+ gnutls_x509_crt_deinit(cert);
+ return;
+ }
+ kr_log_error(TLSCLIENT, "Certificate: %s\n", cert_pin);
+ gnutls_x509_crt_deinit(cert);
+ }
+}
+
/**
* Verify that at least one certificate in the certificate chain matches
* at least one certificate pin in the non-empty params->pins array.
@@ -726,7 +754,6 @@ static int client_verify_pin(const unsigned int cert_list_size,
{
if (kr_fails_assert(params->pins.len > 0))
return GNUTLS_E_CERTIFICATE_ERROR;
-#if TLS_CAN_USE_PINS
for (int i = 0; i < cert_list_size; i++) {
gnutls_x509_crt_t cert;
int ret = gnutls_x509_crt_init(&cert);
@@ -740,20 +767,6 @@ static int client_verify_pin(const unsigned int cert_list_size,
return ret;
}
- #ifdef DEBUG
- if (kr_log_is_debug(TLS, NULL)) {
- char pin_base64[TLS_SHA256_BASE64_BUFLEN];
- /* DEBUG: additionally compute and print the base64 pin.
- * Not very efficient, but that's OK for DEBUG. */
- ret = get_oob_key_pin(cert, pin_base64, sizeof(pin_base64), false);
- if (ret == GNUTLS_E_SUCCESS) {
- VERBOSE_MSG(true, "received pin: %s\n", pin_base64);
- } else {
- VERBOSE_MSG(true, "failed to convert received pin\n");
- /* Now we hope that `ret` below can't differ. */
- }
- }
- #endif
char cert_pin[TLS_SHA256_RAW_LEN];
/* Get raw pin and compare. */
ret = get_oob_key_pin(cert, cert_pin, sizeof(cert_pin), true);
@@ -774,13 +787,9 @@ static int client_verify_pin(const unsigned int cert_list_size,
kr_log_error(TLSCLIENT, "no pin matched: %zu pins * %d certificates\n",
params->pins.len, cert_list_size);
+ log_all_pins(params);
+ log_all_certificates(cert_list_size, cert_list);
return GNUTLS_E_CERTIFICATE_ERROR;
-
-#else /* TLS_CAN_USE_PINS */
- kr_log_error(TLSCLIENT, "internal inconsistency: TLS_CAN_USE_PINS\n");
- kr_assert(false);
- return GNUTLS_E_CERTIFICATE_ERROR;
-#endif
}
/**
diff --git a/daemon/tls.h b/daemon/tls.h
index ff1bbea2..b24b6165 100644
--- a/daemon/tls.h
+++ b/daemon/tls.h
@@ -51,12 +51,6 @@ struct tls_credentials {
/** Required buffer length for pin_sha256, including the zero terminator. */
#define TLS_SHA256_BASE64_BUFLEN (((TLS_SHA256_RAW_LEN * 8 + 4) / 6) + 3 + 1)
-#if GNUTLS_VERSION_NUMBER >= 0x030400
- #define TLS_CAN_USE_PINS 1
-#else
- #define TLS_CAN_USE_PINS 0
-#endif
-
/** TLS authentication parameters for a single address-port pair. */
typedef struct {
diff --git a/daemon/tls_session_ticket-srv.c b/daemon/tls_session_ticket-srv.c
index 26d41862..ed8a5005 100644
--- a/daemon/tls_session_ticket-srv.c
+++ b/daemon/tls_session_ticket-srv.c
@@ -33,12 +33,6 @@
#define TST_HASH abort()
#endif
-#if GNUTLS_VERSION_NUMBER < 0x030400
- /* It's of little use anyway. We may get the secret through lua,
- * which creates a copy outside of our control. */
- #define gnutls_memset memset
-#endif
-
/** Fields are internal to tst_key_* functions. */
typedef struct tls_session_ticket_ctx {
uv_timer_t timer; /**< timer for rotation of the key */
diff --git a/daemon/worker.c b/daemon/worker.c
index 6217dc8a..fe303802 100644
--- a/daemon/worker.c
+++ b/daemon/worker.c
@@ -831,6 +831,17 @@ static int transmit(struct qr_task *task)
struct comm_info out_comm = {
.comm_addr = (struct sockaddr *)choice
};
+
+ if (the_network->enable_connect_udp && session->outgoing && !session->stream) {
+ uv_udp_t *udp = (uv_udp_t *)session2_get_handle(session);
+ int connect_tries = 3;
+
+ do {
+ ret = uv_udp_connect(udp, out_comm.comm_addr);
+ } while (ret == UV_EADDRINUSE && --connect_tries > 0);
+ if (ret < 0)
+ kr_log_error(IO, "Failed to establish udp connection: %s\n", uv_strerror(ret));
+ }
ret = qr_task_send(task, session, &out_comm, task->pktbuf);
if (ret) {
session2_close(session);
@@ -2296,9 +2307,6 @@ int worker_init(void)
uv_loop_t *loop = uv_default_loop();
the_worker->loop = loop;
- static const int worker_count = 1;
- the_worker->count = worker_count;
-
/* Register table for worker per-request variables */
struct lua_State *L = the_engine->L;
lua_newtable(L);
@@ -2334,8 +2342,6 @@ int worker_init(void)
lua_pushnumber(L, pid);
lua_setfield(L, -2, "pid");
- lua_pushnumber(L, worker_count);
- lua_setfield(L, -2, "count");
char cwd[PATH_MAX];
get_workdir(cwd, sizeof(cwd));
diff --git a/distro/config/apkg.toml b/distro/config/apkg.toml
index 19d4d8be..43a24671 100644
--- a/distro/config/apkg.toml
+++ b/distro/config/apkg.toml
@@ -7,7 +7,7 @@ make_archive_script = "scripts/make-archive.sh"
# needed for get-archive
archive_url = "https://secure.nic.cz/files/knot-resolver/knot-resolver-{{ version }}.tar.xz"
signature_url = "https://secure.nic.cz/files/knot-resolver/knot-resolver-{{ version }}.tar.xz.asc"
-version_script = "scripts/upstream-version.sh"
+version_script = "scripts/lib/upstream-version.sh"
[apkg]
compat = 4
diff --git a/distro/pkg/arch/PKGBUILD b/distro/pkg/arch/PKGBUILD
index 422da695..ebbd164d 100644
--- a/distro/pkg/arch/PKGBUILD
+++ b/distro/pkg/arch/PKGBUILD
@@ -63,12 +63,7 @@ build() {
-D malloc=jemalloc \
-D unit_tests=enabled
ninja -C build
- pushd build/python
python -Pm build --wheel --no-isolation
- popd
- pushd manager
- python -Pm build --wheel --no-isolation
- popd
}
check() {
@@ -87,17 +82,8 @@ package() {
# remove modules with missing dependencies
rm "${pkgdir}/usr/lib/knot-resolver/kres_modules/etcd.lua"
- # install knot-resolver metadata Python module
- pushd build/python
- python -Pm installer --destdir="$pkgdir" dist/*.whl
- popd
-
- # install knot-resolver-manager
- pushd manager
+ # install knot-resolver Python module
python -Pm installer --destdir="$pkgdir" dist/*.whl
- install -m 644 -D etc/knot-resolver/config.yaml ${pkgdir}/etc/knot-resolver/config.yaml
- install -m 644 -D shell-completion/client.bash ${pkgdir}/usr/share/bash-completion/completions/kresctl
- install -m 644 -D shell-completion/client.fish ${pkgdir}/usr/share/fish/completions/kresctl.fish
- popd
+ install -m 644 -D etc/config/config.yaml ${pkgdir}/etc/knot-resolver/config.yaml
}
diff --git a/distro/pkg/deb/knot-resolver6.install b/distro/pkg/deb/knot-resolver6.install
index 29d23032..7b9d0c41 100644
--- a/distro/pkg/deb/knot-resolver6.install
+++ b/distro/pkg/deb/knot-resolver6.install
@@ -34,5 +34,3 @@ usr/lib/systemd/system/knot-resolver.service
usr/lib/tmpfiles.d/knot-resolver.conf
usr/sbin/kres-cache-gc
usr/sbin/kresd
-usr/share/bash-completion/completions/kresctl
-usr/share/fish/completions/kresctl.fish
diff --git a/distro/pkg/deb/rules b/distro/pkg/deb/rules
index 037d7663..66c71c3e 100755
--- a/distro/pkg/deb/rules
+++ b/distro/pkg/deb/rules
@@ -19,7 +19,7 @@ include /usr/share/dpkg/default.mk
%:
- dh $@ --with python3
+ dh $@ --with python3 --buildsystem pybuild
override_dh_auto_build:
meson build_deb \
@@ -35,20 +35,12 @@ override_dh_auto_build:
-Dc_args="$${CFLAGS}" \
-Dc_link_args="$${LDFLAGS}"
ninja -v -C build_deb
- PYBUILD_NAME=knot_resolver PYBUILD_DESTDIR="$${PYKRES_DESTDIR}" \
- dh_auto_build --buildsystem=pybuild --sourcedirectory build_deb/python
- PYBUILD_NAME=knot_resoolver_manager PYBUILD_DESTDIR="$${PYKRES_DESTDIR}" \
- dh_auto_build --buildsystem=pybuild --sourcedirectory manager
+ PYBUILD_NAME=knot_resolver PYBUILD_DESTDIR="$${PYKRES_DESTDIR}" dh_auto_build
override_dh_auto_install:
DESTDIR="$(shell pwd)/debian/tmp" ninja -v -C build_deb install
- PYBUILD_NAME=knot_resolver PYBUILD_DESTDIR="$${PYKRES_DESTDIR}" \
- dh_auto_install --buildsystem=pybuild --sourcedirectory build_deb/python
- PYBUILD_NAME=knot_resolver_manager PYBUILD_DESTDIR="$${PYKRES_DESTDIR}" \
- dh_auto_install --buildsystem=pybuild --sourcedirectory manager
- install -m 644 -D manager/etc/knot-resolver/config.yaml debian/tmp/etc/knot-resolver/config.yaml
- install -m 644 -D manager/shell-completion/client.bash debian/tmp/usr/share/bash-completion/completions/kresctl
- install -m 644 -D manager/shell-completion/client.fish debian/tmp/usr/share/fish/completions/kresctl.fish
+ PYBUILD_NAME=knot_resolver PYBUILD_DESTDIR="$${PYKRES_DESTDIR}" dh_auto_install
+ install -m 644 -D etc/config/config.yaml debian/tmp/etc/knot-resolver/config.yaml
override_dh_auto_test:
meson test -C build_deb
diff --git a/distro/pkg/rpm/knot-resolver.spec b/distro/pkg/rpm/knot-resolver.spec
index de962e8c..ca8602ff 100644
--- a/distro/pkg/rpm/knot-resolver.spec
+++ b/distro/pkg/rpm/knot-resolver.spec
@@ -181,13 +181,7 @@ CFLAGS="%{optflags}" LDFLAGS="%{?__global_ldflags}" meson build_rpm \
%{NINJA} -v -C build_rpm
-pushd build_rpm/python
%py3_build
-popd
-
-pushd manager
-%py3_build
-popd
%install
DESTDIR="${RPM_BUILD_ROOT}" %{NINJA} -v -C build_rpm install
@@ -215,18 +209,10 @@ install -m 755 -d %{buildroot}/%{_pkgdocdir}
mv %{buildroot}/%{_datadir}/doc/%{name}/* %{buildroot}/%{_pkgdocdir}/
%endif
-pushd build_rpm/python
-%py3_install
-popd
-
-# install knot-resolver-manager
-pushd manager
+# install knot_resolver python module
%py3_install
-install -m 644 -D etc/knot-resolver/config.yaml %{buildroot}%{_sysconfdir}/knot-resolver/config.yaml
-install -m 644 -D shell-completion/client.bash %{buildroot}%{_datarootdir}/bash-completion/completions/kresctl
-install -m 644 -D shell-completion/client.fish %{buildroot}%{_datarootdir}/fish/completions/kresctl.fish
-popd
+install -m 644 -D etc/config/config.yaml %{buildroot}%{_sysconfdir}/knot-resolver/config.yaml
%pre
getent group knot-resolver >/dev/null || groupadd -r knot-resolver
@@ -306,18 +292,9 @@ getent passwd knot-resolver >/dev/null || useradd -r -g knot-resolver -d %{_sysc
%{_libdir}/knot-resolver/kres_modules/view.lua
%{_libdir}/knot-resolver/kres_modules/watchdog.lua
%{_libdir}/knot-resolver/kres_modules/workarounds.lua
-%{python3_sitelib}/knot_resolver.py
-%{python3_sitelib}/knot_resolver-*
-%{python3_sitearch}/knot_resolver_manager*
-%if 0%{?suse_version}
-%pycache_only %{python3_sitelib}/__pycache__/knot_resolver.*
-%else
-%{python3_sitelib}/__pycache__/knot_resolver.*
-%endif
+%{python3_sitearch}/knot_resolver*
%{_mandir}/man8/kresd.8.gz
%{_mandir}/man8/kresctl.8.gz
-%{_datarootdir}/bash-completion/completions/kresctl
-%{_datarootdir}/fish/completions/kresctl.fish
%files devel
%{_includedir}/libkres
diff --git a/distro/tests/extra/all/control b/distro/tests/extra/all/control
index b13cc27d..1130b04d 100644
--- a/distro/tests/extra/all/control
+++ b/distro/tests/extra/all/control
@@ -1,2 +1,41 @@
-{# This adds all tests for manager's packaging #}
-{% include 'manager/tests/packaging/control' %}
+{# Test that all packages are installed #}
+Tests: dependencies.py
+Tests-Directory: tests/packaging/
+
+
+{# Test that kresctl command exists and is in $PATH #}
+Tests: kresctl.sh
+Tests-Directory: tests/packaging
+
+
+{# Test that knot-resolver command exists and is in $PATH #}
+Tests: knot-resolver.sh
+Tests-Directory: tests/packaging
+
+
+{# Tests that the manager can be started with default config and it resolves some domains #}
+Tests: systemd_service.sh
+Tests-Directory: tests/packaging
+Restrictions: needs-root
+{% if distro.match('fedora') -%}
+Depends: knot-utils, jq, curl, procps
+{% elif distro.match('debian') or distro.match('ubuntu') -%}
+Depends: knot-dnsutils, jq, curl, procps
+{% elif distro.match('arch') -%}
+Depends: knot, jq, curl
+{% elif distro.match('rocky', 'centos') -%}
+Depends: knot-utils, jq, curl
+{% elif distro.match('almalinux') -%}
+Depends: knot-utils, jq, curl-minimal, procps
+{% elif distro.match('opensuse') -%}
+Depends: knot-utils, jq, curl
+{% else -%}
+Depends: unsupported-distro-this-package-does-not-exist-and-the-test-should-fail
+{%- endif %}
+
+
+Tests: manpage.sh
+Tests-Directory: tests/packaging
+{% if distro.match('fedora') or distro.match('rocky') or distro.match('opensuse') -%}
+Depends: man
+{%- endif %}
diff --git a/doc/_static/config.schema.json b/doc/_static/config.schema.json
new file mode 100644
index 00000000..036a3ce0
--- /dev/null
+++ b/doc/_static/config.schema.json
@@ -0,0 +1,1703 @@
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://www.knot-resolver.cz/documentation/v6.0.8/_static/config.schema.json",
+ "title": "Knot Resolver configuration JSON schema",
+ "description": "Version Knot Resolver 6.0.8",
+ "type": "object",
+ "properties": {
+ "version": {
+ "type": "integer",
+      "description": "Version of the configuration schema. By default it is the latest supported by the resolver, but a couple of versions back are supported as well.",
+ "default": 1
+ },
+ "nsid": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Name Server Identifier (RFC 5001) which allows DNS clients to request resolver to send back its NSID along with the reply to a DNS request.",
+ "default": null
+ },
+ "hostname": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Internal DNS resolver hostname. Default is machine hostname.",
+ "default": null
+ },
+ "rundir": {
+ "type": "string",
+      "description": "Directory where the resolver can create files and which will be its cwd.",
+ "default": "/run/knot-resolver"
+ },
+ "workers": {
+ "anyOf": [
+ {
+ "type": "string",
+ "enum": [
+ "auto"
+ ]
+ },
+ {
+ "type": "integer",
+ "minimum": 1
+ }
+ ],
+ "description": "The number of running kresd (Knot Resolver daemon) workers. If set to 'auto', it is equal to number of CPUs available.",
+ "default": 1
+ },
+ "max-workers": {
+ "type": "integer",
+ "minimum": 1,
+ "description": "The maximum number of workers allowed. Cannot be changed in runtime.",
+ "default": 256
+ },
+ "management": {
+ "description": "Configuration of management HTTP API.",
+ "type": "object",
+ "properties": {
+ "unix-socket": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Path to unix domain socket to listen to.",
+ "default": null
+ },
+ "interface": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "IP address and port number to listen to.",
+ "default": null
+ }
+ },
+ "default": {
+ "unix_socket": "/run/knot-resolver/kres-api.sock",
+ "interface": null
+ }
+ },
+ "webmgmt": {
+ "description": "Configuration of legacy web management endpoint.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "unix-socket": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Path to unix domain socket to listen to.",
+ "default": null
+ },
+ "interface": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "IP address or interface name with port number to listen to.",
+ "default": null
+ },
+ "tls": {
+ "type": "boolean",
+ "description": "Enable/disable TLS.",
+ "default": false
+ },
+ "cert-file": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Path to certificate file.",
+ "default": null
+ },
+ "key-file": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Path to certificate key.",
+ "default": null
+ }
+ },
+ "default": null
+ },
+ "options": {
+ "description": "Fine-tuning global parameters of DNS resolver operation.",
+ "type": "object",
+ "properties": {
+ "glue-checking": {
+ "type": "string",
+ "enum": [
+ "normal",
+ "strict",
+ "permissive"
+ ],
+          "description": "Glue records strictness checking level.",
+ "default": "normal"
+ },
+ "minimize": {
+ "type": "boolean",
+ "description": "Send minimum amount of information in recursive queries to enhance privacy.",
+ "default": true
+ },
+ "query-loopback": {
+ "type": "boolean",
+ "description": "Permits queries to loopback addresses.",
+ "default": false
+ },
+ "reorder-rrset": {
+ "type": "boolean",
+ "description": "Controls whether resource records within a RRSet are reordered each time it is served from the cache.",
+ "default": true
+ },
+ "query-case-randomization": {
+ "type": "boolean",
+ "description": "Randomize Query Character Case.",
+ "default": true
+ },
+ "priming": {
+ "type": "boolean",
+ "description": "Initializing DNS resolver cache with Priming Queries (RFC 8109)",
+ "default": true
+ },
+ "rebinding-protection": {
+ "type": "boolean",
+ "description": "Protection against DNS Rebinding attack.",
+ "default": false
+ },
+ "refuse-no-rd": {
+ "type": "boolean",
+ "description": "Queries without RD (recursion desired) bit set in query are answered with REFUSED.",
+ "default": true
+ },
+ "time-jump-detection": {
+ "type": "boolean",
+ "description": "Detection of difference between local system time and expiration time bounds in DNSSEC signatures for '. NS' records.",
+ "default": true
+ },
+ "violators-workarounds": {
+ "type": "boolean",
+ "description": "Workarounds for known DNS protocol violators.",
+ "default": false
+ },
+ "serve-stale": {
+ "type": "boolean",
+ "description": "Allows using timed-out records in case DNS resolver is unable to contact upstream servers.",
+ "default": false
+ }
+ },
+ "default": {
+ "glue_checking": "normal",
+ "minimize": true,
+ "query_loopback": false,
+ "reorder_rrset": true,
+ "query_case_randomization": true,
+ "priming": true,
+ "rebinding_protection": false,
+ "refuse_no_rd": true,
+ "time_jump_detection": true,
+ "violators_workarounds": false,
+ "serve_stale": false
+ }
+ },
+ "network": {
+ "description": "Network connections and protocols configuration.",
+ "type": "object",
+ "properties": {
+ "do-ipv4": {
+ "type": "boolean",
+ "description": "Enable/disable using IPv4 for contacting upstream nameservers.",
+ "default": true
+ },
+ "do-ipv6": {
+ "type": "boolean",
+ "description": "Enable/disable using IPv6 for contacting upstream nameservers.",
+ "default": true
+ },
+ "out-interface-v4": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "IPv4 address used to perform queries. Not set by default, which lets the OS choose any address.",
+ "default": null
+ },
+ "out-interface-v6": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "IPv6 address used to perform queries. Not set by default, which lets the OS choose any address.",
+ "default": null
+ },
+ "tcp-pipeline": {
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 65535,
+ "description": "TCP pipeline limit. The number of outstanding queries that a single client connection can make in parallel.",
+ "default": 100
+ },
+ "edns-tcp-keepalive": {
+ "type": "boolean",
+ "description": "Allows clients to discover the connection timeout. (RFC 7828)",
+ "default": true
+ },
+ "edns-buffer-size": {
+ "description": "Maximum EDNS payload size advertised in DNS packets. Different values can be configured for communication downstream (towards clients) and upstream (towards other DNS servers).",
+ "type": "object",
+ "properties": {
+ "upstream": {
+ "type": "string",
+ "pattern": "^(\\d+)(B|K|M|G)$",
+ "description": "Maximum EDNS upstream (towards other DNS servers) payload size.",
+ "default": "1232B"
+ },
+ "downstream": {
+ "type": "string",
+ "pattern": "^(\\d+)(B|K|M|G)$",
+ "description": "Maximum EDNS downstream (towards clients) payload size for communication.",
+ "default": "1232B"
+ }
+ },
+ "default": {
+ "upstream": "1232B",
+ "downstream": "1232B"
+ }
+ },
+ "address-renumbering": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "description": "Renumbers addresses in answers to different address space.",
+ "type": "object",
+ "properties": {
+ "source": {
+ "type": "string",
+ "description": "Source subnet."
+ },
+ "destination": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "description": "Destination address prefix."
+ }
+ }
+ },
+ "description": "Renumbers addresses in answers to different address space.",
+ "default": null
+ },
+ "tls": {
+ "description": "TLS configuration, also affects DNS over TLS and DNS over HTTPS.",
+ "type": "object",
+ "properties": {
+ "cert-file": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Path to certificate file.",
+ "default": null
+ },
+ "key-file": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Path to certificate key file.",
+ "default": null
+ },
+ "sticket-secret": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "minLength": 32,
+ "description": "Secret for TLS session resumption via tickets. (RFC 5077).",
+ "default": null
+ },
+ "sticket-secret-file": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Path to file with secret for TLS session resumption via tickets. (RFC 5077).",
+ "default": null
+ },
+ "auto-discovery": {
+ "type": "boolean",
+ "description": "Experimental automatic discovery of authoritative servers supporting DNS-over-TLS.",
+ "default": false
+ },
+ "padding": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 512
+ }
+ ],
+ "description": "EDNS(0) padding of queries and answers sent over an encrypted channel.",
+ "default": true
+ }
+ },
+ "default": {
+ "cert_file": null,
+ "key_file": null,
+ "sticket_secret": null,
+ "sticket_secret_file": null,
+ "auto_discovery": false,
+ "padding": true
+ }
+ },
+ "proxy-protocol": {
+ "anyOf": [
+ {
+ "type": "string",
+ "enum": [
+ false
+ ]
+ },
+ {
+ "description": "PROXYv2 protocol configuration.",
+ "type": "object",
+ "properties": {
+ "allow": {
+ "type": "array",
+ "items": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ },
+ "description": "Allow usage of the PROXYv2 protocol headers by clients on the specified addresses."
+ }
+ }
+ }
+ ],
+ "description": "PROXYv2 protocol configuration.",
+ "default": false
+ },
+ "listen": {
+ "type": "array",
+ "items": {
+ "description": "Configuration of listening interface.",
+ "type": "object",
+ "properties": {
+ "interface": {
+ "anyOf": [
+ {
+ "type": "null"
+ },
+ {
+ "anyOf": [
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ ],
+ "description": "IP address or interface name with optional port number to listen to.",
+ "default": null
+ },
+ "unix-socket": {
+ "anyOf": [
+ {
+ "type": "null"
+ },
+ {
+ "anyOf": [
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ ],
+ "description": "Path to unix domain socket to listen to.",
+ "default": null
+ },
+ "port": {
+ "type": [
+ "integer",
+ "null"
+ ],
+ "minimum": 1,
+ "maximum": 65535,
+ "description": "Port number to listen to.",
+ "default": null
+ },
+ "kind": {
+ "type": "string",
+ "enum": [
+ "dns",
+ "xdp",
+ "dot",
+ "doh-legacy",
+ "doh2"
+ ],
+ "description": "Specifies DNS query transport protocol.",
+ "default": "dns"
+ },
+ "freebind": {
+ "type": "boolean",
+ "description": "Used for binding to non-local address.",
+ "default": false
+ }
+ }
+ },
+ "description": "List of interfaces to listen to and its configuration.",
+ "default": [
+ {
+ "interface": [
+ "127.0.0.1"
+ ],
+ "unix_socket": null,
+ "port": 53,
+ "kind": "dns",
+ "freebind": false
+ },
+ {
+ "interface": [
+ "::1"
+ ],
+ "unix_socket": null,
+ "port": 53,
+ "kind": "dns",
+ "freebind": true
+ }
+ ]
+ }
+ },
+ "default": {
+ "do_ipv4": true,
+ "do_ipv6": true,
+ "out_interface_v4": null,
+ "out_interface_v6": null,
+ "tcp_pipeline": 100,
+ "edns_tcp_keepalive": true,
+ "edns_buffer_size": {
+ "upstream": "1232B",
+ "downstream": "1232B"
+ },
+ "address_renumbering": null,
+ "tls": {
+ "cert_file": null,
+ "key_file": null,
+ "sticket_secret": null,
+ "sticket_secret_file": null,
+ "auto_discovery": false,
+ "padding": true
+ },
+ "proxy_protocol": false,
+ "listen": [
+ {
+ "interface": [
+ "127.0.0.1"
+ ],
+ "unix_socket": null,
+ "port": 53,
+ "kind": "dns",
+ "freebind": false
+ },
+ {
+ "interface": [
+ "::1"
+ ],
+ "unix_socket": null,
+ "port": 53,
+ "kind": "dns",
+ "freebind": true
+ }
+ ]
+ }
+ },
+ "views": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "description": "Configuration parameters that allow you to create personalized policy rules and other.",
+ "type": "object",
+ "properties": {
+ "subnets": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+            "description": "Identifies the client based on its subnet. A rule with a more precise subnet takes priority."
+ },
+ "dst-subnet": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Destination subnet, as an additional condition.",
+ "default": null
+ },
+ "protocols": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "type": "string",
+ "enum": [
+ "udp53",
+ "tcp53",
+ "dot",
+ "doh",
+ "doq"
+ ]
+ },
+ "description": "Transport protocol, as an additional condition.",
+ "default": null
+ },
+ "tags": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "type": "string",
+ "pattern": "^(?!-)[a-z0-9-]*[a-z0-9]+$"
+ },
+ "description": "Tags to link with other policy rules.",
+ "default": null
+ },
+ "answer": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "enum": [
+ "allow",
+ "refused",
+ "noanswer"
+ ],
+ "description": "Direct approach how to handle request from clients identified by the view.",
+ "default": null
+ },
+ "options": {
+ "description": "Configuration options for clients identified by the view.",
+ "type": "object",
+ "properties": {
+ "minimize": {
+ "type": "boolean",
+ "description": "Send minimum amount of information in recursive queries to enhance privacy.",
+ "default": true
+ },
+ "dns64": {
+ "type": "boolean",
+ "description": "Enable/disable DNS64.",
+ "default": true
+ }
+ },
+ "default": {
+ "minimize": true,
+ "dns64": true
+ }
+ }
+ }
+ },
+ "description": "List of views and its configuration.",
+ "default": null
+ },
+ "local-data": {
+ "description": "Local data for forward records (A/AAAA) and reverse records (PTR).",
+ "type": "object",
+ "properties": {
+ "ttl": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "pattern": "^(\\d+)(us|ms|s|m|h|d)$",
+ "description": "Default TTL value used for added local data/records.",
+ "default": null
+ },
+ "nodata": {
+ "type": "boolean",
+ "description": "Use NODATA synthesis. NODATA will be synthesised for matching name, but mismatching type(e.g. AAAA query when only A exists).",
+ "default": true
+ },
+ "root-fallback-addresses": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "anyOf": [
+ {
+ "type": "array",
+ "items": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ },
+ "description": "Direct replace of root hints.",
+ "default": null
+ },
+ "root-fallback-addresses-files": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "type": "string"
+ },
+ "description": "Direct replace of root hints from a zonefile.",
+ "default": null
+ },
+ "addresses": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "anyOf": [
+ {
+ "type": "array",
+ "items": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ },
+ "description": "Direct addition of hostname and IP addresses pairs.",
+ "default": null
+ },
+ "addresses-files": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "type": "string"
+ },
+ "description": "Direct addition of hostname and IP addresses pairs from files in '/etc/hosts' like format.",
+ "default": null
+ },
+ "records": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Direct addition of records in DNS zone file format.",
+ "default": null
+ },
+ "rules": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "description": "Local data advanced rule configuration.",
+ "type": "object",
+ "properties": {
+ "name": {
+ "anyOf": [
+ {
+ "type": "null"
+ },
+ {
+ "anyOf": [
+ {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "pattern": "(?=^.{,253}\\.?$)(^(?!\\.)((?!-)\\.?[a-zA-Z0-9-]{,62}[a-zA-Z0-9])+\\.?$)|^\\.$"
+ }
+ },
+ {
+ "type": "string",
+ "pattern": "(?=^.{,253}\\.?$)(^(?!\\.)((?!-)\\.?[a-zA-Z0-9-]{,62}[a-zA-Z0-9])+\\.?$)|^\\.$"
+ }
+ ]
+ }
+ ],
+ "description": "Hostname(s).",
+ "default": null
+ },
+ "subtree": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "enum": [
+ "empty",
+ "nxdomain",
+ "redirect"
+ ],
+ "description": "Type of subtree.",
+ "default": null
+ },
+ "address": {
+ "anyOf": [
+ {
+ "type": "null"
+ },
+ {
+ "anyOf": [
+ {
+ "type": "array",
+ "items": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ ],
+ "description": "Address(es) to pair with hostname(s).",
+ "default": null
+ },
+ "file": {
+ "anyOf": [
+ {
+ "type": "null"
+ },
+ {
+ "anyOf": [
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ {
+ "type": "string"
+ }
+ ]
+ }
+ ],
+ "description": "Path to file(s) with hostname and IP address(es) pairs in '/etc/hosts' like format.",
+ "default": null
+ },
+ "records": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Direct addition of records in DNS zone file format.",
+ "default": null
+ },
+ "tags": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "type": "string",
+ "pattern": "^(?!-)[a-z0-9-]*[a-z0-9]+$"
+ },
+ "description": "Tags to link with other policy rules.",
+ "default": null
+ },
+ "ttl": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "pattern": "^(\\d+)(us|ms|s|m|h|d)$",
+ "description": "Optional, TTL value used for these answers.",
+ "default": null
+ },
+ "nodata": {
+ "type": [
+ "boolean",
+ "null"
+ ],
+ "description": "Optional, use NODATA synthesis. NODATA will be synthesised for matching name, but mismatching type(e.g. AAAA query when only A exists).",
+ "default": null
+ }
+ }
+ },
+ "description": "Local data rules.",
+ "default": null
+ },
+ "rpz": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "description": "Configuration or Response Policy Zone (RPZ).",
+ "type": "object",
+ "properties": {
+ "file": {
+ "type": "string",
+ "description": "Path to the RPZ zone file."
+ },
+ "tags": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "type": "string",
+ "pattern": "^(?!-)[a-z0-9-]*[a-z0-9]+$"
+ },
+ "description": "Tags to link with other policy rules.",
+ "default": null
+ }
+ }
+ },
+ "description": "List of Response Policy Zones and its configuration.",
+ "default": null
+ }
+ },
+ "default": {
+ "ttl": null,
+ "nodata": true,
+ "root_fallback_addresses": null,
+ "root_fallback_addresses_files": null,
+ "addresses": null,
+ "addresses_files": null,
+ "records": null,
+ "rules": null,
+ "rpz": null
+ }
+ },
+ "forward": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "description": "Configuration of forward subtree.",
+ "type": "object",
+ "properties": {
+ "subtree": {
+ "anyOf": [
+ {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "pattern": "(?=^.{,253}\\.?$)(^(?!\\.)((?!-)\\.?[a-zA-Z0-9-]{,62}[a-zA-Z0-9])+\\.?$)|^\\.$"
+ }
+ },
+ {
+ "type": "string",
+ "pattern": "(?=^.{,253}\\.?$)(^(?!\\.)((?!-)\\.?[a-zA-Z0-9-]{,62}[a-zA-Z0-9])+\\.?$)|^\\.$"
+ }
+ ],
+ "description": "Subtree(s) to forward."
+ },
+ "servers": {
+ "anyOf": [
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ {
+ "type": "array",
+ "items": {
+ "description": "Forward server configuration.",
+ "type": "object",
+ "properties": {
+ "address": {
+ "anyOf": [
+ {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ {
+ "type": "string"
+ }
+ ],
+ "description": "IP address(es) of a forward server."
+ },
+ "transport": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "enum": [
+ "tls"
+ ],
+ "description": "Transport protocol for a forward server.",
+ "default": null
+ },
+ "pin-sha256": {
+ "anyOf": [
+ {
+ "type": "null"
+ },
+ {
+ "anyOf": [
+ {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "pattern": "^[A-Za-z\\d+/]{43}=$"
+ }
+ },
+ {
+ "type": "string",
+ "pattern": "^[A-Za-z\\d+/]{43}=$"
+ }
+ ]
+ }
+ ],
+ "description": "Hash of accepted CA certificate.",
+ "default": null
+ },
+ "hostname": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "pattern": "(?=^.{,253}\\.?$)(^(?!\\.)((?!-)\\.?[a-zA-Z0-9-]{,62}[a-zA-Z0-9])+\\.?$)|^\\.$",
+ "description": "Hostname of the Forward server.",
+ "default": null
+ },
+ "ca-file": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Path to CA certificate file.",
+ "default": null
+ }
+ }
+ }
+ }
+ ],
+ "description": "Forward servers configuration."
+ },
+ "options": {
+ "description": "Subtree(s) forward options.",
+ "type": "object",
+ "properties": {
+ "authoritative": {
+ "type": "boolean",
+ "description": "The forwarding target is an authoritative server.",
+ "default": false
+ },
+ "dnssec": {
+ "type": "boolean",
+ "description": "Enable/disable DNSSEC.",
+ "default": true
+ }
+ },
+ "default": {
+ "authoritative": false,
+ "dnssec": true
+ }
+ }
+ }
+ },
+ "description": "List of Forward Zones and its configuration.",
+ "default": null
+ },
+ "cache": {
+ "description": "DNS resolver cache configuration.",
+ "type": "object",
+ "properties": {
+ "storage": {
+ "type": "string",
+ "description": "Cache storage of the DNS resolver.",
+ "default": "/var/cache/knot-resolver"
+ },
+ "size-max": {
+ "type": "string",
+ "pattern": "^(\\d+)(B|K|M|G)$",
+ "description": "Maximum size of the cache.",
+ "default": "100M"
+ },
+ "garbage-collector": {
+ "anyOf": [
+ {
+ "description": "Configuration options of the cache garbage collector (kres-cache-gc).",
+ "type": "object",
+ "properties": {
+ "interval": {
+ "type": "string",
+ "pattern": "^(\\d+)(us|ms|s|m|h|d)$",
+ "description": "Time interval how often the garbage collector will be run.",
+ "default": "1s"
+ },
+ "threshold": {
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 100,
+ "description": "Cache usage in percent that triggers the garbage collector.",
+ "default": 80
+ },
+ "release": {
+ "type": "integer",
+ "minimum": 0,
+ "maximum": 100,
+ "description": "Percent of used cache to be freed by the garbage collector.",
+ "default": 10
+ },
+ "temp-keys-space": {
+ "type": "string",
+ "pattern": "^(\\d+)(B|K|M|G)$",
+ "description": "Maximum amount of temporary memory for copied keys (0 = unlimited).",
+ "default": "0M"
+ },
+ "rw-deletes": {
+ "type": "integer",
+ "minimum": 0,
+ "description": "Maximum number of deleted records per read-write transaction (0 = unlimited).",
+ "default": 100
+ },
+ "rw-reads": {
+ "type": "integer",
+ "minimum": 0,
+ "description": "Maximum number of readed records per read-write transaction (0 = unlimited).",
+ "default": 200
+ },
+ "rw-duration": {
+ "type": "string",
+ "pattern": "^(\\d+)(us|ms|s|m|h|d)$",
+ "description": "Maximum duration of read-write transaction (0 = unlimited).",
+ "default": "0us"
+ },
+ "rw-delay": {
+ "type": "string",
+ "pattern": "^(\\d+)(us|ms|s|m|h|d)$",
+ "description": "Wait time between two read-write transactions.",
+ "default": "0us"
+ },
+ "dry-run": {
+ "type": "boolean",
+ "description": "Run the garbage collector in dry-run mode.",
+ "default": false
+ }
+ }
+ },
+ {
+ "type": "string",
+ "enum": [
+ false
+ ]
+ }
+ ],
+ "description": "Use the garbage collector (kres-cache-gc) to periodically clear cache.",
+ "default": {
+ "interval": "1s",
+ "threshold": 80,
+ "release": 10,
+ "temp_keys_space": "0M",
+ "rw_deletes": 100,
+ "rw_reads": 200,
+ "rw_duration": "0us",
+ "rw_delay": "0us",
+ "dry_run": false
+ }
+ },
+ "ttl-min": {
+ "type": "string",
+ "pattern": "^(\\d+)(us|ms|s|m|h|d)$",
+ "description": "Minimum time-to-live for the cache entries.",
+ "default": "5s"
+ },
+ "ttl-max": {
+ "type": "string",
+ "pattern": "^(\\d+)(us|ms|s|m|h|d)$",
+ "description": "Maximum time-to-live for the cache entries.",
+ "default": "1d"
+ },
+ "ns-timeout": {
+ "type": "string",
+ "pattern": "^(\\d+)(us|ms|s|m|h|d)$",
+ "description": "Time interval for which a nameserver address will be ignored after determining that it does not return (useful) answers.",
+ "default": "1000ms"
+ },
+ "prefill": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "description": "Prefill the cache periodically by importing zone data obtained over HTTP.",
+ "type": "object",
+ "properties": {
+ "origin": {
+ "type": "string",
+ "pattern": "(?=^.{,253}\\.?$)(^(?!\\.)((?!-)\\.?[a-zA-Z0-9-]{,62}[a-zA-Z0-9])+\\.?$)|^\\.$",
+ "description": "Origin for the imported data. Cache prefilling is only supported for the root zone ('.')."
+ },
+ "url": {
+ "type": "string",
+ "description": "URL of the zone data to be imported."
+ },
+ "refresh-interval": {
+ "type": "string",
+ "pattern": "^(\\d+)(us|ms|s|m|h|d)$",
+ "description": "Time interval between consecutive refreshes of the imported zone data.",
+ "default": "1d"
+ },
+ "ca-file": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Path to the file containing a CA certificate bundle that is used to authenticate the HTTPS connection.",
+ "default": null
+ }
+ }
+ },
+ "description": "Prefill the cache periodically by importing zone data obtained over HTTP.",
+ "default": null
+ },
+ "prefetch": {
+ "description": "These options help keep the cache hot by prefetching expiring records or learning usage patterns and repetitive queries.",
+ "type": "object",
+ "properties": {
+ "expiring": {
+ "type": "boolean",
+ "description": "Prefetch expiring records.",
+ "default": false
+ },
+ "prediction": {
+ "description": "Prefetch record by predicting based on usage patterns and repetitive queries.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "window": {
+ "type": "string",
+ "pattern": "^(\\d+)(us|ms|s|m|h|d)$",
+ "description": "Sampling window length.",
+ "default": "15m"
+ },
+ "period": {
+ "type": "integer",
+ "minimum": 1,
+ "description": "Number of windows that can be kept in memory.",
+ "default": 24
+ }
+ },
+ "default": null
+ }
+ },
+ "default": {
+ "expiring": false,
+ "prediction": null
+ }
+ }
+ },
+ "default": {
+ "storage": "/var/cache/knot-resolver",
+ "size_max": "100M",
+ "garbage_collector": {
+ "interval": "1s",
+ "threshold": 80,
+ "release": 10,
+ "temp_keys_space": "0M",
+ "rw_deletes": 100,
+ "rw_reads": 200,
+ "rw_duration": "0us",
+ "rw_delay": "0us",
+ "dry_run": false
+ },
+ "ttl_min": "5s",
+ "ttl_max": "1d",
+ "ns_timeout": "1000ms",
+ "prefill": null,
+ "prefetch": {
+ "expiring": false,
+ "prediction": null
+ }
+ }
+ },
+ "dnssec": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "description": "DNSSEC configuration.",
+ "type": "object",
+ "properties": {
+ "trust-anchor-sentinel": {
+ "type": "boolean",
+ "description": "Allows users of DNSSEC validating resolver to detect which root keys are configured in resolver's chain of trust. (RFC 8509)",
+ "default": true
+ },
+ "trust-anchor-signal-query": {
+ "type": "boolean",
+ "description": "Signaling Trust Anchor Knowledge in DNSSEC Using Key Tag Query, according to (RFC 8145#section-5).",
+ "default": true
+ },
+ "time-skew-detection": {
+ "type": "boolean",
+ "description": "Detection of difference between local system time and expiration time bounds in DNSSEC signatures for '. NS' records.",
+ "default": true
+ },
+ "keep-removed": {
+ "type": "integer",
+ "minimum": 0,
+ "description": "How many removed keys should be held in history (and key file) before being purged.",
+ "default": 0
+ },
+ "refresh-time": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "pattern": "^(\\d+)(us|ms|s|m|h|d)$",
+ "description": "Force trust-anchors to be updated every defined time periodically instead of relying on (RFC 5011) logic and TTLs. Intended only for testing purposes.",
+ "default": null
+ },
+ "hold-down-time": {
+ "type": "string",
+ "pattern": "^(\\d+)(us|ms|s|m|h|d)$",
+ "description": "Modify hold-down timer (RFC 5011). Intended only for testing purposes.",
+ "default": "30d"
+ },
+ "trust-anchors": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "type": "string"
+ },
+ "description": "List of trust-anchors in DS/DNSKEY records format.",
+ "default": null
+ },
+ "negative-trust-anchors": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "type": "string",
+ "pattern": "(?=^.{,253}\\.?$)(^(?!\\.)((?!-)\\.?[a-zA-Z0-9-]{,62}[a-zA-Z0-9])+\\.?$)|^\\.$"
+ },
+ "description": "List of domain names representing negative trust-anchors. (RFC 7646)",
+ "default": null
+ },
+ "trust-anchors-files": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "description": "Trust-anchor zonefile configuration.",
+ "type": "object",
+ "properties": {
+ "file": {
+ "type": "string",
+ "description": "Path to the zonefile that stores trust-anchors."
+ },
+ "read-only": {
+ "type": "boolean",
+ "description": "Blocks zonefile updates according to RFC 5011.",
+ "default": false
+ }
+ }
+ },
+ "description": "List of zonefiles where trust-anchors are stored.",
+ "default": null
+ }
+ }
+ }
+ ],
+ "description": "Disable DNSSEC, enable with defaults or set new configuration.",
+ "default": true
+ },
+ "dns64": {
+ "anyOf": [
+ {
+ "type": "boolean"
+ },
+ {
+ "description": "DNS64 (RFC 6147) configuration.",
+ "type": "object",
+ "properties": {
+ "prefix": {
+ "type": "string",
+ "description": "IPv6 prefix to be used for synthesizing AAAA records.",
+ "default": "64:ff9b::/96"
+ },
+ "rev-ttl": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "pattern": "^(\\d+)(us|ms|s|m|h|d)$",
+ "description": "TTL in CNAME generated in the reverse 'ip6.arpa.' subtree.",
+ "default": null
+ },
+ "exclude-subnets": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "type": "string"
+ },
+ "description": "IPv6 subnets that are disallowed in answer.",
+ "default": null
+ }
+ }
+ }
+ ],
+ "description": "Disable DNS64 (RFC 6147), enable with defaults or set new configuration.",
+ "default": false
+ },
+ "logging": {
+ "description": "Logging and debugging configuration.",
+ "type": "object",
+ "properties": {
+ "level": {
+ "type": "string",
+ "enum": [
+ "crit",
+ "err",
+ "warning",
+ "notice",
+ "info",
+ "debug"
+ ],
+ "description": "Global logging level.",
+ "default": "notice"
+ },
+ "target": {
+ "anyOf": [
+ {
+ "type": "string",
+ "enum": [
+ "syslog",
+ "stderr",
+ "stdout"
+ ]
+ },
+ {
+ "type": "string",
+ "enum": [
+ "from-env"
+ ]
+ }
+ ],
+ "description": "Global logging stream target. \"from-env\" uses $KRES_LOGGING_TARGET and defaults to \"stdout\".",
+ "default": "from-env"
+ },
+ "groups": {
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "type": "string",
+ "enum": [
+ "manager",
+ "supervisord",
+ "cache-gc",
+ "system",
+ "cache",
+ "io",
+ "net",
+ "ta",
+ "tasent",
+ "tasign",
+ "taupd",
+ "tls",
+ "gnutls",
+ "tls_cl",
+ "xdp",
+ "doh",
+ "dnssec",
+ "hint",
+ "plan",
+ "iterat",
+ "valdtr",
+ "resolv",
+ "select",
+ "zoncut",
+ "cookie",
+ "statis",
+ "rebind",
+ "worker",
+ "policy",
+ "daf",
+ "timejm",
+ "timesk",
+ "graphi",
+ "prefil",
+ "primin",
+ "srvstl",
+ "wtchdg",
+ "nsid",
+ "dnstap",
+ "tests",
+ "dotaut",
+ "http",
+ "contrl",
+ "module",
+ "devel",
+ "renum",
+ "exterr",
+ "rules",
+ "prlayr"
+ ]
+ },
+ "description": "List of groups for which 'debug' logging level is set.",
+ "default": null
+ },
+ "dnssec-bogus": {
+ "type": "boolean",
+ "description": "Logging a message for each DNSSEC validation failure.",
+ "default": false
+ },
+ "dnstap": {
+ "anyOf": [
+ {
+ "type": "string",
+ "enum": [
+ false
+ ]
+ },
+ {
+ "description": "Logging DNS queries and responses to a unix socket.",
+ "type": "object",
+ "properties": {
+ "unix-socket": {
+ "type": "string",
+ "description": "Path to unix domain socket where dnstap messages will be sent."
+ },
+ "log-queries": {
+ "type": "boolean",
+ "description": "Log queries from downstream in wire format.",
+ "default": true
+ },
+ "log-responses": {
+ "type": "boolean",
+ "description": "Log responses to downstream in wire format.",
+ "default": true
+ },
+ "log-tcp-rtt": {
+ "type": "boolean",
+ "description": "Log TCP RTT (Round-trip time).",
+ "default": true
+ }
+ }
+ }
+ ],
+ "description": "Logging DNS requests and responses to a unix socket.",
+ "default": false
+ },
+ "debugging": {
+ "description": "Advanced debugging parameters for kresd (Knot Resolver daemon).",
+ "type": "object",
+ "properties": {
+ "assertion-abort": {
+ "type": "boolean",
+ "description": "Allow the process to be aborted in case it encounters a failed assertion.",
+ "default": false
+ },
+ "assertion-fork": {
+ "type": "string",
+ "pattern": "^(\\d+)(us|ms|s|m|h|d)$",
+ "description": "Fork and abord child kresd process to obtain a coredump, while the parent process recovers and keeps running.",
+ "default": "5m"
+ }
+ },
+ "default": {
+ "assertion_abort": false,
+ "assertion_fork": "5m"
+ }
+ }
+ },
+ "default": {
+ "level": "notice",
+ "target": "stdout",
+ "groups": null,
+ "dnssec_bogus": false,
+ "dnstap": false,
+ "debugging": {
+ "assertion_abort": false,
+ "assertion_fork": "5m"
+ }
+ }
+ },
+ "monitoring": {
+ "description": "Metrics exposisition configuration (Prometheus, Graphite)",
+ "type": "object",
+ "properties": {
+ "enabled": {
+ "type": "string",
+ "enum": [
+ "manager-only",
+ "lazy",
+ "always"
+ ],
+ "description": "configures, whether statistics module will be loaded into resolver",
+ "default": "lazy"
+ },
+ "graphite": {
+ "anyOf": [
+ {
+ "type": "string",
+ "enum": [
+ false
+ ]
+ },
+ {
+ "type": "object",
+ "properties": {
+ "host": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "string"
+ },
+ {
+ "type": "string",
+ "pattern": "(?=^.{,253}\\.?$)(^(?!\\.)((?!-)\\.?[a-zA-Z0-9-]{,62}[a-zA-Z0-9])+\\.?$)|^\\.$"
+ }
+ ]
+ },
+ "port": {
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 65535,
+ "default": 2003
+ },
+ "prefix": {
+ "type": "string",
+ "default": ""
+ },
+ "interval": {
+ "type": "string",
+ "pattern": "^(\\d+)(us|ms|s|m|h|d)$",
+ "default": "5s"
+ },
+ "tcp": {
+ "type": "boolean",
+ "default": false
+ }
+ }
+ }
+ ],
+ "description": "optionally configures where should graphite metrics be sent to",
+ "default": false
+ }
+ },
+ "default": {
+ "enabled": "lazy",
+ "graphite": false
+ }
+ },
+ "lua": {
+ "description": "Custom Lua configuration.",
+ "type": "object",
+ "properties": {
+ "script-only": {
+ "type": "boolean",
+ "description": "Ignore declarative configuration and use only Lua script or file defined in this section.",
+ "default": false
+ },
+ "script": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Custom Lua configuration script.",
+ "default": null
+ },
+ "script-file": {
+ "type": [
+ "string",
+ "null"
+ ],
+ "description": "Path to file that contains Lua configuration script.",
+ "default": null
+ }
+ },
+ "default": {
+ "script_only": false,
+ "script": null,
+ "script_file": null
+ }
+ }
+ }
+}
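
To make the schema above easier to digest, the following is a small hand-written YAML fragment of the kind it is meant to validate. The keys mirror the schema; the subnets, tags, names and sizes are purely illustrative and not taken from any shipped configuration.

.. code-block:: yaml

    # Illustrative sketch only -- keys follow the schema above, values are made up.
    views:
      - subnets: [10.0.0.0/8]
        tags: [lan]                # links this view to rules tagged 'lan'
      - subnets: [0.0.0.0/0, "::/0"]
        answer: refused            # anything not matched by a more specific subnet
    local-data:
      ttl: 5m
      rules:
        - name: [ads.example.org]
          subtree: nxdomain
          tags: [lan]
    cache:
      size-max: 500M
      prefetch:
        expiring: true
    logging:
      level: notice
      groups: [cache, valdtr]      # groups logged at 'debug' level
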
diff --git a/doc/dev/build.rst b/doc/dev/build.rst
index 2e8e7d17..f6512214 100644
--- a/doc/dev/build.rst
+++ b/doc/dev/build.rst
@@ -6,14 +6,15 @@
Cloning the repository
**********************
-.. note:: Latest up-to-date packages for various distribution can be obtained
- from web `<https://knot-resolver.cz/download/>`_.
+.. note:: Maybe you do not need to build from source?
+ See `<../gettingstarted-install.html>`_.
Knot Resolver is written for UNIX-like systems using modern C standards.
Beware that some 64-bit systems with LuaJIT 2.1 may be affected by
`a problem <https://github.com/LuaJIT/LuaJIT/blob/v2.1.0-beta3/doc/status.html#L100>`_
-- Linux on x86_64 is unaffected but `Linux on aarch64 is
-<https://gitlab.nic.cz/knot/knot-resolver/issues/216>`_.
+<https://gitlab.nic.cz/knot/knot-resolver/issues/216>`_,
+but distros supporting LuaJIT on aarch64 have typically resolved this already.
.. code-block:: bash
@@ -71,6 +72,8 @@ but users unfamiliar with Meson might want to read introductory
article `Using Meson <https://mesonbuild.com/Quick-guide.html>`_.
+.. _kresd-dep:
+
Dependencies
============
@@ -85,8 +88,8 @@ The following dependencies are needed to build and run Knot Resolver with core f
"ninja", "*build only*"
"meson >= 0.49", "*build only* [#]_"
"C and C++ compiler", "*build only* [#]_"
- "`pkg-config`_", "*build only* [#]_"
- "libknot_ 3.0.2+", "Knot DNS libraries"
+ "`pkg-config`_", "*build only*"
+ "libknot_ 3.3.0+", "Knot DNS libraries"
"LuaJIT_ 2.0+", "Embedded scripting language"
"libuv_ 1.7+", "Multiplatform I/O and services"
"lmdb", "Memory-mapped database for cache"
@@ -106,7 +109,7 @@ Resolver:
"`libcap-ng`_", "``daemon``", "Linux capabilities: support dropping them."
"`lua-basexx`_", "``config tests``", "Number base encoding/decoding for Lua."
"`lua-http`_", "``modules/http``", "HTTP/2 client/server for Lua."
- "`lua-cqueues`_", "some lua modules", ""
+ "`lua-cqueues`_", "some modules and tests", ""
"cmocka_", "``unit tests``", "Unit testing framework."
"dnsdist_", "``proxyv2 test``", "DNS proxy server"
"Doxygen_", "``documentation``", "Generating API documentation."
@@ -124,27 +127,21 @@ Resolver:
.. [#] If ``meson >= 0.49`` isn't available for your distro, check backports
repository or use python pip to install it.
-.. [#] Requires ``__attribute__((cleanup))`` and ``-MMD -MP`` for
- dependency file generation. We test GCC and Clang, and ICC is likely to work as well.
-.. [#] You can use variables ``<dependency>_CFLAGS`` and ``<dependency>_LIBS``
- to configure dependencies manually (i.e. ``libknot_CFLAGS`` and
- ``libknot_LIBS``).
-
-.. note:: Some build dependencies can be found in
- `home:CZ-NIC:knot-resolver-build
- <https://build.opensuse.org/project/show/home:CZ-NIC:knot-resolver-build>`_.
-
-On reasonably new systems most of the dependencies can be resolved from packages,
-here's an overview for several platforms.
+.. [#] We test GCC and Clang. We depend on GNU extensions to the C standard,
+ in particular ``__attribute__((cleanup))``.
-* **Debian/Ubuntu** - Current stable doesn't have new enough Meson
- and libknot. Use repository above or build them yourself. Fresh list of dependencies can be found in `Debian control file in our repo <https://gitlab.nic.cz/knot/knot-resolver/blob/master/distro/deb/control>`_, search for "Build-Depends".
+On reasonably new systems most of the dependencies can be resolved from packages.
+``apkg build-dep`` is one option of obtaining them (see above).
-* **CentOS/Fedora/RHEL/openSUSE** - Fresh list of dependencies can be found in `RPM spec file in our repo <https://gitlab.nic.cz/knot/knot-resolver/blob/master/distro/rpm/knot-resolver.spec>`_, search for "BuildRequires".
+We tend to require a fairly recent libknot, so you might need to install a newer one.
+Their team also provides binaries for major Linux distros:
+https://www.knot-dns.cz/download/
* **FreeBSD** - when installing from ports, all dependencies will install
automatically, corresponding to the selected options.
+ FIXME: resolver 6.x stuff (manager) doesn't even work yet.
* **Mac OS X** - the dependencies can be obtained from `Homebrew formula <https://formulae.brew.sh/formula/knot-resolver>`_.
+ FIXME: resolver 6.x stuff (manager) doesn't even work yet.
Compilation
===========
@@ -311,7 +308,7 @@ All dependencies are also listed in `pyproject.toml <https://gitlab.nic.cz/knot/
"prometheus-client_", "Prometheus client for Python (optional)"
- You can install the ``manager`` using generated ``setup.py``.
+You can install the Manager using the generated ``setup.py``.
.. code-block:: bash
@@ -320,7 +317,7 @@ All dependencies are also listed in `pyproject.toml <https://gitlab.nic.cz/knot/
.. tip::
- For development, it is recommended to run the manager using the procedure described in `manager/README.md <https://gitlab.nic.cz/knot/knot-resolver/-/blob/master/manager/README.md>`_.
+ For development, it is recommended to run the manager using the procedure described in :ref:`manager-dev-env`.
************
diff --git a/doc/dev/index.rst b/doc/dev/index.rst
index 1f6dc1da..a13e3d61 100644
--- a/doc/dev/index.rst
+++ b/doc/dev/index.rst
@@ -28,6 +28,7 @@ Welcome to Knot Resolver's documentation for developers and advanced users!
architecture
manager-dev-code
+ layered-protocols
.. toctree::
:caption: Lua configuration
diff --git a/doc/dev/layered-protocols.rst b/doc/dev/layered-protocols.rst
new file mode 120000
index 00000000..2b06bc13
--- /dev/null
+++ b/doc/dev/layered-protocols.rst
@@ -0,0 +1 @@
+../../daemon/layered-protocols.rst \ No newline at end of file
diff --git a/doc/dev/manager-dev-env.rst b/doc/dev/manager-dev-env.rst
index 430fbd23..7178bc93 100644
--- a/doc/dev/manager-dev-env.rst
+++ b/doc/dev/manager-dev-env.rst
@@ -23,7 +23,7 @@ So we try to isolate everything from the system we are running on.
To start working on the manager, you need to install the following tools:
- Python: One of the supported versions.
- You can use `pyenv <https://github.com/pyenv/pyenv#installation>`_ to install and manage multiple versions of Python without affecting your system.
+ You may optionally use `pyenv <https://github.com/pyenv/pyenv#installation>`_ to install and manage multiple versions of Python without affecting your system.
Alternatively, some Linux distributions ship packages for older Python versions as well.
- `Poetry <https://python-poetry.org/docs/#installation>`_: We use it to manage our dependencies and virtual environments.
Do not install the package via ``pip``, follow instructions in Poetry's official documentation.
@@ -39,22 +39,50 @@ Running the manager from source for the first time
==================================================
1. Clone the Knot Resolver `GitLab repository <https://gitlab.nic.cz/knot/knot-resolver>`_.
-2. In the repository, change to the ``manager/`` directory and perform all of the following tasks in that directory.
-3. Run ``poetry env use $(which python3.12)`` to configure Poetry to use a Python interpreter other than the system default.
+2. Use ``apkg build-dep`` as described in the :ref:`kresd-dep` section to automatically install development dependencies for the Knot Resolver daemon.
+3. In the repository, change to the ``manager/`` directory and perform all of the following tasks in that directory.
+4. (Optional) Run ``poetry env use $(which python3.12)`` to configure Poetry to use a Python interpreter other than the system default.
- As mentioned above it is recommended to use ``pyenv`` to manage other Python versions.
+ As mentioned above it is possible to use ``pyenv`` to manage other Python versions.
Then poetry needs to be told where to look for that version of Python, e.g.:
.. code-block:: bash
$ poetry env use ~/.pyenv/versions/3.12.1/bin/python3.12
-4. Run ``poetry install --all-extras`` to install all dependencies including all optional ones (--all-extras flag), in a newly created virtual environment.
+5. Run ``poetry install --all-extras`` to install all dependencies, including all optional ones (omit ``--all-extras`` flag to exclude those), in a newly created virtual environment.
All dependencies can be seen in ``pyproject.toml``.
-5. Use ``./poe run`` to run the manager in development mode (Ctrl+C to exit).
+6. Use ``./poe configure`` to set up the build directory of the Knot Resolver daemon (``kresd``).
+ This command optionally takes the same arguments as ``meson configure``, but may just as well be run with none to get some sane defaults.
+7. Use ``./poe run`` to run the manager in development mode (Ctrl+C to exit).
The manager is started with the configuration located in ``manager/etc/knot-resolver/config.dev.yaml``.
+Advanced workspace directory setup
+==================================
+
+It may get annoying to have to juggle changes to the ``config.dev.yaml`` file in Git while using the setup described above.
+For this reason, we also allow specifying some paths via environment variables so that you can use a specialized separate workspace directory for development and testing:
+
+* ``KRES_MANAGER_RUNTIME`` specifies the working directory containing the cache, unix sockets and more.
+ Since these files are mostly temporary, but relatively frequently written into, it is best to keep them in a ``tmpfs`` filesystem, like ``/dev/shm`` or ``/tmp``.
+* ``KRES_MANAGER_CONFIG`` specifies the path to a ``config.yaml`` to be used by the manager.
+
+To make your life easier, you may create a separate workspace directory
+containing a custom run script, which might look something like this:
+
+.. code-block:: bash
+
+ #!/usr/bin/env bash
+ script_dir="$(dirname $(realpath $BASH_SOURCE[0]))"
+ shm_dir="/dev/shm/kresd6"
+
+ mkdir -p "$shm_dir"
+ export KRES_MANAGER_RUNTIME="$shm_dir"
+ export KRES_MANAGER_CONFIG="$script_dir/config.yaml"
+ exec $path_to_knot_resolver/poe "$@"
+
+
Commands
========
@@ -70,6 +98,7 @@ To list all the available commands, you can run ``poe help``.
The commands are defined in the ``pyproject.toml`` file.
The most important ones for everyday development are:
+- ``poe configure`` to configure the build directory of ``kresd``
- ``poe run`` to run the manager
- ``poe docs`` to create HTML documentation
- ``poe test`` to run unit tests (enforced by our CI)
diff --git a/doc/kresd.8.in b/doc/kresd.8.in
index 29d4ed9b..33e4035e 100644
--- a/doc/kresd.8.in
+++ b/doc/kresd.8.in
@@ -90,17 +90,6 @@ Option may be passed multiple times to listen on more file descriptors.
Set the config file with settings for kresd to read instead of reading the
file at the default location (\fIconfig\fR).
.TP
-.B \-f\fI N\fR, \fB\-\-forks=\fI<N>
-This option is deprecated since 5.0.0!
-
-With this option, the daemon is started in non-interactive mode and instead creates a
-UNIX socket in \fIrundir\fR that the operator can connect to for interactive session.
-A number greater than 1 forks the daemon N times, all forks will bind to same addresses
-and the kernel will load-balance between them on Linux with \fISO_REUSEPORT\fR support.
-
-If you want multiple concurrent processes supervised in this way,
-they should be supervised independently (see \fBkresd.systemd(7)\fR).
-.TP
.B \-n\fR, \fB\-\-noninteractive
Daemon will refrain from entering into read-eval-print loop for stdin+stdout.
.TP
diff --git a/doc/meson.build b/doc/meson.build
index 0dfa3cb3..ad6a0f9a 100644
--- a/doc/meson.build
+++ b/doc/meson.build
@@ -4,7 +4,7 @@
# man page
man_config = configuration_data()
man_config.set('version', meson.project_version())
-man_config.set('date', run_command('../scripts/get-date.sh', check: true).stdout())
+man_config.set('date', run_command('../scripts/lib/get-date.sh', check: true).stdout())
man_config.set('man_seealso_systemd', '')
if systemd_legacy_units == 'enabled'
@@ -72,7 +72,7 @@ if get_option('doc') == 'enabled'
endif
-make_doc = find_program('../scripts/make-doc.sh')
+make_doc = find_program('../scripts/meson/make-doc.sh')
run_target(
'doc',
command: make_doc
diff --git a/doc/user/config-local-data.rst b/doc/user/config-local-data.rst
index 24293105..c11d8b36 100644
--- a/doc/user/config-local-data.rst
+++ b/doc/user/config-local-data.rst
@@ -77,6 +77,13 @@ It provides various input formats described in following subsections.
34.example.com AAAA 2001:db8::3
34.example.com AAAA 2001:db8::4
+ .. warning::
+
+ While you can insert all kinds of records and rules into ``local-data:``,
+ they won't work exactly as in real zones on authoritative servers.
+ For example, wildcards won't get expanded and DNAMEs won't cause occlusion.
+
+
Response Policy Zones (RPZ)
---------------------------
diff --git a/doc/user/gettingstarted-install.rst b/doc/user/gettingstarted-install.rst
index 5179748a..2f84098e 100644
--- a/doc/user/gettingstarted-install.rst
+++ b/doc/user/gettingstarted-install.rst
@@ -18,8 +18,6 @@ Please, follow the instructions for your packaging system:
Please follow https://pkg.labs.nic.cz/doc/?project=knot-resolver
- For Ubuntu it is also possible to use https://launchpad.net/~cz.nic-labs/+archive/ubuntu/knot-resolver.
-
.. tab:: .rpm
Please follow https://copr.fedorainfracloud.org/coprs/g/cznic/knot-resolver/
diff --git a/manager/etc/knot-resolver/.gitignore b/etc/config/.gitignore
index fb64123a..7a814b87 100644
--- a/manager/etc/knot-resolver/.gitignore
+++ b/etc/config/.gitignore
@@ -1,2 +1,2 @@
runtime/
-cache/ \ No newline at end of file
+cache/ \ No newline at end of file
diff --git a/manager/etc/knot-resolver/config.dev.yaml b/etc/config/config.dev.yaml
index aa97a41f..6705069e 100644
--- a/manager/etc/knot-resolver/config.dev.yaml
+++ b/etc/config/config.dev.yaml
@@ -1,9 +1,6 @@
-rundir: ./runtime
workers: 1
management:
interface: 127.0.0.1@5000
-cache:
- storage: ./cache
logging:
level: notice
groups:
@@ -43,14 +40,14 @@ local-data:
address: 1.2.3.4
nodata: true
tags: [t01]
- - file: hosts.custom
- tags: [t02]
- ttl: 10m
- subtree: empty
tags: [ t02 ]
name: [ example1.org ]
- subtree: nxdomain
name: [ sub4.example.org ]
+ # - file: hosts.custom
+ # tags: [t02]
+ # ttl: 10m
# rpz:
# - file: runtime/blocklist.rpz
# tags: [t01, t02]
@@ -64,9 +61,10 @@ forward:
transport: tls
hostname: odvr.nic.cz
- address: [ 192.0.2.1, 192.0.2.2 ]
+ transport: tls
pin-sha256:
- - YmE3ODE2YmY4ZjAx+2ZlYTQxNDE0MGRlNWRhZTIyMjNiMDAzNjFhMzk/MTc3YTljYjQxMGZmNjFmMjAwMTVhZA==
- - OTJmODU3ZDMyOWMwOWNlNTU4Y2M0YWNjMjI5NWE2NWJlMzY4MzRmMzY3NGU3NDAwNTI1YjMxZTMxYTgzMzQwMQ==
+ - d6qzRu9zOECb90Uez27xWltNsj0e1Md7GkYYkVoZWmM=
+ - E9CZ9INDbd+2eRQozYqqbQ2yXLVKB9+xcprMF+44U1g=
- subtree: 1.168.192.in-addr.arpa
options:
dnssec: false
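
Pieced together from the fragments visible in this hunk, the affected part of the development configuration now reads roughly as the sketch below. The ``subtree: .`` line and the grouping of fields into a single server entry are assumptions based on the surrounding context; only the addresses, pins and the reverse subtree appear verbatim above.

.. code-block:: yaml

    forward:
      - subtree: .                 # assumed apex for this sketch
        servers:
          - address: [192.0.2.1, 192.0.2.2]
            transport: tls
            pin-sha256:
              - d6qzRu9zOECb90Uez27xWltNsj0e1Md7GkYYkVoZWmM=
              - E9CZ9INDbd+2eRQozYqqbQ2yXLVKB9+xcprMF+44U1g=
      - subtree: 1.168.192.in-addr.arpa
        options:
          dnssec: false
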
diff --git a/manager/etc/knot-resolver/config.example.docker.yaml b/etc/config/config.example.docker.yaml
index 5c10d666..5c10d666 100644
--- a/manager/etc/knot-resolver/config.example.docker.yaml
+++ b/etc/config/config.example.docker.yaml
diff --git a/manager/etc/knot-resolver/config.example.internal.yaml b/etc/config/config.example.internal.yaml
index 6c11b2c6..6c11b2c6 100644
--- a/manager/etc/knot-resolver/config.example.internal.yaml
+++ b/etc/config/config.example.internal.yaml
diff --git a/manager/etc/knot-resolver/config.example.isp.yaml b/etc/config/config.example.isp.yaml
index 6f7fbe50..6f7fbe50 100644
--- a/manager/etc/knot-resolver/config.example.isp.yaml
+++ b/etc/config/config.example.isp.yaml
diff --git a/manager/etc/knot-resolver/config.example.personal.yaml b/etc/config/config.example.personal.yaml
index 69233fef..69233fef 100644
--- a/manager/etc/knot-resolver/config.example.personal.yaml
+++ b/etc/config/config.example.personal.yaml
diff --git a/manager/etc/knot-resolver/config.yaml b/etc/config/config.yaml
index cf4da929..cf4da929 100644
--- a/manager/etc/knot-resolver/config.yaml
+++ b/etc/config/config.yaml
diff --git a/lib/README.rst b/lib/README.rst
index b631fe7b..f2463d4a 100644
--- a/lib/README.rst
+++ b/lib/README.rst
@@ -4,11 +4,6 @@
Knot Resolver library
*********************
-Requirements
-============
-
-* libknot_ 2.0 (Knot DNS high-performance DNS library.)
-
For users
=========
diff --git a/lib/cache/api.c b/lib/cache/api.c
index 0cd18534..046dae20 100644
--- a/lib/cache/api.c
+++ b/lib/cache/api.c
@@ -237,9 +237,7 @@ int32_t get_new_ttl(const struct entry_h *entry, const struct kr_query *qry,
int res_stale = qry->stale_cb(res, owner, type, qry);
if (res_stale >= 0) {
VERBOSE_MSG(qry, "responding with stale answer\n");
- /* LATER: Perhaps we could use a more specific Stale
- * NXDOMAIN Answer code for applicable responses. */
- kr_request_set_extended_error(qry->request, KNOT_EDNS_EDE_STALE, "6Q6X");
+ qry->request->stale_accounted = true;
return res_stale;
}
}
diff --git a/lib/cache/peek.c b/lib/cache/peek.c
index d12031fc..46a4868c 100644
--- a/lib/cache/peek.c
+++ b/lib/cache/peek.c
@@ -214,6 +214,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
/* Try the NSEC* parameters in order, until success.
* Let's not mix different parameters for NSEC* RRs in a single proof. */
+ bool is_synthesized = false;
for (int i = 0; ;) {
int32_t log_new_ttl = -123456789; /* visually recognizable value */
ret = nsec_p_ttl(el[i], qry->timestamp.tv_sec, &log_new_ttl);
@@ -234,6 +235,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
/**** 2. and 3. inside */
ret = peek_encloser(k, &ans, sname_labels,
lowest_rank, qry, cache);
+ is_synthesized = (ret == 0);
nsec_p_cleanup(&ans.nsec_p);
if (!ret) break;
if (ret < 0) return ctx->state;
@@ -316,6 +318,10 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
qf->CACHED = true;
qf->NO_MINIMIZE = true;
+ if (is_synthesized && qry == req->rplan.initial) {
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_SYNTHESIZED,
+ "2NEP: synthesized from aggressive cache");
+ }
return KR_STATE_DONE;
}
diff --git a/lib/dnssec.c b/lib/dnssec.c
index 77cec796..169ce2bf 100644
--- a/lib/dnssec.c
+++ b/lib/dnssec.c
@@ -63,6 +63,10 @@ static int validate_rrsig_rr(int *flags, int cov_labels,
if (kr_fails_assert(flags && rrsigs && vctx && vctx->zone_name)) {
return kr_error(EINVAL);
}
+ if (knot_rrsig_sig_expiration(rrsigs) < knot_rrsig_sig_inception(rrsigs)) {
+ vctx->rrs_counters.expired_before_inception++;
+ return kr_error(EINVAL);
+ }
/* bullet 5 */
if (knot_rrsig_sig_expiration(rrsigs) < vctx->timestamp) {
vctx->rrs_counters.expired++;
@@ -435,26 +439,32 @@ finish:
return vctx->result;
}
-bool kr_ds_algo_support(const knot_rrset_t *ta)
+int kr_ds_algo_support(const knot_rrset_t *ta)
{
if (kr_fails_assert(ta && ta->type == KNOT_RRTYPE_DS && ta->rclass == KNOT_CLASS_IN))
- return false;
+ return kr_error(EINVAL);
/* Check if at least one DS has a usable algorithm pair. */
+ int ret = kr_error(ENOENT);
knot_rdata_t *rdata_i = ta->rrs.rdata;
for (uint16_t i = 0; i < ta->rrs.count;
++i, rdata_i = knot_rdataset_next(rdata_i)) {
- if (dnssec_algorithm_digest_support(knot_ds_digest_type(rdata_i))
- && dnssec_algorithm_key_support(knot_ds_alg(rdata_i))) {
- return true;
- }
+ if (dnssec_algorithm_digest_support(knot_ds_digest_type(rdata_i))) {
+ if (dnssec_algorithm_key_support(knot_ds_alg(rdata_i)))
+ return kr_ok();
+ else
+ ret = DNSSEC_INVALID_KEY_ALGORITHM;
+ } else
+ ret = DNSSEC_INVALID_DIGEST_ALGORITHM;
}
- return false;
+ return ret;
}
-// Now we instantiate these two as non-inline externally linkable code here (for lua).
+// Now we instantiate these three as non-inline externally linkable code here (for lua).
KR_EXPORT extern inline KR_PURE
bool kr_dnssec_key_sep_flag(const uint8_t *dnskey_rdata);
KR_EXPORT extern inline KR_PURE
+bool kr_dnssec_key_zonekey_flag(const uint8_t *dnskey_rdata);
+KR_EXPORT extern inline KR_PURE
bool kr_dnssec_key_revoked(const uint8_t *dnskey_rdata);
int kr_dnskeys_trusted(kr_rrset_validation_ctx_t *vctx, const knot_rdataset_t *sigs,
diff --git a/lib/dnssec.h b/lib/dnssec.h
index 52465042..b9f854d0 100644
--- a/lib/dnssec.h
+++ b/lib/dnssec.h
@@ -56,8 +56,9 @@ struct kr_rrset_validation_ctx {
const struct kr_query *log_qry; /*!< The query; just for logging purposes. */
struct {
unsigned int matching_name_type; /*!< Name + type matches */
- unsigned int expired;
- unsigned int notyet;
+ unsigned int expired; /*!< Number of expired signatures */
+ unsigned int notyet; /*!< Number of signatures not yet valid (inception > now) */
+ unsigned int expired_before_inception; /*!< Number of signatures already expired before inception time */
unsigned int signer_invalid; /*!< Signer is not zone apex */
unsigned int labels_invalid; /*!< Number of labels in RRSIG */
unsigned int key_invalid; /*!< Algorithm/keytag/key owner */
@@ -78,10 +79,17 @@ typedef struct kr_rrset_validation_ctx kr_rrset_validation_ctx_t;
int kr_rrset_validate(kr_rrset_validation_ctx_t *vctx, knot_rrset_t *covered);
/**
- * Return true iff the RRset contains at least one usable DS. See RFC6840 5.2.
+ * Check whether the RRset contains at least one usable DS.
+ *
+ * See RFC6840 5.2.
+ * @param ta Pointer to TA RRSet.
+ * @return kr_ok() if at least one DS is supported
+ * DNSSEC_INVALID_KEY_ALGORITHM if no DS is supported and the last failure was due to the key algorithm
+ * DNSSEC_INVALID_DIGEST_ALGORITHM if no DS is supported and the last failure was due to the digest algorithm
+ * @note Given that entries are iterated until a supported DS is found, the error refers to the last one.
*/
KR_EXPORT KR_PURE
-bool kr_ds_algo_support(const knot_rrset_t *ta);
+int kr_ds_algo_support(const knot_rrset_t *ta);
/**
* Check whether the DNSKEY rrset matches the supplied trust anchor RRSet.
@@ -97,13 +105,20 @@ int kr_dnskeys_trusted(kr_rrset_validation_ctx_t *vctx, const knot_rdataset_t *s
// flags: https://www.iana.org/assignments/dnskey-flags/dnskey-flags.xhtml
// https://datatracker.ietf.org/doc/html/rfc4034#section-2.1
-/** Return true if the DNSKEY has the SEP flag (normally ignored). */
+/** Return true if the DNSKEY has the SEP flag/bit set (normally ignored). */
KR_EXPORT inline KR_PURE
bool kr_dnssec_key_sep_flag(const uint8_t *dnskey_rdata)
{
return dnskey_rdata[1] & 0x01;
}
+/** Return true if the DNSKEY has the Zone Key flag/bit set. */
+KR_EXPORT inline KR_PURE
+bool kr_dnssec_key_zonekey_flag(const uint8_t *dnskey_rdata)
+{
+ return dnskey_rdata[0] & 0x01;
+}
+
/** Return true if the DNSKEY is revoked. */
KR_EXPORT inline KR_PURE
bool kr_dnssec_key_revoked(const uint8_t *dnskey_rdata)
@@ -111,11 +126,14 @@ bool kr_dnssec_key_revoked(const uint8_t *dnskey_rdata)
return dnskey_rdata[1] & 0x80;
}
-/** Return true if the DNSKEY could be used to validate zone records. */
+/**
+ * Return true if the DNSKEY could be used to validate zone records, meaning
+ * it correctly has the Zone Key flag/bit set to 1 and it is not revoked.
+ */
static inline KR_PURE
bool kr_dnssec_key_usable(const uint8_t *dnskey_rdata)
{
- return (dnskey_rdata[0] & 0x01) && !kr_dnssec_key_revoked(dnskey_rdata);
+ return kr_dnssec_key_zonekey_flag(dnskey_rdata) && !kr_dnssec_key_revoked(dnskey_rdata);
}
/** Return DNSKEY tag.
diff --git a/lib/generic/array.h b/lib/generic/array.h
index 9bea546b..eb1f7bc2 100644
--- a/lib/generic/array.h
+++ b/lib/generic/array.h
@@ -122,7 +122,7 @@ static inline void array_std_free(void *baton, void *p)
* @return element index on success, <0 on failure
*/
#define array_push_mm(array, val, reserve, baton) \
- (int)((array).len < (array).cap ? ((array).at[(array).len] = (val), (array).len++) \
+ (ssize_t)((array).len < (array).cap ? ((array).at[(array).len] = (val), (array).len++) \
: (array_reserve_mm(array, ((array).cap + 1), reserve, baton) < 0 ? -1 \
: ((array).at[(array).len] = (val), (array).len++)))
diff --git a/lib/layer/iterate.c b/lib/layer/iterate.c
index 69fe344c..3cc641cd 100644
--- a/lib/layer/iterate.c
+++ b/lib/layer/iterate.c
@@ -922,14 +922,15 @@ static int begin(kr_layer_t *ctx)
}
struct kr_query *qry = ctx->req->current_query;
- /* Avoid any other classes, and avoid any meta-types ~~except for ANY~~. */
- if (qry->sclass != KNOT_CLASS_IN
- || (knot_rrtype_is_metatype(qry->stype)
- /* && qry->stype != KNOT_RRTYPE_ANY hmm ANY seems broken ATM */)) {
+ /* Avoid any other classes, and avoid any meta-types. */
+ if (qry->sclass != KNOT_CLASS_IN || knot_rrtype_is_metatype(qry->stype)) {
knot_pkt_t *ans = kr_request_ensure_answer(ctx->req);
- if (!ans) return ctx->req->state;
+ if (!ans)
+ return ctx->req->state;
+ /* This RCODE is explicitly suggested for meta QTYPEs in RFC 8906 sec.7 */
knot_wire_set_rcode(ans->wire, KNOT_RCODE_NOTIMPL);
- return KR_STATE_FAIL;
+ kr_request_set_extended_error(ctx->req, KNOT_EDNS_EDE_NOTSUP, "57CK");
+ return KR_STATE_DONE;
}
return reset(ctx);
diff --git a/lib/layer/validate.c b/lib/layer/validate.c
index af20b2e4..321b0a25 100644
--- a/lib/layer/validate.c
+++ b/lib/layer/validate.c
@@ -12,6 +12,7 @@
#include <libknot/rrtype/rdname.h>
#include <libknot/rrtype/rrsig.h>
#include <libdnssec/error.h>
+#include <libdnssec/key.h>
#include "lib/dnssec/nsec.h"
#include "lib/dnssec/nsec3.h"
@@ -137,6 +138,7 @@ do_downgrade: // we do this deep inside calls because of having signer name avai
VERBOSE_MSG(qry,
"<= DNSSEC downgraded due to expensive NSEC3: %d iterations, %d salt length\n",
(int)knot_nsec3_iters(rd), (int)knot_nsec3_salt_len(rd));
+ kr_request_set_extended_error(qry->request, KNOT_EDNS_EDE_NSEC3_ITERS, "AUO2");
qry->flags.DNSSEC_WANT = false;
qry->flags.DNSSEC_INSECURE = true;
rank_records(qry, true, KR_RANK_INSECURE, vctx->zone_name);
@@ -242,7 +244,9 @@ static int validate_section(kr_rrset_validation_ctx_t *vctx, struct kr_query *qr
} else {
kr_rank_set(&entry->rank, KR_RANK_BOGUS);
vctx->err_cnt += 1;
- if (vctx->rrs_counters.expired > 0)
+ if (vctx->rrs_counters.expired_before_inception > 0)
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_EXPIRED_INV, "XXAP");
+ else if (vctx->rrs_counters.expired > 0)
kr_request_set_extended_error(req, KNOT_EDNS_EDE_SIG_EXPIRED, "YFJ2");
else if (vctx->rrs_counters.notyet > 0)
kr_request_set_extended_error(req, KNOT_EDNS_EDE_SIG_NOTYET, "UBBS");
@@ -368,7 +372,12 @@ static int validate_keyset(struct kr_request *req, knot_pkt_t *answer, bool has_
}
}
if (sig_index < 0) {
- kr_request_set_extended_error(req, KNOT_EDNS_EDE_RRSIG_MISS, "EZDC");
+ if (!kr_dnssec_key_zonekey_flag(qry->zone_cut.key->rrs.rdata->data)) {
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_DNSKEY_BIT, "YQEH");
+ } else {
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_RRSIG_MISS,
+ "EZDC: no valid RRSIGs for DNSKEY");
+ }
return kr_error(ENOENT);
}
const knot_rdataset_t *sig_rds = &req->answ_selected.at[sig_index]->rr->rrs;
@@ -395,15 +404,49 @@ static int validate_keyset(struct kr_request *req, knot_pkt_t *answer, bool has_
ret == 0 ? KR_RANK_SECURE : KR_RANK_BOGUS);
if (ret != 0) {
- log_bogus_rrsig(&vctx, qry->zone_cut.key, "bogus key");
- knot_rrset_free(qry->zone_cut.key, qry->zone_cut.pool);
- qry->zone_cut.key = NULL;
- if (vctx.rrs_counters.expired > 0)
+ const knot_rdataset_t *ds = &qry->zone_cut.trust_anchor->rrs;
+ int sep_keytag = kr_dnssec_key_tag(KNOT_RRTYPE_DS, ds->rdata->data, ds->rdata->len);
+ int dnskey_keytag = -1;
+ bool have_zone_key_bit = true, dnskey_algo_supported = true;
+ knot_rdata_t *rdata_sep = NULL, *rdata_i = qry->zone_cut.key->rrs.rdata;
+ for (uint8_t i = 0; i < qry->zone_cut.key->rrs.count;
+ ++i, rdata_i = knot_rdataset_next(rdata_i)) {
+ if (dnskey_keytag != sep_keytag) {
+ dnskey_keytag = kr_dnssec_key_tag(KNOT_RRTYPE_DNSKEY, rdata_i->data, rdata_i->len);
+ rdata_sep = rdata_i;
+ }
+
+ if (!kr_dnssec_key_zonekey_flag(rdata_i->data))
+ have_zone_key_bit = false;
+
+ if (!dnssec_algorithm_key_support(knot_dnskey_alg(rdata_i)))
+ dnskey_algo_supported = false;
+ }
+ bool sep_matches_tag_algo = rdata_sep && sep_keytag == dnskey_keytag &&
+ knot_ds_alg(ds->rdata) == knot_dnskey_alg(rdata_sep);
+
+ if (!have_zone_key_bit)
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_DNSKEY_BIT, "CYNG");
+ else if (!sep_matches_tag_algo)
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_DNSKEY_MISS, "NMJZ: no matching SEP");
+ else if (kr_dnssec_key_revoked(rdata_sep->data))
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_DNSKEY_MISS, "DGVI: DNSKEY matching SEP has the Revoke bit set");
+ else if (!dnskey_algo_supported)
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_DNSKEY_ALG, "H6OO");
+ else if (vctx.rrs_counters.matching_name_type == 0 && vctx.rrs_counters.key_invalid > 0)
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_RRSIG_MISS, "7N4Z: no valid RRSIGs for DNSKEY");
+ else if (vctx.rrs_counters.expired_before_inception > 0)
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_EXPIRED_INV, "4UBF");
+ else if (vctx.rrs_counters.expired > 0)
kr_request_set_extended_error(req, KNOT_EDNS_EDE_SIG_EXPIRED, "6GJV");
else if (vctx.rrs_counters.notyet > 0)
kr_request_set_extended_error(req, KNOT_EDNS_EDE_SIG_NOTYET, "4DJQ");
else
kr_request_set_extended_error(req, KNOT_EDNS_EDE_BOGUS, "EXRU");
+
+ log_bogus_rrsig(&vctx, qry->zone_cut.key, "bogus key");
+ knot_rrset_free(qry->zone_cut.key, qry->zone_cut.pool);
+ qry->zone_cut.key = NULL;
return ret;
}
@@ -1137,7 +1180,7 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
count += (knot_pkt_rr(sec, i)->type == KNOT_RRTYPE_NSEC3);
if (count > 8) {
VERBOSE_MSG(qry, "<= too many NSEC3 records in AUTHORITY (%d)\n", count);
- kr_request_set_extended_error(req, 27/*KNOT_EDNS_EDE_NSEC3_ITERS*/,
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_NSEC3_ITERS,
/* It's not about iteration values per se, but close enough. */
"DYRH: too many NSEC3 records");
qry->flags.DNSSEC_BOGUS = true;
@@ -1147,10 +1190,19 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
if (knot_wire_get_aa(pkt->wire) && qtype == KNOT_RRTYPE_DNSKEY) {
const knot_rrset_t *ds = qry->zone_cut.trust_anchor;
- if (ds && !kr_ds_algo_support(ds)) {
- VERBOSE_MSG(qry, ">< all DS entries use unsupported algorithm pairs, going insecure\n");
- /* ^ the message is a bit imprecise to avoid being too verbose */
- kr_request_set_extended_error(req, KNOT_EDNS_EDE_OTHER, "LSLC: unsupported digest/key");
+ ret = ds ? kr_ds_algo_support(ds) : kr_ok();
+ if (ret != kr_ok()) {
+ char *reason = "???";
+ if (ret == DNSSEC_INVALID_KEY_ALGORITHM) {
+ reason = "key";
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_DNSKEY_ALG, "PBAO");
+ } else if (ret == DNSSEC_INVALID_DIGEST_ALGORITHM) {
+ reason = "digest";
+ kr_request_set_extended_error(req, KNOT_EDNS_EDE_DS_DIGEST, "DDDV");
+ }
+ VERBOSE_MSG(qry,
+ ">< all DS entries are unsupported (last error: %s algorithm), going insecure\n",
+ reason);
qry->flags.DNSSEC_WANT = false;
qry->flags.DNSSEC_INSECURE = true;
rank_records(qry, true, KR_RANK_INSECURE, qry->zone_cut.name);
@@ -1320,37 +1372,6 @@ static int validate(kr_layer_t *ctx, knot_pkt_t *pkt)
VERBOSE_MSG(qry, "<= answer valid, OK\n");
return KR_STATE_DONE;
}
-
-/** Hide RRsets which did not validate from clients. */
-static int hide_bogus(kr_layer_t *ctx) {
- if (knot_wire_get_cd(ctx->req->qsource.packet->wire)) {
- return ctx->state;
- }
- /* We don't want to send bogus answers to clients, not even in SERVFAIL
- * answers, but we cannot drop whole sections. If a CNAME chain
- * SERVFAILs somewhere, the steps that were OK should be put into
- * answer.
- *
- * There is one specific issue: currently we follow CNAME *before*
- * we validate it, because... iterator comes before validator.
- * Therefore some rrsets might be added into req->*_selected before
- * we detected failure in validator.
- * TODO: better approach, probably during work on parallel queries.
- */
- const ranked_rr_array_t *sel[] = kr_request_selected(ctx->req);
- for (knot_section_t sect = KNOT_ANSWER; sect <= KNOT_ADDITIONAL; ++sect) {
- for (size_t i = 0; i < sel[sect]->len; ++i) {
- ranked_rr_array_entry_t *e = sel[sect]->at[i];
- e->to_wire = e->to_wire
- && !kr_rank_test(e->rank, KR_RANK_INDET)
- && !kr_rank_test(e->rank, KR_RANK_BOGUS)
- && !kr_rank_test(e->rank, KR_RANK_MISMATCH)
- && !kr_rank_test(e->rank, KR_RANK_MISSING);
- }
- }
- return ctx->state;
-}
-
static int validate_wrapper(kr_layer_t *ctx, knot_pkt_t *pkt) {
// Wrapper for now.
int ret = validate(ctx, pkt);
@@ -1358,25 +1379,64 @@ static int validate_wrapper(kr_layer_t *ctx, knot_pkt_t *pkt) {
struct kr_query *qry = req->current_query;
if (ret & KR_STATE_FAIL && qry->flags.DNSSEC_BOGUS)
qry->server_selection.error(qry, req->upstream.transport, KR_SELECTION_DNSSEC_ERROR);
- if (ret & KR_STATE_DONE && !qry->flags.DNSSEC_BOGUS) {
- /* Don't report extended DNS errors related to validation
- * when it managed to succeed (e.g. by trying different auth). */
- switch (req->extended_error.info_code) {
+ return ret;
+}
+
+/**
+ * Hide RRsets which did not validate from clients and clear Extended
+ * Error if a query failed validation, but later managed to succeed.
+ */
+static int validate_finalize(kr_layer_t *ctx) {
+ if (!knot_wire_get_cd(ctx->req->qsource.packet->wire)) {
+ /* We don't want to send bogus answers to clients, not even in SERVFAIL
+ * answers, but we cannot drop whole sections. If a CNAME chain
+ * SERVFAILs somewhere, the steps that were OK should be put into
+ * answer.
+ *
+ * There is one specific issue: currently we follow CNAME *before*
+ * we validate it, because... iterator comes before validator.
+ * Therefore some rrsets might be added into req->*_selected before
+ * we detected failure in validator.
+ * TODO: better approach, probably during work on parallel queries.
+ */
+ const ranked_rr_array_t *sel[] = kr_request_selected(ctx->req);
+ for (knot_section_t sect = KNOT_ANSWER; sect <= KNOT_ADDITIONAL; ++sect) {
+ for (size_t i = 0; i < sel[sect]->len; ++i) {
+ ranked_rr_array_entry_t *e = sel[sect]->at[i];
+ e->to_wire = e->to_wire
+ && !kr_rank_test(e->rank, KR_RANK_INDET)
+ && !kr_rank_test(e->rank, KR_RANK_BOGUS)
+ && !kr_rank_test(e->rank, KR_RANK_MISMATCH)
+ && !kr_rank_test(e->rank, KR_RANK_MISSING);
+ }
+ }
+ }
+
+ /* Clear DNSSEC-related Extended Error in case the request managed to succeed somehow. */
+ if (ctx->state == KR_STATE_DONE) {
+ switch (ctx->req->extended_error.info_code) {
+ case KNOT_EDNS_EDE_DNSKEY_ALG:
+ case KNOT_EDNS_EDE_DS_DIGEST:
+ case KNOT_EDNS_EDE_NSEC3_ITERS: ;
+ /* These EDEs are meant to result in _INSECURE success. */
+ const struct kr_query *qry = kr_rplan_resolved(&ctx->req->rplan);
+ if (qry->flags.DNSSEC_INSECURE)
+ break;
case KNOT_EDNS_EDE_BOGUS:
case KNOT_EDNS_EDE_NSEC_MISS:
case KNOT_EDNS_EDE_RRSIG_MISS:
case KNOT_EDNS_EDE_SIG_EXPIRED:
+ case KNOT_EDNS_EDE_EXPIRED_INV:
case KNOT_EDNS_EDE_SIG_NOTYET:
- kr_request_set_extended_error(req, KNOT_EDNS_EDE_NONE, NULL);
- break;
- case KNOT_EDNS_EDE_DNSKEY_MISS:
case KNOT_EDNS_EDE_DNSKEY_BIT:
- kr_assert(false); /* These EDE codes aren't used. */
+ case KNOT_EDNS_EDE_DNSKEY_MISS:
+ kr_request_set_extended_error(ctx->req, KNOT_EDNS_EDE_NONE, NULL);
break;
default: break; /* Remaining codes don't indicate hard DNSSEC failure. */
}
}
- return ret;
+
+ return ctx->state;
}
@@ -1385,7 +1445,7 @@ int validate_init(struct kr_module *self)
{
static const kr_layer_api_t layer = {
.consume = &validate_wrapper,
- .answer_finalize = &hide_bogus,
+ .answer_finalize = &validate_finalize,
};
self->layer = &layer;
return kr_ok();
diff --git a/lib/log.h b/lib/log.h
index a3887e57..d3bc9145 100644
--- a/lib/log.h
+++ b/lib/log.h
@@ -109,7 +109,7 @@ enum kr_log_group {
#define LOG_GRP_VALIDATOR_TAG "valdtr" /**< ``valdtr``: operations related to validate layer */
#define LOG_GRP_RESOLVER_TAG "resolv" /**< ``resolv``: operations related to resolving */
#define LOG_GRP_SELECTION_TAG "select" /**< ``select``: operations related to server selection */
-#define LOG_GRP_ZCUT_TAG "zoncut" /**< ``zonecut``: operations related to zone cut */
+#define LOG_GRP_ZCUT_TAG "zoncut" /**< ``zoncut``: operations related to zone cut */
#define LOG_GRP_COOKIES_TAG "cookie" /**< ``cookie``: operations related to cookies */
#define LOG_GRP_STATISTICS_TAG "statis" /**< ``statis``: operations related to statistics */
#define LOG_GRP_REBIND_TAG "rebind" /**< ``rebind``: operations related to rebinding */
diff --git a/lib/resolve-produce.c b/lib/resolve-produce.c
index 563a2ca2..a3a2401e 100644
--- a/lib/resolve-produce.c
+++ b/lib/resolve-produce.c
@@ -697,6 +697,18 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
if (qry->flags.NO_NS_FOUND) {
ITERATE_LAYERS(request, qry, reset);
kr_rplan_pop(rplan, qry);
+
+ /* Construct EDE message. We need it on mempool. */
+ char cut_buf[KR_DNAME_STR_MAXLEN];
+ char *msg = knot_dname_to_str(cut_buf, qry->zone_cut.name, sizeof(cut_buf));
+ if (!kr_fails_assert(msg)) {
+ if (*qry->zone_cut.name != '\0') /* Strip trailing dot. */
+ cut_buf[strlen(cut_buf) - 1] = '\0';
+ msg = kr_strcatdup_pool(&request->pool, 2,
+ "P3CD: delegation ", cut_buf);
+ }
+ kr_request_set_extended_error(request, KNOT_EDNS_EDE_NREACH_AUTH, msg);
+
return KR_STATE_FAIL;
} else {
/* FIXME: This is probably quite inefficient:
diff --git a/lib/resolve.c b/lib/resolve.c
index 4730f105..bc00471b 100644
--- a/lib/resolve.c
+++ b/lib/resolve.c
@@ -738,6 +738,17 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo
qry->flags.NO_NS_FOUND = true;
return KR_STATE_PRODUCE;
}
+
+ /* Construct EDE message. We need it on mempool. */
+ char cut_buf[KR_DNAME_STR_MAXLEN];
+ char *msg = knot_dname_to_str(cut_buf, qry->zone_cut.name, sizeof(cut_buf));
+ if (!kr_fails_assert(msg)) {
+ if (*qry->zone_cut.name != '\0') /* Strip trailing dot. */
+ cut_buf[strlen(cut_buf) - 1] = '\0';
+ msg = kr_strcatdup_pool(&request->pool, 2,
+ "OLX2: delegation ", cut_buf);
+ }
+ kr_request_set_extended_error(request, KNOT_EDNS_EDE_NREACH_AUTH, msg);
return KR_STATE_FAIL;
}
} else {
@@ -972,12 +983,15 @@ knot_mm_t *kr_resolve_pool(struct kr_request *request)
static int ede_priority(int info_code)
{
switch(info_code) {
+ case KNOT_EDNS_EDE_TOO_EARLY:
+ return 910;
case KNOT_EDNS_EDE_DNSKEY_BIT:
case KNOT_EDNS_EDE_DNSKEY_MISS:
case KNOT_EDNS_EDE_SIG_EXPIRED:
case KNOT_EDNS_EDE_SIG_NOTYET:
case KNOT_EDNS_EDE_RRSIG_MISS:
case KNOT_EDNS_EDE_NSEC_MISS:
+ case KNOT_EDNS_EDE_EXPIRED_INV:
return 900; /* Specific DNSSEC failures */
case KNOT_EDNS_EDE_BOGUS:
return 800; /* Generic DNSSEC failure */
@@ -990,6 +1004,7 @@ static int ede_priority(int info_code)
return 600; /* Policy related */
case KNOT_EDNS_EDE_DNSKEY_ALG:
case KNOT_EDNS_EDE_DS_DIGEST:
+ case KNOT_EDNS_EDE_NSEC3_ITERS:
return 500; /* Non-critical DNSSEC issues */
case KNOT_EDNS_EDE_STALE:
case KNOT_EDNS_EDE_STALE_NXD:
@@ -1002,10 +1017,12 @@ static int ede_priority(int info_code)
case KNOT_EDNS_EDE_NREACH_AUTH:
case KNOT_EDNS_EDE_NETWORK:
case KNOT_EDNS_EDE_INV_DATA:
+ case KNOT_EDNS_EDE_SYNTHESIZED:
return 200; /* Assorted codes */
case KNOT_EDNS_EDE_OTHER:
return 100; /* Most generic catch-all error */
case KNOT_EDNS_EDE_NONE:
+ case KNOT_EDNS_EDE_NONCONF_POLICY: /* Defined by an expired Internet Draft */
return 0; /* No error - allow overriding */
default:
kr_assert(false); /* Unknown info_code */
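ede_priority() orders the codes so that, when several extended errors are reported while resolving one request, the most specific one wins: a 900-level code such as SIG_EXPIRED beats the generic 800-level BOGUS, and NONE at 0 can always be overridden. A sketch of that selection rule under the assumed "highest priority wins" semantics; ede_priority() is static to lib/resolve.c and the actual comparison presumably happens when a new code is set on the request, so this is purely illustrative:

#include <stddef.h>

/* Sketch: out of several candidate codes reported during one resolution,
 * keep the one that ede_priority() ranks highest (assumed semantics). */
static int ede_pick(const int *codes, size_t count)
{
	int best = KNOT_EDNS_EDE_NONE;
	for (size_t i = 0; i < count; ++i) {
		if (ede_priority(codes[i]) > ede_priority(best))
			best = codes[i];
	}
	return best;
}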
diff --git a/lib/resolve.h b/lib/resolve.h
index 443fef29..cbc20877 100644
--- a/lib/resolve.h
+++ b/lib/resolve.h
@@ -260,6 +260,7 @@ struct kr_request {
ranked_rr_array_t add_selected;
bool answ_validated; /**< internal to validator; beware of caching, etc. */
bool auth_validated; /**< see answ_validated ^^ ; TODO */
+ bool stale_accounted;
/** Overall rank for the request.
*
diff --git a/lib/rules/api.c b/lib/rules/api.c
index 5ecbe29e..53ebbf7e 100644
--- a/lib/rules/api.c
+++ b/lib/rules/api.c
@@ -46,8 +46,12 @@ static int answer_exact_match(struct kr_query *qry, knot_pkt_t *pkt, uint16_t ty
const uint8_t *data, const uint8_t *data_bound);
static int answer_zla_empty(val_zla_type_t type, struct kr_query *qry, knot_pkt_t *pkt,
knot_db_val_t zla_lf, uint32_t ttl);
+static int answer_zla_dname(val_zla_type_t type, struct kr_query *qry, knot_pkt_t *pkt,
+ knot_db_val_t zla_lf, uint32_t ttl, knot_db_val_t *val);
static int answer_zla_redirect(struct kr_query *qry, knot_pkt_t *pkt, const char *ruleset_name,
knot_db_val_t zla_lf, uint32_t ttl);
+static int rule_local_subtree(const knot_dname_t *apex, enum kr_rule_sub_t type,
+ const knot_dname_t *target, uint32_t ttl, kr_rule_tags_t tags);
// LATER: doing tag_names_default() and kr_rule_tag_add() inside a RW transaction would be better.
static int tag_names_default(void)
@@ -418,25 +422,30 @@ int rule_local_data_answer(struct kr_query *qry, knot_pkt_t *pkt)
uint32_t ttl = KR_RULE_TTL_DEFAULT;
if (val.len >= sizeof(ttl)) // allow omitting -> can't kr_assert
deserialize_fails_assert(&val, &ttl);
- if (kr_fails_assert(val.len == 0)) {
- kr_log_error(RULES, "ERROR: unused bytes: %zu\n", val.len);
- return kr_error(EILSEQ);
- }
+
// Finally execute the rule.
switch (ztype) {
case KR_RULE_SUB_EMPTY:
case KR_RULE_SUB_NXDOMAIN:
case KR_RULE_SUB_NODATA:
ret = answer_zla_empty(ztype, qry, pkt, zla_lf, ttl);
- if (ret == kr_error(EAGAIN))
- goto shorten;
- return ret ? ret : RET_ANSWERED;
+ break;
case KR_RULE_SUB_REDIRECT:
ret = answer_zla_redirect(qry, pkt, ruleset_name, zla_lf, ttl);
- return ret ? kr_error(ret) : RET_ANSWERED;
+ break;
+ case KR_RULE_SUB_DNAME:
+ ret = answer_zla_dname(ztype, qry, pkt, zla_lf, ttl, &val);
+ break;
default:
return kr_error(EILSEQ);
}
+ if (kr_fails_assert(val.len == 0)) {
+ kr_log_error(RULES, "ERROR: unused bytes: %zu\n", val.len);
+ return kr_error(EILSEQ);
+ }
+ if (ret == kr_error(EAGAIN))
+ goto shorten;
+ return ret ? kr_error(ret) : RET_ANSWERED;
} while (true);
}
@@ -570,7 +579,17 @@ int local_data_ins(knot_db_val_t key, const knot_rrset_t *rrs,
int ret = ruledb_op(write, &key, &val, 1); // TODO: overwriting on ==tags?
// ENOSPC seems to be the only expectable error.
kr_assert(ret == 0 || ret == kr_error(ENOSPC));
- return ret;
+
+ if (ret || rrs->type != KNOT_RRTYPE_DNAME)
+ return ret;
+ // Now we do special handling for DNAMEs
+ // - we inserted as usual, so that it works with QTYPE == DNAME
+ // - now we insert a ZLA to handle generating CNAMEs
+ // - yes, some edge cases won't work as in real DNS zones (e.g. occlusion)
+ if (kr_fails_assert(rrs->rrs.count))
+ return kr_error(EINVAL);
+ return rule_local_subtree(rrs->owner, KR_RULE_SUB_DNAME,
+ knot_dname_target(rrs->rrs.rdata), rrs->ttl, tags);
}
int kr_rule_local_data_del(const knot_rrset_t *rrs, kr_rule_tags_t tags)
{
@@ -697,6 +716,78 @@ static int answer_zla_empty(val_zla_type_t type, struct kr_query *qry, knot_pkt_
return kr_ok();
}
+static int answer_zla_dname(val_zla_type_t type, struct kr_query *qry, knot_pkt_t *pkt,
+ const knot_db_val_t zla_lf, uint32_t ttl, knot_db_val_t *val)
+{
+ if (kr_fails_assert(type == KR_RULE_SUB_DNAME))
+ return kr_error(EINVAL);
+
+ const knot_dname_t *dname_target = val->data;
+ // Theoretically this check could read past val->len, but that's OK,
+ // as the policy DB contents wouldn't be written directly by a malicious party.
+ // Moreover, an over-read shouldn't cause anything worse than a clean segfault.
+ if (kr_fails_assert(knot_dname_size(dname_target) == val->len))
+ return kr_error(EILSEQ);
+ { // update *val; avoiding void* arithmetic complicates this
+ char *tmp = val->data;
+ tmp += val->len;
+ val->data = tmp;
+
+ val->len = 0;
+ }
+
+ knot_dname_t apex_name[KNOT_DNAME_MAXLEN];
+ int ret = knot_dname_lf2wire(apex_name, zla_lf.len, zla_lf.data);
+ CHECK_RET(ret);
+
+ const bool hit_apex = knot_dname_is_equal(qry->sname, apex_name);
+ if (hit_apex && type == KR_RULE_SUB_DNAME)
+ return kr_error(EAGAIN); // LATER: maybe a type that matches apex
+
+ // Start constructing the (pseudo-)packet.
+ ret = pkt_renew(pkt, qry->sname, qry->stype);
+ CHECK_RET(ret);
+ struct answer_rrset arrset;
+ memset(&arrset, 0, sizeof(arrset));
+
+ arrset.set.rr = knot_rrset_new(qry->sname, KNOT_RRTYPE_CNAME,
+ KNOT_CLASS_IN, ttl, &pkt->mm);
+ if (kr_fails_assert(arrset.set.rr))
+ return kr_error(ENOMEM);
+ const knot_dname_t *cname_target = knot_dname_replace_suffix(qry->sname,
+ knot_dname_labels(apex_name, NULL), dname_target, &pkt->mm);
+ const int rdata_len = knot_dname_size(cname_target);
+ const bool cname_fits = rdata_len <= KNOT_DNAME_MAXLEN;
+ if (cname_fits) {
+ ret = knot_rrset_add_rdata(arrset.set.rr, cname_target,
+ knot_dname_size(cname_target), &pkt->mm);
+ CHECK_RET(ret);
+ }
+
+ arrset.set.rank = KR_RANK_SECURE | KR_RANK_AUTH; // local data has high trust
+ arrset.set.expiring = false;
+
+ if (cname_fits) {
+ knot_wire_set_rcode(pkt->wire, KNOT_RCODE_NOERROR);
+ ret = knot_pkt_begin(pkt, KNOT_ANSWER);
+ CHECK_RET(ret);
+
+ // Put links to the RR into the pkt.
+ ret = pkt_append(pkt, &arrset);
+ CHECK_RET(ret);
+ } else {
+ knot_wire_set_rcode(pkt->wire, KNOT_RCODE_YXDOMAIN);
+ }
+
+ // Finishing touches.
+ qry->flags.EXPIRING = false;
+ qry->flags.CACHED = true;
+ qry->flags.NO_MINIMIZE = true;
+
+ VERBOSE_MSG(qry, "=> satisfied by local data (DNAME)\n");
+ return kr_ok();
+}
+
static int answer_zla_redirect(struct kr_query *qry, knot_pkt_t *pkt, const char *ruleset_name,
const knot_db_val_t zla_lf, uint32_t ttl)
{
@@ -760,6 +851,11 @@ nodata: // Want NODATA answer (or NOERROR if it hits apex SOA).
return kr_ok();
}
+int kr_rule_local_subtree(const knot_dname_t *apex, enum kr_rule_sub_t type,
+ uint32_t ttl, kr_rule_tags_t tags)
+{
+ return rule_local_subtree(apex, type, NULL, ttl, tags);
+}
knot_db_val_t zla_key(const knot_dname_t *apex, uint8_t key_data[KEY_MAXLEN])
{
kr_require(the_rules);
@@ -775,11 +871,16 @@ knot_db_val_t zla_key(const knot_dname_t *apex, uint8_t key_data[KEY_MAXLEN])
key.len = key_data + KEY_DNAME_END_OFFSET - (uint8_t *)key.data;
return key;
}
-int kr_rule_local_subtree(const knot_dname_t *apex, enum kr_rule_sub_t type,
- uint32_t ttl, kr_rule_tags_t tags)
+static int rule_local_subtree(const knot_dname_t *apex, enum kr_rule_sub_t type,
+ const knot_dname_t *target, uint32_t ttl, kr_rule_tags_t tags)
{
// type-check
+ const bool has_target = (type == KR_RULE_SUB_DNAME);
switch (type) {
+ case KR_RULE_SUB_DNAME:
+ if (kr_fails_assert(!!target == has_target))
+ return kr_error(EINVAL);
+ break;
case KR_RULE_SUB_EMPTY:
case KR_RULE_SUB_NXDOMAIN:
case KR_RULE_SUB_NODATA:
@@ -797,8 +898,10 @@ int kr_rule_local_subtree(const knot_dname_t *apex, enum kr_rule_sub_t type,
knot_db_val_t key = zla_key(apex, key_data);
// Prepare the data into a temporary buffer.
- const bool has_ttl = ttl != KR_RULE_TTL_DEFAULT;
- const int val_len = sizeof(tags) + sizeof(ztype) + (has_ttl ? sizeof(ttl) : 0);
+ const int target_len = has_target ? knot_dname_size(target) : 0;
+ const bool has_ttl = ttl != KR_RULE_TTL_DEFAULT || has_target;
+ const int val_len = sizeof(tags) + sizeof(ztype) + (has_ttl ? sizeof(ttl) : 0)
+ + target_len;
uint8_t buf[val_len], *data = buf;
memcpy(data, &tags, sizeof(tags));
data += sizeof(tags);
@@ -808,6 +911,10 @@ int kr_rule_local_subtree(const knot_dname_t *apex, enum kr_rule_sub_t type,
memcpy(data, &ttl, sizeof(ttl));
data += sizeof(ttl);
}
+ if (has_target) {
+ memcpy(data, target, target_len);
+ data += target_len;
+ }
kr_require(data == buf + val_len);
knot_db_val_t val = { .data = buf, .len = val_len };
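The new answer_zla_dname() synthesizes a CNAME for any name below the rule apex, mirroring the RFC 6672 DNAME rewrite: the labels under the apex are kept and the apex itself is swapped for the stored target. A worked illustration of the knot_dname_replace_suffix() call used above; the example names are made up, and the names are wire-format knot_dname_t as elsewhere in the hunk:

/* qry->sname   = www.example.com.   (the query)
 * apex_name    = example.com.       (rule apex, 2 labels)
 * dname_target = example.net.       (stored DNAME target)
 * result       = www.example.net.   (target placed into the synthesized
 *                                    CNAME's RDATA; the CNAME itself is
 *                                    owned by qry->sname)
 */
const knot_dname_t *cname_target = knot_dname_replace_suffix(
	qry->sname,
	knot_dname_labels(apex_name, NULL),  /* replace this many trailing labels */
	dname_target,
	&pkt->mm);                           /* allocate on the packet's mempool */
const bool cname_fits = knot_dname_size(cname_target) <= KNOT_DNAME_MAXLEN;
/* !cname_fits -> the hunk answers YXDOMAIN instead of adding the CNAME. */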
diff --git a/lib/rules/api.h b/lib/rules/api.h
index f1737a19..c7d1dd29 100644
--- a/lib/rules/api.h
+++ b/lib/rules/api.h
@@ -156,11 +156,14 @@ enum kr_rule_sub_t {
KR_RULE_SUB_NODATA,
/// Redirect: anything beneath has the same data as apex (except NS+SOA).
KR_RULE_SUB_REDIRECT,
+ /// Act similarly to DNAME: rebase everything underneath via generated CNAMEs.
+ KR_RULE_SUB_DNAME,
};
/** Insert a simple sub-tree rule.
*
* - into the default rule-set
* - SOA and NS for generated answers aren't overridable.
+ * - type: you can't use _DNAME via this function; insert it through kr_rule_local_data_ins()
*/
KR_EXPORT
int kr_rule_local_subtree(const knot_dname_t *apex, enum kr_rule_sub_t type,
diff --git a/lib/rules/zonefile.c b/lib/rules/zonefile.c
index d308f375..773ca937 100644
--- a/lib/rules/zonefile.c
+++ b/lib/rules/zonefile.c
@@ -47,8 +47,10 @@ static void rr_scan2trie(zs_scanner_t *s)
rr->ttl = s->r_ttl; // we could also warn here
} else {
rr = *rr_p = mm_alloc(s_data->pool, sizeof(*rr));
- knot_rrset_init(rr, NULL, s->r_type, KNOT_CLASS_IN, s->r_ttl);
- // we don't ^^ need owner so save allocation
+ knot_dname_t *owner = NULL; // we only utilize owner for DNAMEs
+ if (s->r_type == KNOT_RRTYPE_DNAME) // Nit: copy could be done a bit faster
+ owner = knot_dname_copy(s->r_owner, s_data->pool);
+ knot_rrset_init(rr, owner, s->r_type, KNOT_CLASS_IN, s->r_ttl);
}
int ret = knot_rrset_add_rdata(rr, s->r_data, s->r_data_length, s_data->pool);
kr_assert(!ret);
diff --git a/lib/utils.c b/lib/utils.c
index de7c02cb..3af2fd04 100644
--- a/lib/utils.c
+++ b/lib/utils.c
@@ -107,7 +107,7 @@ static inline int u16tostr(uint8_t *dst, uint16_t num)
return 5;
}
-char* kr_strcatdup(unsigned n, ...)
+char* kr_strcatdup_pool(knot_mm_t *pool, unsigned n, ...)
{
if (n < 1) {
return NULL;
@@ -132,7 +132,7 @@ char* kr_strcatdup(unsigned n, ...)
char *result = NULL;
if (total_len > 0) {
if (unlikely(total_len == SIZE_MAX)) return NULL;
- result = malloc(total_len + 1);
+ result = mm_alloc(pool, total_len + 1);
}
if (result) {
char *stream = result;
diff --git a/lib/utils.h b/lib/utils.h
index 9fdc2d48..8c1ef8c1 100644
--- a/lib/utils.h
+++ b/lib/utils.h
@@ -170,9 +170,11 @@ typedef struct kr_http_header_array_entry {
/** Array of HTTP headers for DoH. */
typedef array_t(kr_http_header_array_entry_t) kr_http_header_array_t;
-/** Concatenate N strings. */
+/** Concatenate N strings and put the result into a mempool. */
KR_EXPORT
-char* kr_strcatdup(unsigned n, ...);
+char* kr_strcatdup_pool(knot_mm_t *pool, unsigned n, ...);
+/** Concatenate N strings. */
+#define kr_strcatdup(n, ...) kr_strcatdup_pool(NULL, n, ## __VA_ARGS__)
/** Construct absolute file path, without resolving symlinks.
* \return malloc-ed string or NULL (+errno in that case) */
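The concatenation helper now takes an explicit knot_mm_t pool, and the old kr_strcatdup() name survives as a macro passing a NULL pool, which the mempool allocator treats as plain malloc(). A short usage sketch; the request parameter and the ownership notes are assumptions following from the macro definition above:

#include <stdlib.h>
#include "lib/resolve.h"
#include "lib/utils.h"

static void strcatdup_examples(struct kr_request *request)
{
	/* NULL pool: behaves like the old kr_strcatdup(); free() the result. */
	char *tmp = kr_strcatdup(3, "a", "-", "b");   /* "a-b" */
	free(tmp);

	/* Request mempool: the string lives (and dies) with the request, so it
	 * is safe to keep it attached to the request, e.g. as an EDE detail. */
	char *msg = kr_strcatdup_pool(&request->pool, 2, "delegation ", "example.net");
	(void)msg;
}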
diff --git a/manager/.dockerignore b/manager/.dockerignore
deleted file mode 100644
index f67cf10c..00000000
--- a/manager/.dockerignore
+++ /dev/null
@@ -1,8 +0,0 @@
-node_modules/
-.mypy_cache/
-.pytest_cache/
-.tox/
-.git/
-.vscode/
-
-containers/ \ No newline at end of file
diff --git a/manager/.flake8 b/manager/.flake8
deleted file mode 100644
index 3a7c8e74..00000000
--- a/manager/.flake8
+++ /dev/null
@@ -1,3 +0,0 @@
-[flake8]
-max-line-length = 200
-extend-ignore = E203 \ No newline at end of file
diff --git a/manager/.gitignore b/manager/.gitignore
deleted file mode 100644
index 07e42788..00000000
--- a/manager/.gitignore
+++ /dev/null
@@ -1,20 +0,0 @@
-*junit.xml
-.build_kresd/
-.coverage
-.install_kresd/
-.mypy_cache/
-.podman-cache/
-.pytest_cache/
-.pytype
-.tox/
-.vscode/
-/pkg
-__pycache__/
-build/
-dist/
-docs/_build/*
-knot_resolver_manager.egg-info/
-node_modules/
-package-lock.json
-poetry.lock
-yarn.lock
diff --git a/manager/.python-version b/manager/.python-version
deleted file mode 100644
index 7ee56de1..00000000
--- a/manager/.python-version
+++ /dev/null
@@ -1,5 +0,0 @@
-3.8.18
-3.9.18
-3.10.13
-3.11.8
-3.12.2
diff --git a/manager/README.md b/manager/README.md
deleted file mode 100644
index 48919c21..00000000
--- a/manager/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Knot Resolver Manager
-
-Knot Resolver Manager is a configuration tool for [Knot Resolver](https://gitlab.nic.cz/knot/knot-resolver). The Manager hides the complexity of running several independent resolver processes while ensuring zero-downtime reconfiguration with YAML/JSON declarative configuration and an optional HTTP API for dynamic changes.
-
-## Development
-
-If you want to learn more about the architecture or start developing, check out our [Developer Documentation](https://www.knot-resolver.cz/documentation/latest/dev/). \ No newline at end of file
diff --git a/manager/knot_resolver_manager/__init__.py b/manager/knot_resolver_manager/__init__.py
deleted file mode 100644
index 3dc1f76b..00000000
--- a/manager/knot_resolver_manager/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "0.1.0"
diff --git a/manager/knot_resolver_manager/__main__.py b/manager/knot_resolver_manager/__main__.py
deleted file mode 100644
index 89eabd56..00000000
--- a/manager/knot_resolver_manager/__main__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# pylint: skip-file
-# flake8: noqa
-
-
-def run():
- # throws nice syntax error on old Python versions:
- 0_0 # Python >= 3.7 required
-
- from knot_resolver_manager import main
-
- main.main()
-
-
-if __name__ == "__main__":
- run()
diff --git a/manager/knot_resolver_manager/cli/__init__.py b/manager/knot_resolver_manager/cli/__init__.py
deleted file mode 100644
index d3c6280d..00000000
--- a/manager/knot_resolver_manager/cli/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from pathlib import Path
-
-from knot_resolver_manager.datamodel.globals import Context, set_global_validation_context
-
-set_global_validation_context(Context(Path("."), False))
diff --git a/manager/knot_resolver_manager/cli/__main__.py b/manager/knot_resolver_manager/cli/__main__.py
deleted file mode 100644
index 88a83a67..00000000
--- a/manager/knot_resolver_manager/cli/__main__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from knot_resolver_manager.cli.main import main
-
-if __name__ == "__main__":
- main()
diff --git a/manager/knot_resolver_manager/cli/main.py b/manager/knot_resolver_manager/cli/main.py
deleted file mode 100644
index 301a9539..00000000
--- a/manager/knot_resolver_manager/cli/main.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import argparse
-import importlib
-import os
-
-from knot_resolver_manager.cli.command import install_commands_parsers
-from knot_resolver_manager.cli.kresctl import Kresctl
-
-
-def autoimport_commands() -> None:
- prefix = "knot_resolver_manager.cli.cmd."
- for module_name in os.listdir(os.path.dirname(__file__) + "/cmd"):
- if module_name[-3:] != ".py":
- continue
- importlib.import_module(f"{prefix}{module_name[:-3]}")
-
-
-def create_main_argument_parser() -> argparse.ArgumentParser:
- parser = argparse.ArgumentParser(
- "kresctl",
- description="Command-line utility that helps communicate with Knot Resolver's management API."
- "It also provides tooling to work with declarative configuration (validate, convert).",
- )
- # parser.add_argument(
- # "-i",
- # "--interactive",
- # action="store_true",
- # help="Interactive mode of kresctl utility",
- # default=False,
- # required=False,
- # )
- config_or_socket = parser.add_mutually_exclusive_group()
- config_or_socket.add_argument(
- "-s",
- "--socket",
- action="store",
- type=str,
- help="Optional, path to Unix-domain socket or network interface of the management API. "
- "Cannot be used together with '--config'.",
- default=[],
- nargs=1,
- required=False,
- )
- config_or_socket.add_argument(
- "-c",
- "--config",
- action="store",
- type=str,
- help="Optional, path to Knot Resolver declarative configuration to retrieve Unix-domain socket or "
- "network interface of the management API from. Cannot be used together with '--socket'.",
- default=[],
- nargs=1,
- required=False,
- )
- return parser
-
-
-def main() -> None:
- autoimport_commands()
- parser = create_main_argument_parser()
- install_commands_parsers(parser)
-
- namespace = parser.parse_args()
- kresctl = Kresctl(namespace, parser)
- kresctl.execute()
-
- # if namespace.interactive or len(vars(namespace)) == 2:
- # kresctl.interactive()
- # else:
- # kresctl.execute()
diff --git a/manager/knot_resolver_manager/compat/__init__.py b/manager/knot_resolver_manager/compat/__init__.py
deleted file mode 100644
index 410074cd..00000000
--- a/manager/knot_resolver_manager/compat/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from . import asyncio, dataclasses
-
-__all__ = ["asyncio", "dataclasses"]
diff --git a/manager/knot_resolver_manager/compat/dataclasses.py b/manager/knot_resolver_manager/compat/dataclasses.py
deleted file mode 100644
index f420b03d..00000000
--- a/manager/knot_resolver_manager/compat/dataclasses.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""
-This module contains rather simplistic reimplementation of dataclasses due to them being unsupported on Python 3.6
-"""
-
-from typing import Any, Dict, Set, Type
-
-dataclasses_import_success = False
-try:
- import dataclasses
-
- dataclasses_import_success = True
-except ImportError:
- pass
-
-
-_CUSTOM_DATACLASS_MARKER = "_CUSTOM_DATACLASS_MARKER"
-
-
-def dataclass(cls: Any) -> Any:
- if dataclasses_import_success:
- return dataclasses.dataclass(cls)
-
- anot: Dict[str, Type[Any]] = cls.__dict__.get("__annotations__", {})
-
- def ninit(slf: Any, *args: Any, **kwargs: Any) -> None:
- nonlocal anot
-
- ianot = iter(anot.keys())
- used: Set[str] = set()
-
- # set normal arguments
- for arg in args:
- name = next(ianot)
- setattr(slf, name, arg)
- used.add(name)
-
- # set keyd arguments
- for key, val in kwargs.items():
- assert key in anot, (
- f"Constructing dataclass with an argument '{key}' which is not defined with a type"
- f" annotation in class {cls.__name__}"
- )
- setattr(slf, key, val)
- used.add(key)
-
- # set default values
- for key in anot:
- if key in used:
- continue
- assert hasattr(
- cls, key
- ), f"Field '{key}' does not have default value and was not defined in the constructor"
- dfl = getattr(cls, key)
- setattr(slf, key, dfl)
-
- setattr(cls, "__init__", ninit)
- setattr(cls, _CUSTOM_DATACLASS_MARKER, ...)
- return cls
-
-
-def is_dataclass(obj: Any) -> bool:
- if dataclasses_import_success:
- return dataclasses.is_dataclass(obj)
-
- return hasattr(obj, _CUSTOM_DATACLASS_MARKER)
-
-
-__all__ = ["dataclass", "is_dataclass"]
diff --git a/manager/knot_resolver_manager/datamodel/__init__.py b/manager/knot_resolver_manager/datamodel/__init__.py
deleted file mode 100644
index a0174acc..00000000
--- a/manager/knot_resolver_manager/datamodel/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from .config_schema import KresConfig
-
-__all__ = ["KresConfig"]
diff --git a/manager/knot_resolver_manager/exceptions.py b/manager/knot_resolver_manager/exceptions.py
deleted file mode 100644
index 5b05d98e..00000000
--- a/manager/knot_resolver_manager/exceptions.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from typing import List
-
-
-class CancelStartupExecInsteadException(Exception):
- """
- Exception used for terminating system startup and instead
- causing an exec of something else. Could be used by subprocess
- controllers such as supervisord to allow them to run as top-level
- process in a process tree.
- """
-
- def __init__(self, exec_args: List[str], *args: object) -> None:
- self.exec_args = exec_args
- super().__init__(*args)
-
-
-class KresManagerException(Exception):
- """
- Base class for all custom exceptions we use in our code
- """
-
-
-class SubprocessControllerException(KresManagerException):
- pass
-
-
-class SubprocessControllerTimeoutException(KresManagerException):
- pass
diff --git a/manager/meson.build b/manager/meson.build
deleted file mode 100644
index 476532e5..00000000
--- a/manager/meson.build
+++ /dev/null
@@ -1,37 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-build_manager = false
-
-if get_option('manager') != 'disabled'
- message('--- manager dependencies ---')
-
- pymod = import('python')
- py3 = pymod.find_installation('python3')
- py3_deps = run_command(py3, 'tests/packaging/dependencies.py', 'setup.py', check: false)
-
- if py3.language_version().version_compare('<3.6')
- error('At least Python 3.6 is required.')
- elif py3_deps.returncode() != 0
- error(py3_deps.stderr().strip())
- else
- message('all dependencies found')
- build_manager = true
- endif
-
- message('----------------------------')
-endif
-
-if build_manager
-
- # shell completion
- subdir('shell-completion')
-
- # installation script
- meson.add_install_script('scripts/install.sh', py3.path())
-
- # YAML config configuration file
- install_data(
- sources: 'etc/knot-resolver/config.yaml',
- install_dir: etc_dir,
- )
-endif \ No newline at end of file
diff --git a/manager/poe b/manager/poe
deleted file mode 100755
index 29065280..00000000
--- a/manager/poe
+++ /dev/null
@@ -1,22 +0,0 @@
-#!/bin/bash
-
-script_dir="$(dirname "$(readlink -f "$0")")"
-
-if poetry --directory "$script_dir" run python -c 'import sys; sys.exit(0 if sys.version_info >= (3, 8) else 1)'; then
- # Run poethepoet with the project root in the $script_dir directory (requires Python >=3.8)
- poetry --directory "$script_dir" run poe --root "$script_dir" $@
-elif [ "$PWD" == "$script_dir" ]; then
- # Compatibility workarounds for Python <3.8 and poethepoet <0.22.0
- # Only works if the current working directory is the same as the script directory.
- args=("$@")
- if [ "${args[0]}" == "kresctl" ]; then
- echo "WARNING: Workaround for Python <3.8: replacing 'kresctl' with 'kresctl-nocwd'" >&2
- args[0]="kresctl-nocwd"
- fi
- poetry run poe "${args[@]}"
-else
- echo "Running script from non project root is not supported for current 'poethepoet' version." >&2
- echo "poethepoet version - must be >=0.22.0" >&2
- echo "Python version - must be >=3.8" >&2
- exit 1
-fi
diff --git a/manager/scripts/_env.sh b/manager/scripts/_env.sh
deleted file mode 100644
index cabc0025..00000000
--- a/manager/scripts/_env.sh
+++ /dev/null
@@ -1,58 +0,0 @@
-# fail on errors
-set -o errexit
-
-# define color codes
-red="\033[0;31m"
-yellow="\033[0;33m"
-green="\033[0;32m"
-bright_black="\033[0;90m"
-blue="\033[0;34m"
-reset="\033[0m"
-
-# ensure consistent top level directory
-gitroot="$(git rev-parse --show-toplevel)"
-if test -z "$gitroot"; then
- echo -e "${red}This command can be run only in a git repository tree.${reset}"
- exit 1
-fi
-cd $gitroot/manager
-
-# ensure consistent environment with virtualenv
-if test -z "$VIRTUAL_ENV" -a "$CI" != "true" -a -z "$KNOT_ENV"; then
- echo -e "${yellow}You are NOT running the script within the project's virtual environment.${reset}"
- echo -e "Do you want to continue regardless? [yN]"
- read cont
- if test "$cont" != "y" -a "$cont" != "Y"; then
- echo -e "${red}Exiting early...${reset}"
- exit 1
- fi
-fi
-
-# update PATH with node_modules
-PATH="$PATH:$gitroot/node_modules/.bin"
-
-# fail even on unbound variables
-set -o nounset
-
-
-function build_kresd {
- pushd ..
- if [ -d manager/.build_kresd ]; then
- echo
- echo Building Knot Resolver
- echo ----------------------
- echo -e "${blue}In case of an compilation error, run this command to try to fix it:${reset}"
- echo -e "\t${blue}rm -r $(realpath .install_kresd) $(realpath .build_kresd)${reset}"
- echo
- ninja -C manager/.build_kresd
- ninja install -C manager/.build_kresd
- export PYTHONPATH="$(realpath manager/.build_kresd/python):${PYTHONPATH:-}"
- else
- echo
- echo Knot Resolver daemon is not configured.
- echo "Please run './poe configure' (optionally with additional Meson arguments)"
- echo
- exit 2
- fi
- popd
-}
diff --git a/manager/scripts/docs b/manager/scripts/docs
deleted file mode 100755
index 10c83d8f..00000000
--- a/manager/scripts/docs
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-# ensure consistent behaviour
-src_dir="$(dirname "$(realpath "$0")")"
-source $src_dir/_env.sh
-cd ..
-
-echo Building documentation for Knot Resolver
-meson build_doc -Ddoc=enabled
-ninja -C build_doc doc
diff --git a/manager/scripts/examples b/manager/scripts/examples
deleted file mode 100755
index a5de766e..00000000
--- a/manager/scripts/examples
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-# ensure consistent behaviour
-src_dir="$(dirname "$(realpath "$0")")"
-source $src_dir/_env.sh
-
-# validate all configuration examples
-for example in $PWD/etc/knot-resolver/config.example.*.yaml;
-do
- poe kresctl validate --no-strict $example;
-done
diff --git a/manager/scripts/install.sh b/manager/scripts/install.sh
deleted file mode 100644
index 30b08808..00000000
--- a/manager/scripts/install.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-# ensure consistent behaviour
-scripts_dir="$(dirname "$(realpath "$0")")"
-
-# change dir to 'manager'
-cd $scripts_dir
-cd ..
-
-echo "building the Manager ..."
-python3 setup.py install
diff --git a/manager/scripts/make-package.sh b/manager/scripts/make-package.sh
deleted file mode 100644
index 85549e65..00000000
--- a/manager/scripts/make-package.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/bash
-
-set -o errexit
-set -o nounset
-
-function install_pipx {
- python3 -m pip install --user pipx
- python3 -m pipx ensurepath
- export PATH="$PATH:/root/.local/bin" # hack to make binaries installed with pipx work
-}
-
-function pipx {
- python3 -m pipx ${@}
-}
-
-function init_debian {
- export DEBIAN_FRONTEND=noninteractive
-
- # upgrade system to latest
- apt-get update -qqq
- apt-get upgrade -y -qqq
-
- # configure repository with Knot Resolver dependencies
- apt-get -y -qqq install apt-transport-https lsb-release ca-certificates wget curl gnupg2
- sh -c 'echo "deb http://download.opensuse.org/repositories/home:/CZ-NIC:/knot-resolver-build/Debian_10/ /" > /etc/apt/sources.list.d/home:CZ-NIC:knot-resolver-build.list'
- sh -c 'curl -fsSL https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-build/Debian_10/Release.key | gpg --dearmor > /etc/apt/trusted.gpg.d/home_CZ-NIC_knot-resolver-build.gpg'
- apt-get update -qqq
-
- # apkg
- apt-get install -y python3-pip meson git python3-venv
-}
-
-function init_fedora {
- # upgrade system to latest and install pip
- dnf upgrade -y
- dnf install -y python3-pip
-}
-
-
-# system setup
-if command -v dnf; then
- init_fedora
-elif command -v apt-get; then
- init_debian
-else
- echo "System not supported."
- exit 1
-fi
-
-# install apkg
-install_pipx
-pipx install apkg
-
-# prepare the repo
-#git clone https://gitlab.nic.cz/knot/knot-resolver
-cd /repo
-git config --global user.email "automated-script"
-git config --global user.name "Automated Script"
-git checkout manager-integration-without-submodule
-git submodule update --init --recursive
-
-# build the package
-apkg system-setup
-apkg build -b
-apkg srcpkg
-
-
-
-
-
-
diff --git a/manager/scripts/man b/manager/scripts/man
deleted file mode 100755
index ba28e414..00000000
--- a/manager/scripts/man
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-# ensure consistent behaviour
-src_dir="$(dirname "$(realpath "$0")")"
-source $src_dir/_env.sh
-
-build_kresd
-
-man -l .install_kresd/share/man/man8/$1* \ No newline at end of file
diff --git a/manager/scripts/meson-configure b/manager/scripts/meson-configure
deleted file mode 100755
index c99ddea7..00000000
--- a/manager/scripts/meson-configure
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-# ensure consistent behaviour
-src_dir="$(dirname "$(realpath "$0")")"
-source $src_dir/_env.sh
-
-pushd ..
-meson setup manager/.build_kresd --reconfigure --prefix=$(realpath manager/.install_kresd) "$@"
-popd
-
-build_kresd
diff --git a/manager/scripts/run b/manager/scripts/run
deleted file mode 100755
index 2080ce2a..00000000
--- a/manager/scripts/run
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/bin/bash
-
-# ensure consistent behaviour
-src_dir="$(dirname "$(realpath "$0")")"
-source $src_dir/_env.sh
-
-build_kresd
-
-echo
-echo Building Knot Resolver Manager native extensions
-echo ------------------------------------------------
-poetry build
-# copy native modules from build directory to source directory
-shopt -s globstar
-shopt -s nullglob
-for d in build/lib*; do
- for f in "$d/"**/*.so; do
- cp -v "$f" ${f#"$d/"}
- done
-done
-shopt -u globstar
-shopt -u nullglob
-
-echo
-echo Knot Manager API is accessible on http://localhost:5000
-echo -------------------------------------------------------
-
-# create runtime directories
-if [ -z "${KRES_MANAGER_RUNTIME:-}" ]; then
- KRES_MANAGER_RUNTIME="etc/knot-resolver"
-fi
-mkdir -p "$KRES_MANAGER_RUNTIME/runtime" "$KRES_MANAGER_RUNTIME/cache"
-
-if [ -z "${KRES_MANAGER_CONFIG:-}" ]; then
- KRES_MANAGER_CONFIG="$KRES_MANAGER_RUNTIME/config.dev.yaml"
-fi
-
-if [ -z "${KRES_MANAGER_API_SOCK:-}" ]; then
- KRES_MANAGER_API_SOCK="$KRES_MANAGER_RUNTIME/manager.sock"
-fi
-
-export KRES_MANAGER_CONFIG
-export KRES_MANAGER_API_SOCK
-python3 -m knot_resolver_manager $@
diff --git a/manager/setup.py b/manager/setup.py
deleted file mode 100644
index e3abf0f1..00000000
--- a/manager/setup.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# -*- coding: utf-8 -*-
-from setuptools import setup
-
-packages = \
-['knot_resolver_manager',
- 'knot_resolver_manager.cli',
- 'knot_resolver_manager.cli.cmd',
- 'knot_resolver_manager.compat',
- 'knot_resolver_manager.datamodel',
- 'knot_resolver_manager.datamodel.templates',
- 'knot_resolver_manager.datamodel.types',
- 'knot_resolver_manager.kresd_controller',
- 'knot_resolver_manager.kresd_controller.supervisord',
- 'knot_resolver_manager.kresd_controller.supervisord.plugin',
- 'knot_resolver_manager.utils',
- 'knot_resolver_manager.utils.modeling']
-
-package_data = \
-{'': ['*'], 'knot_resolver_manager.datamodel.templates': ['macros/*']}
-
-install_requires = \
-['aiohttp', 'jinja2', 'pyyaml', 'supervisor', 'typing-extensions']
-
-extras_require = \
-{'prometheus': ['prometheus-client']}
-
-entry_points = \
-{'console_scripts': ['knot-resolver = knot_resolver_manager.__main__:run',
- 'kresctl = knot_resolver_manager.cli.main:main']}
-
-setup_kwargs = {
- 'name': 'knot-resolver-manager',
- 'version': '6.0.8',
- 'description': 'A central tool for managing individual parts of Knot Resolver',
- 'long_description': 'None',
- 'author': 'Aleš Mrázek',
- 'author_email': 'ales.mrazek@nic.cz',
- 'maintainer': 'None',
- 'maintainer_email': 'None',
- 'url': 'None',
- 'packages': packages,
- 'package_data': package_data,
- 'install_requires': install_requires,
- 'extras_require': extras_require,
- 'entry_points': entry_points,
- 'python_requires': '>=3.8,<4.0',
-}
-from build_c_extensions import *
-build(setup_kwargs)
-
-setup(**setup_kwargs)
-
-
-# This setup.py was autogenerated using Poetry for backward compatibility with setuptools.
diff --git a/manager/tests/README.md b/manager/tests/README.md
deleted file mode 100644
index 1b6fc185..00000000
--- a/manager/tests/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Testing infrastructure
-
-## Unit tests
-
-The unit tests use `pytest` and can be invoked by the command `poe test`. They reside in the `unit` subdirectory. They can be run from freshly cloned repository and they should suceed.
-
-## Integration tests
-
-The integration tests spawn a full manager with `kresd` instances (which it expects to be installed). The tests are implemented by a custom script and they can be invoked by `poe integration` command. \ No newline at end of file
diff --git a/manager/tests/packaging/control b/manager/tests/packaging/control
deleted file mode 100644
index 75c27093..00000000
--- a/manager/tests/packaging/control
+++ /dev/null
@@ -1,41 +0,0 @@
-{# Test that all packages are installed #}
-Tests: dependencies.py
-Tests-Directory: manager/tests/packaging/
-
-
-{# Test that kresctl command exists and is in $PATH #}
-Tests: kresctl.sh
-Tests-Directory: manager/tests/packaging
-
-
-{# Test that knot-resolver command exists and is in $PATH #}
-Tests: knot-resolver.sh
-Tests-Directory: manager/tests/packaging
-
-
-{# Tests that manager can be started with default config and it resolves some domains #}
-Tests: systemd_service.sh
-Tests-Directory: manager/tests/packaging
-Restrictions: needs-root
-{% if distro.match('fedora') -%}
-Depends: knot-utils, jq, curl, procps
-{% elif distro.match('debian') or distro.match('ubuntu') -%}
-Depends: knot-dnsutils, jq, curl, procps
-{% elif distro.match('arch') -%}
-Depends: knot, jq, curl
-{% elif distro.match('rocky', 'centos') -%}
-Depends: knot-utils, jq, curl
-{% elif distro.match('almalinux') -%}
-Depends: knot-utils, jq, curl-minimal, procps
-{% elif distro.match('opensuse') -%}
-Depends: knot-utils, jq, curl
-{% else -%}
-Depends: unsupported-distro-this-package-does-not-exist-and-the-test-should-fail
-{%- endif %}
-
-
-Tests: manpage.sh
-Tests-Directory: manager/tests/packaging
-{% if distro.match('fedora') or distro.match('rocky') or distro.match('opensuse') -%}
-Depends: man
-{%- endif %}
diff --git a/manager/tests/unit/__init__.py b/manager/tests/unit/__init__.py
deleted file mode 100644
index d3c6280d..00000000
--- a/manager/tests/unit/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from pathlib import Path
-
-from knot_resolver_manager.datamodel.globals import Context, set_global_validation_context
-
-set_global_validation_context(Context(Path("."), False))
diff --git a/manager/tests/unit/test_knot_resolver_manager.py b/manager/tests/unit/test_knot_resolver_manager.py
deleted file mode 100644
index ed67e354..00000000
--- a/manager/tests/unit/test_knot_resolver_manager.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from knot_resolver_manager import __version__
-
-
-def test_version():
- assert __version__ == "0.1.0"
diff --git a/manager/tests/unit/utils/test_dataclasses.py b/manager/tests/unit/utils/test_dataclasses.py
deleted file mode 100644
index c402c092..00000000
--- a/manager/tests/unit/utils/test_dataclasses.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from knot_resolver_manager.compat.dataclasses import dataclass, is_dataclass
-
-
-def test_dataclass():
- @dataclass
- class A:
- b: int = 5
-
- val = A(6)
- assert val.b == 6
-
- val = A(b=7)
- assert val.b == 7
-
- assert is_dataclass(A)
diff --git a/meson.build b/meson.build
index 8b4bd83b..267145ea 100644
--- a/meson.build
+++ b/meson.build
@@ -22,12 +22,12 @@ knot_version = '>=3.3'
libknot = dependency('libknot', version: knot_version)
libdnssec = dependency('libdnssec', version: knot_version)
libzscanner = dependency('libzscanner', version: knot_version)
-libuv = dependency('libuv', version: '>=1.7')
+libuv = dependency('libuv', version: '>=1.27') # need uv_udp_connect()
lmdb = dependency('lmdb', required: false)
if not lmdb.found() # darwin workaround: missing pkgconfig
lmdb = meson.get_compiler('c').find_library('lmdb')
endif
-gnutls = dependency('gnutls')
+gnutls = dependency('gnutls', version: '>=3.4')
luajit = dependency('luajit')
# https://mesonbuild.com/howtox.html#add-math-library-lm-portably
libm = meson.get_compiler('c').find_library('m', required : false)
@@ -35,6 +35,7 @@ message('------------------------------')
# Variables
+auto_prefixes = ['/', '/usr', '/usr/local']
libkres_soversion = 9
libext = '.so'
@@ -56,7 +57,15 @@ modules_dir = lib_dir / 'kres_modules'
sbin_dir = prefix / get_option('sbindir')
bin_dir = prefix / get_option('bindir')
if host_machine.system() == 'linux'
- run_dir = '/run' / 'knot-resolver'
+ # When installing from sources with a non-standard prefix,
+ # the run directory has to be placed under that prefix as well,
+ # otherwise the rwx permission check on the run directory
+ # fails with a validation error
+ if prefix in auto_prefixes
+ run_dir = '/run' / 'knot-resolver'
+ else
+ run_dir = prefix / 'run' / 'knot-resolver'
+ endif
elif host_machine.system() == 'darwin'
run_dir = prefix / get_option('localstatedir') / 'run' / 'knot-resolver'
else
@@ -73,7 +82,6 @@ completion_dir = prefix / 'share'
# When installing from sources into a non-standard prefix and the library is
# shared/dynamic, we need to set the executables' RPATH so that they can find
# `libkresd`, otherwise running them will fail with dynamic linkage errors
-auto_prefixes = ['/', '/usr', '/usr/local']
rpath_opt = get_option('install_rpath')
if (get_option('default_library') == 'static' or
rpath_opt == 'disabled' or
@@ -139,9 +147,15 @@ systemd_files = get_option('systemd_files')
systemd_legacy_units = get_option('systemd_legacy_units')
libsystemd = dependency('libsystemd', required: systemd_files == 'enabled')
+# Uh, lifted this trivial line from tests/meson.build due to dependency sorting:
+build_extra_tests = get_option('extra_tests') == 'enabled'
+
### Allocator
# use empty name to disable the dependency, but still compile the dependent kresd
-malloc_name = get_option('malloc') == 'disabled' ? '' : 'jemalloc'
+malloc_name = ''
+if get_option('malloc') == 'jemalloc' or (get_option('malloc') == 'auto' and not build_extra_tests)
+ malloc_name = 'jemalloc'
+endif
malloc = meson.get_compiler('c').find_library(
malloc_name,
required: get_option('malloc') == 'jemalloc',
@@ -264,7 +278,7 @@ subdir('lib')
## Remaining code
subdir('daemon')
subdir('modules')
-subdir('python')
+subdir('python' / 'knot_resolver')
subdir('utils')
if get_option('bench') == 'enabled'
subdir('bench')
@@ -300,7 +314,7 @@ message('--- lint dependencies ---')
clangtidy = find_program('clang-tidy', required: false)
luacheck = find_program('luacheck', required: false)
flake8 = find_program('flake8', required: false)
-pylint_run = find_program('scripts/run-pylint.sh')
+pylint_run = find_program('scripts/meson/run-pylint.sh')
message('-------------------------')
if clangtidy.found()
diff --git a/modules/dns64/dns64.lua b/modules/dns64/dns64.lua
index b4fb1ecb..4dc8cb45 100644
--- a/modules/dns64/dns64.lua
+++ b/modules/dns64/dns64.lua
@@ -152,7 +152,7 @@ function M.layer.consume(state, req, pkt)
end
end
ffi.C.kr_ranked_rrarray_finalize(req.answ_selected, qry.uid, req.pool)
- req:set_extended_error(kres.extended_error.FORGED, "BHD4: DNS64 synthesis")
+ req:set_extended_error(kres.extended_error.SYNTHESIZED, "BHD4: from DNS64")
end
local function hexchar2int(char)
diff --git a/modules/policy/policy.lua b/modules/policy/policy.lua
index bf796a6d..036e8cf6 100644
--- a/modules/policy/policy.lua
+++ b/modules/policy/policy.lua
@@ -857,10 +857,14 @@ function policy.TAGS_ASSIGN(names)
end
-- Perform a list of actions sequentially; meant for kr_view_insert_action().
+-- Return value of the last one is propagated.
function policy.COMBINE(list)
if #list == 1 then return list[1] end
local r = 'function(state,req) '
- for _, item in ipairs(list) do
+ for i, item in ipairs(list) do
+ if i == #list then
+ r = r .. 'return '
+ end
r = r .. item .. '(state,req); '
end
return r .. 'end'
@@ -934,7 +938,11 @@ policy.layer = {
if ffi.C.kr_view_select_action(req, view_action_buf) == 0 then
local act_str = ffi.string(view_action_buf[0].data, view_action_buf[0].len)
- loadstring('return ' .. act_str)()(state, req)
+ local new_state = loadstring('return '..act_str)()(state, req)
+ -- We still respect the chain-rule notion, i.e. we skip
+ -- lua-configured policy rules iff the action was "final"
+ -- (`refused` and `noanswer` in the current 6.x)
+ if new_state ~= nil then return new_state end
end
if ffi.C.ratelimiting_request_begin(req) then return end
diff --git a/modules/serve_stale/serve_stale.lua b/modules/serve_stale/serve_stale.lua
index faf07fbe..d1b18f90 100644
--- a/modules/serve_stale/serve_stale.lua
+++ b/modules/serve_stale/serve_stale.lua
@@ -27,7 +27,9 @@ M.layer = {
local now = ffi.C.kr_now()
local deadline = qry.creation_time_mono + M.timeout
if now > deadline or qry.flags.NO_NS_FOUND then
- log_debug(ffi.C.LOG_GRP_SRVSTALE, ' => no reachable NS, using stale data')
+ log_qry(qry, ffi.C.LOG_GRP_SRVSTALE,
+ ' => no reachable NS, using stale data "%s"',
+ kres.dname2str(qry:name()))
qry.stale_cb = M.callback
-- TODO: probably start the same request that doesn't stale-serve,
-- but first we need some detection of non-interactive / internal requests.
@@ -36,6 +38,23 @@ M.layer = {
return state
end,
+
+ answer_finalize = function (state, req)
+ local qry = req:resolved()
+ if state ~= kres.DONE or qry == nil then
+ return state
+ end
+
+ if req.stale_accounted and qry.stale_cb ~= nil then
+ if req.answer:rcode() == kres.rcode.NOERROR then
+ req:set_extended_error(kres.extended_error.STALE, 'WFAC')
+ elseif req.answer:rcode() == kres.rcode.NXDOMAIN then
+ req:set_extended_error(kres.extended_error.STALE_NXD, 'QSF6')
+ end
+ end
+
+ return state
+ end,
}
return M
diff --git a/modules/stats/README.rst b/modules/stats/README.rst
index 1def925c..e9258274 100644
--- a/modules/stats/README.rst
+++ b/modules/stats/README.rst
@@ -55,6 +55,8 @@ Built-in counters keep track of number of queries and answers matching specific
+-----------------+----------------------------------+
| answer.cached | queries answered from cache |
+-----------------+----------------------------------+
+| answer.stale | queries that utilized stale data |
++-----------------+----------------------------------+
+-----------------+----------------------------------+
| **Answers categorized by RCODE** |
diff --git a/modules/stats/stats.c b/modules/stats/stats.c
index deed9c94..596847d7 100644
--- a/modules/stats/stats.c
+++ b/modules/stats/stats.c
@@ -37,12 +37,17 @@
#define UPSTREAMS_COUNT 512 /* Size of recent upstreams */
#endif
-/** @cond internal Fixed-size map of predefined metrics. */
+/** @cond internal Fixed-size map of predefined metrics.
+ *
+ * When changing the list, don't forget _parse_resolver_metrics()
+ * in ../../manager/knot_resolver_manager/statistics.py
+ */
#define CONST_METRICS(X) \
X(answer,total) X(answer,noerror) X(answer,nodata) X(answer,nxdomain) X(answer,servfail) \
X(answer,cached) X(answer,1ms) X(answer,10ms) X(answer,50ms) X(answer,100ms) \
X(answer,250ms) X(answer,500ms) X(answer,1000ms) X(answer,1500ms) X(answer,slow) \
X(answer,sum_ms) \
+ X(answer,stale) \
X(answer,aa) X(answer,tc) X(answer,rd) X(answer,ra) X(answer, ad) X(answer,cd) \
X(answer,edns0) X(answer,do) \
X(query,edns) X(query,dnssec) \
@@ -303,6 +308,7 @@ static int collect(kr_layer_t *ctx)
DEPRECATED
use new names metric_answer_edns0 and metric_answer_do
*/
+ stat_const_add(data, metric_answer_stale, param->stale_accounted);
stat_const_add(data, metric_query_edns, knot_pkt_has_edns(param->answer));
stat_const_add(data, metric_query_dnssec, knot_pkt_has_dnssec(param->answer));
diff --git a/modules/ta_update/ta_update.test.integr/rfc5011/dns2rpl.py b/modules/ta_update/ta_update.test.integr/rfc5011/dns2rpl.py
index 317d6719..6002e830 100755
--- a/modules/ta_update/ta_update.test.integr/rfc5011/dns2rpl.py
+++ b/modules/ta_update/ta_update.test.integr/rfc5011/dns2rpl.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
"""
Generate RFC 5011 test simulating successful KSK roll-over in 2017.
diff --git a/modules/ta_update/ta_update.test.integr/rfc5011/genkeyszones.sh b/modules/ta_update/ta_update.test.integr/rfc5011/genkeyszones.sh
index 4a654695..5ff1d8f1 100755
--- a/modules/ta_update/ta_update.test.integr/rfc5011/genkeyszones.sh
+++ b/modules/ta_update/ta_update.test.integr/rfc5011/genkeyszones.sh
@@ -1,4 +1,4 @@
-#!/usr/bin/bash
+#!/usr/bin/env bash
# First, generate DNSSEC keys with timers set to simulate 2017 KSK roll-over.
# Second, fake system time to pretend that we are at the beginning on time slots
diff --git a/modules/workarounds/workarounds.lua b/modules/workarounds/workarounds.lua
index 4ce7c478..4cbfdb9d 100644
--- a/modules/workarounds/workarounds.lua
+++ b/modules/workarounds/workarounds.lua
@@ -4,7 +4,7 @@ if not policy then modules.load('policy') end
local M = {} -- the module
-function M.config()
+function M.init()
policy.add(policy.suffix(policy.FLAGS('NO_0X20'), {
-- https://github.com/DNS-OARC/dns-violations/blob/master/2017/DVE-2017-0003.md
todname('avqs.mcafee.com'), todname('avts.mcafee.com'),
diff --git a/poe b/poe
new file mode 100755
index 00000000..d1f58894
--- /dev/null
+++ b/poe
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+script_dir="$(dirname "$(readlink -f "$0")")"
+exec poetry --directory "$script_dir" run poe --root "$script_dir" "$@"
diff --git a/manager/pyproject.toml b/pyproject.toml
index 7f1bde1f..e545eaa0 100644
--- a/manager/pyproject.toml
+++ b/pyproject.toml
@@ -1,12 +1,24 @@
[tool.poetry]
-name = "knot-resolver-manager"
+name = "knot-resolver"
version = "6.0.8"
-description = "A central tool for managing individual parts of Knot Resolver"
-
+description = "Knot Resolver Manager - a Python program that automatically manages the other components of the resolver"
+license = "GPL-3.0-or-later"
authors = [
"Aleš Mrázek <ales.mrazek@nic.cz>",
"Václav Šraier <vaclav.sraier@nic.cz>"
]
+maintainers = [
+ "Aleš Mrázek <ales.mrazek@nic.cz>"
+]
+readme = "README.md"
+homepage = "https://www.knot-resolver.cz"
+repository = "https://gitlab.nic.cz/knot/knot-resolver"
+documentation = "https://www.knot-resolver.cz/documentation"
+
+packages = [
+ {include = "knot_resolver", from = "python"}
+]
+exclude = ["**/*.in", "**/meson.build"]
# See currently open issue about building C extensions here:
# https://github.com/python-poetry/poetry/issues/2740
@@ -36,6 +48,7 @@ debugpy = "^1.8.1"
pytest = "^8.0.1"
pytest-cov = "^4.1.0"
pytest-asyncio = "^0.23.5"
+toml = "^0.10.2"
[tool.poetry.group.lint.dependencies]
black = "^24.2.0"
@@ -54,30 +67,24 @@ breathe = "^4.35.0"
json-schema-for-humans = "^0.47"
[tool.poetry.scripts]
-kresctl = 'knot_resolver_manager.cli.main:main'
-knot-resolver = 'knot_resolver_manager.__main__:run'
+kresctl = 'knot_resolver.client.main:main'
+knot-resolver = 'knot_resolver.manager.main:main'
[tool.poe.tasks]
-configure = { cmd = "scripts/meson-configure", help = "Configure Knot Resolver daemon" }
-run = { cmd = "scripts/run", help = "Run the manager" }
-run-debug = { cmd = "scripts/run-debug", help = "Run the manager under debugger" }
-docs = { cmd = "scripts/docs", help = "Create HTML documentation" }
-test = { shell = "env PYTHONPATH=. pytest --junitxml=unit.junit.xml --cov=knot_resolver_manager --show-capture=all tests/unit/", help = "Run tests" }
-check = { cmd = "scripts/codecheck", help = "Run static code analysis" }
-format = { shell = "black knot_resolver_manager/ tests/ scripts/ build_c_extensions.py; isort .", help = "Run code formatter" }
-fixdeps = { shell = "poetry install; npm install; npm update", help = "Install/update dependencies according to configuration files"}
-examples = { cmd = "scripts/examples", help = "Validate all configuration examples" }
-kresctl = { script = "knot_resolver_manager.cli.main:main", cwd="${POE_PWD}", help="run kresctl" }
-kresctl-nocwd = { script = "knot_resolver_manager.cli.main:main", help="run kresctl" } # Python <3.8 and poethepoet <0.22.0 compatibility (see also `./poe`)
-clean = """
- rm -rf .coverage
- .mypy_cache
- .pytest_cache
- ./**/__pycache__
- dist
-"""
-gen-setuppy = { shell = "python scripts/create_setup.py > setup.py", help = "Generate setup.py file for backwards compatibility" }
-man = {cmd = "scripts/man", help = "Display manpage from sources" }
+# tasks run through scripts located in 'scripts/poe-tasks/'
+configure = { cmd = "scripts/poe-tasks/configure", help = "(Re)configure Meson build directory" }
+run = { cmd = "scripts/poe-tasks/run", help = "Run Knot Resolver" }
+run-debug = { cmd = "scripts/poe-tasks/run-debug", help = "Debug Knot Resolver with debugpy" }
+doc = { cmd = "scripts/poe-tasks/doc", help = "Create Knot Resolver HTML documentation" }
+doc-schema = { cmd = "scripts/poe-tasks/doc-schema", help = "Generate a JSON schema of the Knot Resolver configuration"}
+test = { cmd = "scripts/poe-tasks/test", help = "Run pytest unit tests" }
+check = { cmd = "scripts/poe-tasks/check", help = "Check that all dependencies are installed and run static code analysis" }
+examples = { cmd = "scripts/poe-tasks/examples", help = "Validate all configuration examples using the 'kresctl validate' utility" }
+gen-constantspy = { cmd = "scripts/poe-tasks/gen-constantspy", help = "Generate 'constants.py' module using Meson configured options" }
+gen-setuppy = { cmd = "scripts/poe-tasks/gen-setuppy", help = "Generate 'setup.py' file for backwards compatibility" }
+format = { cmd = "scripts/poe-tasks/format", help = "Run code formatter" }
+kresctl = { cmd = "scripts/poe-tasks/kresctl", help="Run kresctl utility" }
+clean = { cmd = "scripts/poe-tasks/clean", help="Cleanup build directories and files" }
[tool.black]
line-length = 120
@@ -93,6 +100,7 @@ include_trailing_comma=true # corresponds to -tc flag
skip_glob = '^((?!py$).)*$' # isort all Python files
float_to_top=true
skip = "setup.py" # Poetry generates it and we want to keep it unchanged
+known_first_party="knot_resolver"
[tool.pylint."MESSAGES CONTROL"]
disable= [
diff --git a/python/knot_resolver.py.in b/python/knot_resolver.py.in
deleted file mode 100644
index 262f7a84..00000000
--- a/python/knot_resolver.py.in
+++ /dev/null
@@ -1,10 +0,0 @@
-from pathlib import Path
-
-__version__ = "@kres_version@"
-
-sbin_dir = Path("@sbin_dir@")
-bin_dir = Path("@bin_dir@")
-etc_dir = Path("@etc_dir@")
-run_dir = Path("@run_dir@")
-lib_dir = Path("@lib_dir@")
-modules_dir = Path("@modules_dir@")
diff --git a/python/knot_resolver/__init__.py b/python/knot_resolver/__init__.py
new file mode 100644
index 00000000..a91e4593
--- /dev/null
+++ b/python/knot_resolver/__init__.py
@@ -0,0 +1,6 @@
+from .constants import VERSION
+from .exceptions import KresBaseException
+
+__version__ = VERSION
+
+__all__ = ["KresBaseException"]
diff --git a/python/knot_resolver/client/__init__.py b/python/knot_resolver/client/__init__.py
new file mode 100644
index 00000000..5b82d3be
--- /dev/null
+++ b/python/knot_resolver/client/__init__.py
@@ -0,0 +1,5 @@
+from pathlib import Path
+
+from knot_resolver.datamodel.globals import Context, set_global_validation_context
+
+set_global_validation_context(Context(Path("."), False))
diff --git a/python/knot_resolver/client/__main__.py b/python/knot_resolver/client/__main__.py
new file mode 100644
index 00000000..56200674
--- /dev/null
+++ b/python/knot_resolver/client/__main__.py
@@ -0,0 +1,4 @@
+from knot_resolver.client.main import main
+
+if __name__ == "__main__":
+ main()
diff --git a/manager/knot_resolver_manager/cli/kresctl.py b/python/knot_resolver/client/client.py
index cbcc12a3..4e7d13ea 100644
--- a/manager/knot_resolver_manager/cli/kresctl.py
+++ b/python/knot_resolver/client/client.py
@@ -1,14 +1,16 @@
import argparse
-from knot_resolver_manager.cli.command import CommandArgs
+from knot_resolver.client.command import CommandArgs
+KRES_CLIENT_NAME = "kresctl"
-class Kresctl:
+
+class KresClient:
def __init__(
self,
namespace: argparse.Namespace,
parser: argparse.ArgumentParser,
- prompt: str = "kresctl",
+ prompt: str = KRES_CLIENT_NAME,
) -> None:
self.path = None
self.prompt = prompt
diff --git a/manager/knot_resolver_manager/cli/command.py b/python/knot_resolver/client/command.py
index 72154a40..960ac1f5 100644
--- a/manager/knot_resolver_manager/cli/command.py
+++ b/python/knot_resolver/client/command.py
@@ -1,16 +1,14 @@
import argparse
-import os
from abc import ABC, abstractmethod # pylint: disable=[no-name-in-module]
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Type, TypeVar
from urllib.parse import quote
-from knot_resolver_manager.constants import API_SOCK_ENV_VAR, CONFIG_FILE_ENV_VAR, DEFAULT_MANAGER_CONFIG_FILE
-from knot_resolver_manager.datamodel.config_schema import DEFAULT_MANAGER_API_SOCK
-from knot_resolver_manager.datamodel.types import IPAddressPort
-from knot_resolver_manager.utils.modeling import parsing
-from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
-from knot_resolver_manager.utils.requests import SocketDesc
+from knot_resolver.constants import API_SOCK_FILE, CONFIG_FILE
+from knot_resolver.datamodel.types import IPAddressPort
+from knot_resolver.utils.modeling import parsing
+from knot_resolver.utils.modeling.exceptions import DataValidationError
+from knot_resolver.utils.requests import SocketDesc
T = TypeVar("T", bound=Type["Command"])
@@ -66,31 +64,22 @@ def get_socket_from_config(config: Path, optional_file: bool) -> Optional[Socket
def determine_socket(namespace: argparse.Namespace) -> SocketDesc:
- # 1) socket from 'kresctl --socket' argument
+ # 1) socket from '--socket' argument
if len(namespace.socket) > 0:
return SocketDesc(namespace.socket[0], "--socket argument")
- config_path = os.getenv(CONFIG_FILE_ENV_VAR)
- socket_env = os.getenv(API_SOCK_ENV_VAR)
-
socket: Optional[SocketDesc] = None
- # 2) socket from config file ('kresctl --config' argument)
+ # 2) socket from config file ('--config' argument)
if len(namespace.config) > 0:
socket = get_socket_from_config(namespace.config[0], False)
- # 3) socket from config file (environment variable)
- elif config_path:
- socket = get_socket_from_config(Path(config_path), False)
- # 4) socket from environment variable
- elif socket_env:
- socket = SocketDesc(socket_env, f'Environment variable "{API_SOCK_ENV_VAR}"')
- # 5) socket from config file (default config file constant)
+ # 3) socket from config file (default config file constant)
else:
- socket = get_socket_from_config(DEFAULT_MANAGER_CONFIG_FILE, True)
+ socket = get_socket_from_config(CONFIG_FILE, True)
if socket:
return socket
- # 6) socket default
- return SocketDesc(DEFAULT_MANAGER_API_SOCK, f'Default value "{DEFAULT_MANAGER_API_SOCK}"')
+ # 4) socket default
+ return SocketDesc(str(API_SOCK_FILE), f'Default value "{API_SOCK_FILE}"')
class CommandArgs:
diff --git a/manager/knot_resolver_manager/cli/cmd/cache.py b/python/knot_resolver/client/commands/cache.py
index e5a15035..60417eec 100644
--- a/manager/knot_resolver_manager/cli/cmd/cache.py
+++ b/python/knot_resolver/client/commands/cache.py
@@ -3,11 +3,11 @@ import sys
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Type
-from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
-from knot_resolver_manager.datamodel.cache_schema import CacheClearRPCSchema
-from knot_resolver_manager.utils.modeling.exceptions import AggregateDataValidationError, DataValidationError
-from knot_resolver_manager.utils.modeling.parsing import DataFormat, parse_json
-from knot_resolver_manager.utils.requests import request
+from knot_resolver.client.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver.datamodel.cache_schema import CacheClearRPCSchema
+from knot_resolver.utils.modeling.exceptions import AggregateDataValidationError, DataValidationError
+from knot_resolver.utils.modeling.parsing import DataFormat, parse_json
+from knot_resolver.utils.requests import request
class CacheOperations(Enum):
@@ -19,7 +19,9 @@ class CacheCommand(Command):
def __init__(self, namespace: argparse.Namespace) -> None:
super().__init__(namespace)
self.operation: Optional[CacheOperations] = namespace.operation if hasattr(namespace, "operation") else None
- self.out_format: DataFormat = namespace.out_format if hasattr(namespace, "out_format") else DataFormat.YAML
+ self.output_format: DataFormat = (
+ namespace.output_format if hasattr(namespace, "output_format") else DataFormat.YAML
+ )
# CLEAR operation
self.clear_dict: Dict[str, Any] = {}
@@ -36,29 +38,32 @@ class CacheCommand(Command):
def register_args_subparser(
subparser: "argparse._SubParsersAction[argparse.ArgumentParser]",
) -> Tuple[argparse.ArgumentParser, "Type[Command]"]:
- cache_parser = subparser.add_parser("cache", help="Performs operations on the running resolver's cache.")
+ cache_parser = subparser.add_parser("cache", help="Performs operations on the cache of the running resolver.")
config_subparsers = cache_parser.add_subparsers(help="operation type")
- # CLEAR operation
- clear_subparser = config_subparsers.add_parser("clear", help="Purge cache records matching specified criteria.")
+ # 'clear' operation
+ clear_subparser = config_subparsers.add_parser(
+ "clear", help="Purge cache records that match specified criteria."
+ )
clear_subparser.set_defaults(operation=CacheOperations.CLEAR, exact_name=False)
clear_subparser.add_argument(
"--exact-name",
- help="If set, only records with the same name are removed.",
+ help="If set, only records with the same name are purged.",
action="store_true",
dest="exact_name",
)
clear_subparser.add_argument(
"--rr-type",
- help="Optional, you may additionally specify the type to remove, but that is only supported with '--exact-name' flag set.",
+ help="Optional, the resource record type to purge. It is supported only with the '--exact-name' flag set.",
action="store",
type=str,
)
clear_subparser.add_argument(
"--chunk-size",
- help="Optional, the number of records to remove in one round; default: 100."
- " The purpose is not to block the resolver for long. The resolver repeats the command after one millisecond until all matching data are cleared.",
+ help="Optional, the number of records to remove in one round; the default is 100."
+ " The purpose is not to block the resolver for long."
+ " The resolver repeats the cache clearing after one millisecond until all matching data is cleared.",
action="store",
type=int,
default=100,
@@ -67,27 +72,27 @@ class CacheCommand(Command):
"name",
type=str,
nargs="?",
- help="Optional, subtree to purge; if the name isn't provided, whole cache is purged (and any other parameters are disregarded).",
+ help="Optional, subtree name to purge; if omitted, the entire cache is purged (and all other parameters are ignored).",
default=None,
)
- out_format = clear_subparser.add_mutually_exclusive_group()
- out_format_default = DataFormat.YAML
- out_format.add_argument(
+ output_format = clear_subparser.add_mutually_exclusive_group()
+ output_format_default = DataFormat.YAML
+ output_format.add_argument(
"--json",
- help="Set output format in JSON format, default.",
+ help="Set JSON as the output format.",
const=DataFormat.JSON,
action="store_const",
- dest="out_format",
- default=out_format_default,
+ dest="output_format",
+ default=output_format_default,
)
- out_format.add_argument(
+ output_format.add_argument(
"--yaml",
- help="Set configuration data in YAML format.",
+ help="Set YAML as the output format. YAML is the default.",
const=DataFormat.YAML,
action="store_const",
- dest="out_format",
- default=out_format_default,
+ dest="output_format",
+ default=output_format_default,
)
return cache_parser, CacheCommand
@@ -115,4 +120,4 @@ class CacheCommand(Command):
if response.status != 200:
print(response, file=sys.stderr)
sys.exit(1)
- print(self.out_format.dict_dump(body_dict, indent=4))
+ print(self.output_format.dict_dump(body_dict, indent=4))
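
The '--chunk-size' help text above describes clearing the cache in bounded rounds so the resolver is never blocked for long. Purely as an illustration of that idea (a sketch, not the resolver's actual implementation), a chunked removal loop with a ~1 ms pause between rounds looks like this:

import asyncio

async def clear_in_chunks(records: list, chunk_size: int = 100) -> None:
    # remove at most chunk_size entries per round, then yield briefly
    # so other tasks are not starved while clearing proceeds
    while records:
        del records[:chunk_size]
        await asyncio.sleep(0.001)

asyncio.run(clear_in_chunks(list(range(1000))))
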
diff --git a/manager/knot_resolver_manager/cli/cmd/completion.py b/python/knot_resolver/client/commands/completion.py
index 87a91838..05fdded8 100644
--- a/manager/knot_resolver_manager/cli/cmd/completion.py
+++ b/python/knot_resolver/client/commands/completion.py
@@ -2,7 +2,7 @@ import argparse
from enum import Enum
from typing import List, Tuple, Type
-from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver.client.command import Command, CommandArgs, CompWords, register_command
class Shells(Enum):
diff --git a/manager/knot_resolver_manager/cli/cmd/config.py b/python/knot_resolver/client/commands/config.py
index f0be2cbc..c3c976e4 100644
--- a/manager/knot_resolver_manager/cli/cmd/config.py
+++ b/python/knot_resolver/client/commands/config.py
@@ -1,13 +1,11 @@
import argparse
import sys
from enum import Enum
-from typing import List, Optional, Tuple, Type
+from typing import List, Literal, Optional, Tuple, Type
-from typing_extensions import Literal
-
-from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
-from knot_resolver_manager.utils.modeling.parsing import DataFormat, parse_json, try_to_parse
-from knot_resolver_manager.utils.requests import request
+from knot_resolver.client.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver.utils.modeling.parsing import DataFormat, parse_json, try_to_parse
+from knot_resolver.utils.requests import request
class Operations(Enum):
diff --git a/manager/knot_resolver_manager/cli/cmd/convert.py b/python/knot_resolver/client/commands/convert.py
index 7bb2858f..412ed334 100644
--- a/manager/knot_resolver_manager/cli/cmd/convert.py
+++ b/python/knot_resolver/client/commands/convert.py
@@ -3,15 +3,11 @@ import sys
from pathlib import Path
from typing import List, Optional, Tuple, Type
-from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
-from knot_resolver_manager.datamodel import KresConfig
-from knot_resolver_manager.datamodel.globals import (
- Context,
- reset_global_validation_context,
- set_global_validation_context,
-)
-from knot_resolver_manager.utils.modeling import try_to_parse
-from knot_resolver_manager.utils.modeling.exceptions import DataParsingError, DataValidationError
+from knot_resolver.client.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver.datamodel import KresConfig
+from knot_resolver.datamodel.globals import Context, reset_global_validation_context, set_global_validation_context
+from knot_resolver.utils.modeling import try_to_parse
+from knot_resolver.utils.modeling.exceptions import DataParsingError, DataValidationError
@register_command
diff --git a/manager/knot_resolver_manager/cli/cmd/help.py b/python/knot_resolver/client/commands/help.py
index d374005e..87306c2a 100644
--- a/manager/knot_resolver_manager/cli/cmd/help.py
+++ b/python/knot_resolver/client/commands/help.py
@@ -1,7 +1,7 @@
import argparse
from typing import List, Tuple, Type
-from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver.client.command import Command, CommandArgs, CompWords, register_command
@register_command
diff --git a/manager/knot_resolver_manager/cli/cmd/metrics.py b/python/knot_resolver/client/commands/metrics.py
index 7f466ec3..058cad8b 100644
--- a/manager/knot_resolver_manager/cli/cmd/metrics.py
+++ b/python/knot_resolver/client/commands/metrics.py
@@ -2,9 +2,9 @@ import argparse
import sys
from typing import List, Optional, Tuple, Type
-from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
-from knot_resolver_manager.utils.modeling.parsing import DataFormat, parse_json
-from knot_resolver_manager.utils.requests import request
+from knot_resolver.client.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver.utils.modeling.parsing import DataFormat, parse_json
+from knot_resolver.utils.requests import request
@register_command
diff --git a/manager/knot_resolver_manager/cli/cmd/reload.py b/python/knot_resolver/client/commands/reload.py
index 89782f4e..c1350fc5 100644
--- a/manager/knot_resolver_manager/cli/cmd/reload.py
+++ b/python/knot_resolver/client/commands/reload.py
@@ -2,8 +2,8 @@ import argparse
import sys
from typing import List, Tuple, Type
-from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
-from knot_resolver_manager.utils.requests import request
+from knot_resolver.client.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver.utils.requests import request
@register_command
diff --git a/manager/knot_resolver_manager/cli/cmd/schema.py b/python/knot_resolver/client/commands/schema.py
index 25369946..0c63f398 100644
--- a/manager/knot_resolver_manager/cli/cmd/schema.py
+++ b/python/knot_resolver/client/commands/schema.py
@@ -3,9 +3,9 @@ import json
import sys
from typing import List, Optional, Tuple, Type
-from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
-from knot_resolver_manager.datamodel.config_schema import KresConfig
-from knot_resolver_manager.utils.requests import request
+from knot_resolver.client.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver.datamodel import kres_config_json_schema
+from knot_resolver.utils.requests import request
@register_command
@@ -46,7 +46,7 @@ class SchemaCommand(Command):
sys.exit(1)
schema = response.body
else:
- schema = json.dumps(KresConfig.json_schema(), indent=4)
+ schema = json.dumps(kres_config_json_schema(), indent=4)
if self.file:
with open(self.file, "w") as f:
diff --git a/manager/knot_resolver_manager/cli/cmd/stop.py b/python/knot_resolver/client/commands/stop.py
index a3f46354..35baf36c 100644
--- a/manager/knot_resolver_manager/cli/cmd/stop.py
+++ b/python/knot_resolver/client/commands/stop.py
@@ -2,8 +2,8 @@ import argparse
import sys
from typing import List, Tuple, Type
-from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
-from knot_resolver_manager.utils.requests import request
+from knot_resolver.client.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver.utils.requests import request
@register_command
diff --git a/manager/knot_resolver_manager/cli/cmd/validate.py b/python/knot_resolver/client/commands/validate.py
index aacd1989..f7477748 100644
--- a/manager/knot_resolver_manager/cli/cmd/validate.py
+++ b/python/knot_resolver/client/commands/validate.py
@@ -3,15 +3,11 @@ import sys
from pathlib import Path
from typing import List, Tuple, Type
-from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
-from knot_resolver_manager.datamodel import KresConfig
-from knot_resolver_manager.datamodel.globals import (
- Context,
- reset_global_validation_context,
- set_global_validation_context,
-)
-from knot_resolver_manager.utils.modeling import try_to_parse
-from knot_resolver_manager.utils.modeling.exceptions import DataParsingError, DataValidationError
+from knot_resolver.client.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver.datamodel import KresConfig
+from knot_resolver.datamodel.globals import Context, reset_global_validation_context, set_global_validation_context
+from knot_resolver.utils.modeling import try_to_parse
+from knot_resolver.utils.modeling.exceptions import DataParsingError, DataValidationError
@register_command
diff --git a/python/knot_resolver/client/main.py b/python/knot_resolver/client/main.py
new file mode 100644
index 00000000..75cd6a77
--- /dev/null
+++ b/python/knot_resolver/client/main.py
@@ -0,0 +1,78 @@
+import argparse
+import importlib
+import os
+
+from knot_resolver.constants import VERSION
+
+from .client import KRES_CLIENT_NAME, KresClient
+from .command import install_commands_parsers
+
+
+def auto_import_commands() -> None:
+ prefix = f"{'.'.join(__name__.split('.')[:-1])}.commands."
+ for module_name in os.listdir(os.path.dirname(__file__) + "/commands"):
+ if module_name[-3:] != ".py":
+ continue
+ importlib.import_module(f"{prefix}{module_name[:-3]}")
+
+
+def create_main_argument_parser() -> argparse.ArgumentParser:
+ parser = argparse.ArgumentParser(
+ KRES_CLIENT_NAME,
+ description="Knot Resolver command-line utility that serves as a client for communicating with the Knot Resolver management API."
+ " The utility also provides tools to work with the resolver's declarative configuration (validate, convert, ...).",
+ )
+ parser.add_argument(
+ "-V",
+ "--version",
+ action="version",
+ version=VERSION,
+ help="Get version",
+ )
+ # parser.add_argument(
+ # "-i",
+ # "--interactive",
+ # action="store_true",
+ # help="Use the utility in interactive mode.",
+ # default=False,
+ # required=False,
+ # )
+ config_or_socket = parser.add_mutually_exclusive_group()
+ config_or_socket.add_argument(
+ "-s",
+ "--socket",
+ action="store",
+ type=str,
+ help="Optional, path to the resolver's management API, unix-domain socket, or network interface."
+ " Cannot be used together with '--config'.",
+ default=[],
+ nargs=1,
+ required=False,
+ )
+ config_or_socket.add_argument(
+ "-c",
+ "--config",
+ action="store",
+ type=str,
+ help="Optional, path to the resolver's declarative configuration to retrieve the management API configuration."
+ " Cannot be used together with '--socket'.",
+ default=[],
+ nargs=1,
+ required=False,
+ )
+ return parser
+
+
+def main() -> None:
+ auto_import_commands()
+ parser = create_main_argument_parser()
+ install_commands_parsers(parser)
+
+ namespace = parser.parse_args()
+ client = KresClient(namespace, parser)
+ client.execute()
+
+ # if namespace.interactive or len(vars(namespace)) == 2:
+ # client.interactive()
+ # else:
+ # client.execute()
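
auto_import_commands() above imports every '*.py' file found in the commands directory so that each module's @register_command decorator runs on startup. A roughly equivalent discovery using the standard library's pkgutil (a hypothetical alternative, assuming knot_resolver.client.commands is a regular package; not part of this change):

import importlib
import pkgutil

from knot_resolver.client import commands

def auto_import_commands() -> None:
    # iterate over the modules contained in the commands package
    for module in pkgutil.iter_modules(commands.__path__):
        importlib.import_module(f"{commands.__name__}.{module.name}")
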
diff --git a/python/knot_resolver/constants.py b/python/knot_resolver/constants.py
new file mode 100644
index 00000000..2acb8660
--- /dev/null
+++ b/python/knot_resolver/constants.py
@@ -0,0 +1,19 @@
+from pathlib import Path
+
+VERSION = "6.0.8"
+USER = "knot-resolver"
+GROUP = "knot-resolver"
+
+# dirs paths
+RUN_DIR = Path("/run/knot-resolver")
+ETC_DIR = Path("/etc/knot-resolver")
+SBIN_DIR = Path("/usr/sbin")
+CACHE_DIR = Path("/var/cache/knot-resolver")
+
+# files paths
+CONFIG_FILE = ETC_DIR / "config.yaml"
+API_SOCK_FILE = RUN_DIR / "kres-api.sock"
+
+# executables paths
+KRESD_EXECUTABLE = SBIN_DIR / "kresd"
+KRES_CACHE_GC_EXECUTABLE = SBIN_DIR / "kres-cache-gc"
diff --git a/python/knot_resolver/constants.py.in b/python/knot_resolver/constants.py.in
new file mode 100644
index 00000000..0f1c3a88
--- /dev/null
+++ b/python/knot_resolver/constants.py.in
@@ -0,0 +1,19 @@
+from pathlib import Path
+
+VERSION = "@version@"
+USER = "@user@"
+GROUP = "@group@"
+
+# dirs paths
+RUN_DIR = Path("@run_dir@")
+ETC_DIR = Path("@etc_dir@")
+SBIN_DIR = Path("@sbin_dir@")
+CACHE_DIR = Path("@cache_dir@")
+
+# files paths
+CONFIG_FILE = ETC_DIR / "config.yaml"
+API_SOCK_FILE = RUN_DIR / "kres-api.sock"
+
+# executables paths
+KRESD_EXECUTABLE = SBIN_DIR / "kresd"
+KRES_CACHE_GC_EXECUTABLE = SBIN_DIR / "kres-cache-gc"
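
constants.py.in mirrors the committed constants.py but leaves the values as '@...@' placeholders to be filled in at build time. A stand-alone sketch of that substitution (the real build performs it through the project's build system; the values below are copied from constants.py above):

from pathlib import Path

substitutions = {
    "@version@": "6.0.8",
    "@user@": "knot-resolver",
    "@group@": "knot-resolver",
    "@run_dir@": "/run/knot-resolver",
    "@etc_dir@": "/etc/knot-resolver",
    "@sbin_dir@": "/usr/sbin",
    "@cache_dir@": "/var/cache/knot-resolver",
}

text = Path("python/knot_resolver/constants.py.in").read_text()
for placeholder, value in substitutions.items():
    text = text.replace(placeholder, value)
Path("python/knot_resolver/constants.py").write_text(text)
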
diff --git a/manager/knot_resolver_manager/kresd_controller/__init__.py b/python/knot_resolver/controller/__init__.py
index a21bc44c..5dc2fc55 100644
--- a/manager/knot_resolver_manager/kresd_controller/__init__.py
+++ b/python/knot_resolver/controller/__init__.py
@@ -12,8 +12,8 @@ import asyncio
import logging
from typing import List, Optional
-from knot_resolver_manager.datamodel.config_schema import KresConfig
-from knot_resolver_manager.kresd_controller.interface import SubprocessController
+from knot_resolver.controller.interface import SubprocessController
+from knot_resolver.datamodel.config_schema import KresConfig
logger = logging.getLogger(__name__)
@@ -29,7 +29,7 @@ def try_supervisord():
Attempt to load supervisord controllers.
"""
try:
- from knot_resolver_manager.kresd_controller.supervisord import SupervisordSubprocessController
+ from knot_resolver.controller.supervisord import SupervisordSubprocessController
_registered_controllers.append(SupervisordSubprocessController())
except ImportError:
diff --git a/python/knot_resolver/controller/exceptions.py b/python/knot_resolver/controller/exceptions.py
new file mode 100644
index 00000000..149c2989
--- /dev/null
+++ b/python/knot_resolver/controller/exceptions.py
@@ -0,0 +1,19 @@
+from typing import List
+
+from knot_resolver import KresBaseException
+
+
+class SubprocessControllerException(KresBaseException):
+ pass
+
+
+class SubprocessControllerExecException(Exception):
+ """
+    Exception used to deliberately terminate system startup and exec() another
+    command instead. It is raised by the supervisord subprocess controller so that
+    supervisord can run as the top-level process in the process tree.
+ """
+
+ def __init__(self, exec_args: List[str], *args: object) -> None:
+ self.exec_args = exec_args
+ super().__init__(*args)
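
As the docstring notes, SubprocessControllerExecException carries the argv of a command that should replace the current process. A hypothetical sketch of how a caller might consume it (the manager's actual startup path is not part of this diff; the supervisord path and config name below are made up):

import os

from knot_resolver.controller.exceptions import SubprocessControllerExecException

def start() -> None:
    # e.g. raised by _exec_supervisord() in the supervisord controller
    raise SubprocessControllerExecException(
        ["/usr/bin/supervisord", "supervisord", "-c", "supervisord.conf"]
    )

try:
    start()
except SubprocessControllerExecException as e:
    # replace the current process: exec_args[0] is the executable path,
    # the remaining items form the new process's argv
    os.execv(e.exec_args[0], e.exec_args[1:])
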
diff --git a/manager/knot_resolver_manager/kresd_controller/interface.py b/python/knot_resolver/controller/interface.py
index 63caea49..906592cb 100644
--- a/manager/knot_resolver_manager/kresd_controller/interface.py
+++ b/python/knot_resolver/controller/interface.py
@@ -10,11 +10,11 @@ from pathlib import Path
from typing import Dict, Iterable, Optional, Type, TypeVar
from weakref import WeakValueDictionary
-from knot_resolver_manager.constants import kresd_config_file, policy_loader_config_file
-from knot_resolver_manager.datamodel.config_schema import KresConfig
-from knot_resolver_manager.exceptions import SubprocessControllerException
-from knot_resolver_manager.kresd_controller.registered_workers import register_worker, unregister_worker
-from knot_resolver_manager.utils.async_utils import writefile
+from knot_resolver.controller.exceptions import SubprocessControllerException
+from knot_resolver.controller.registered_workers import register_worker, unregister_worker
+from knot_resolver.datamodel.config_schema import KresConfig
+from knot_resolver.manager.constants import kresd_config_file, policy_loader_config_file
+from knot_resolver.utils.async_utils import writefile
logger = logging.getLogger(__name__)
diff --git a/manager/knot_resolver_manager/kresd_controller/registered_workers.py b/python/knot_resolver/controller/registered_workers.py
index b6ea834e..eed1aded 100644
--- a/manager/knot_resolver_manager/kresd_controller/registered_workers.py
+++ b/python/knot_resolver/controller/registered_workers.py
@@ -2,10 +2,10 @@ import asyncio
import logging
from typing import TYPE_CHECKING, Dict, List, Tuple
-from knot_resolver_manager.exceptions import SubprocessControllerException
+from .exceptions import SubprocessControllerException
if TYPE_CHECKING:
- from knot_resolver_manager.kresd_controller.interface import KresID, Subprocess
+ from knot_resolver.controller.interface import KresID, Subprocess
logger = logging.getLogger(__name__)
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/__init__.py b/python/knot_resolver/controller/supervisord/__init__.py
index 5fb4d81d..cbc181d2 100644
--- a/manager/knot_resolver_manager/kresd_controller/supervisord/__init__.py
+++ b/python/knot_resolver/controller/supervisord/__init__.py
@@ -6,20 +6,20 @@ from xmlrpc.client import Fault, ServerProxy
import supervisor.xmlrpc # type: ignore[import]
-from knot_resolver_manager.compat.asyncio import async_in_a_thread
-from knot_resolver_manager.constants import supervisord_config_file, supervisord_pid_file, supervisord_sock_file
-from knot_resolver_manager.datamodel.config_schema import KresConfig
-from knot_resolver_manager.exceptions import CancelStartupExecInsteadException, SubprocessControllerException
-from knot_resolver_manager.kresd_controller.interface import (
+from knot_resolver.controller.exceptions import SubprocessControllerException, SubprocessControllerExecException
+from knot_resolver.controller.interface import (
KresID,
Subprocess,
SubprocessController,
SubprocessStatus,
SubprocessType,
)
-from knot_resolver_manager.kresd_controller.supervisord.config_file import SupervisordKresID, write_config_file
-from knot_resolver_manager.utils import which
-from knot_resolver_manager.utils.async_utils import call, readfile
+from knot_resolver.controller.supervisord.config_file import SupervisordKresID, write_config_file
+from knot_resolver.datamodel.config_schema import KresConfig
+from knot_resolver.manager.constants import supervisord_config_file, supervisord_pid_file, supervisord_sock_file
+from knot_resolver.utils import which
+from knot_resolver.utils.async_utils import call, readfile
+from knot_resolver.utils.compat.asyncio import async_in_a_thread
logger = logging.getLogger(__name__)
@@ -37,7 +37,7 @@ async def _exec_supervisord(config: KresConfig) -> NoReturn:
logger.debug("Writing supervisord config")
await write_config_file(config)
logger.debug("Execing supervisord")
- raise CancelStartupExecInsteadException(
+ raise SubprocessControllerExecException(
[
str(which.which("supervisord")),
"supervisord",
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/config_file.py b/python/knot_resolver/controller/supervisord/config_file.py
index 15a81ba7..45a1a83e 100644
--- a/manager/knot_resolver_manager/kresd_controller/supervisord/config_file.py
+++ b/python/knot_resolver/controller/supervisord/config_file.py
@@ -1,15 +1,18 @@
import logging
import os
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Literal
from jinja2 import Template
-from typing_extensions import Literal
-from knot_resolver_manager.compat.dataclasses import dataclass
-from knot_resolver_manager.constants import (
- kres_gc_executable,
- kresd_cache_dir,
+from knot_resolver.constants import KRES_CACHE_GC_EXECUTABLE, KRESD_EXECUTABLE
+from knot_resolver.controller.interface import KresID, SubprocessType
+from knot_resolver.datamodel.config_schema import KresConfig
+from knot_resolver.datamodel.logging_schema import LogTargetEnum
+from knot_resolver.manager.constants import (
+ kres_cache_dir,
kresd_config_file_supervisord_pattern,
- kresd_executable,
policy_loader_config_file,
supervisord_config_file,
supervisord_config_file_tmp,
@@ -18,10 +21,7 @@ from knot_resolver_manager.constants import (
supervisord_subprocess_log_dir,
user_constants,
)
-from knot_resolver_manager.datamodel.config_schema import KresConfig
-from knot_resolver_manager.datamodel.logging_schema import LogTargetEnum
-from knot_resolver_manager.kresd_controller.interface import KresID, SubprocessType
-from knot_resolver_manager.utils.async_utils import read_resource, writefile
+from knot_resolver.utils.async_utils import read_resource, writefile
logger = logging.getLogger(__name__)
@@ -83,7 +83,7 @@ class ProcessTypeConfig:
Data structure holding data for supervisord config template
"""
- logfile: str
+ logfile: Path
workdir: str
command: str
environment: str
@@ -95,7 +95,7 @@ class ProcessTypeConfig:
return ProcessTypeConfig( # type: ignore[call-arg]
logfile=supervisord_subprocess_log_dir(config) / "gc.log",
workdir=cwd,
- command=f"{kres_gc_executable()} -c {kresd_cache_dir(config)}{kres_cache_gc_args(config)}",
+ command=f"{KRES_CACHE_GC_EXECUTABLE} -c {kres_cache_dir(config)}{kres_cache_gc_args(config)}",
environment="",
)
@@ -105,7 +105,7 @@ class ProcessTypeConfig:
return ProcessTypeConfig( # type: ignore[call-arg]
logfile=supervisord_subprocess_log_dir(config) / "policy-loader.log",
workdir=cwd,
- command=f"{kresd_executable()} -c {(policy_loader_config_file(config))} -c - -n",
+ command=f"{KRESD_EXECUTABLE} -c {(policy_loader_config_file(config))} -c - -n",
environment="X-SUPERVISORD-TYPE=notify",
)
@@ -115,7 +115,7 @@ class ProcessTypeConfig:
return ProcessTypeConfig( # type: ignore[call-arg]
logfile=supervisord_subprocess_log_dir(config) / "kresd%(process_num)d.log",
workdir=cwd,
- command=f"{kresd_executable()} -c {kresd_config_file_supervisord_pattern(config)} -n",
+ command=f"{KRESD_EXECUTABLE} -c {kresd_config_file_supervisord_pattern(config)} -n",
environment='SYSTEMD_INSTANCE="%(process_num)d",X-SUPERVISORD-TYPE=notify',
max_procs=int(config.max_workers) + 1, # +1 for the canary process
)
@@ -130,7 +130,7 @@ class ProcessTypeConfig:
if os.environ.get("KRES_DEBUG_MANAGER"):
logger.warning("Injecting debugger into the supervisord config")
# the args array looks like this:
- # [PYTHON_PATH, "-m", "knot_resolver_manager", ...]
+ # [PYTHON_PATH, "-m", "knot_resolver", ...]
args = args[:1] + ["-m", "debugpy", "--listen", "0.0.0.0:5678", "--wait-for-client"] + args[2:]
cmd = '"' + '" "'.join(args) + '"'
@@ -139,16 +139,16 @@ class ProcessTypeConfig:
workdir=user_constants().working_directory_on_startup,
command=cmd,
environment="X-SUPERVISORD-TYPE=notify",
- logfile="", # this will be ignored
+ logfile=Path(""), # this will be ignored
)
@dataclass
class SupervisordConfig:
- unix_http_server: str
- pid_file: str
+ unix_http_server: Path
+ pid_file: Path
workdir: str
- logfile: str
+ logfile: Path
loglevel: Literal["critical", "error", "warn", "info", "debug", "trace", "blather"]
target: LogTargetEnum
@@ -166,14 +166,13 @@ class SupervisordConfig:
"info": "info",
"debug": "debug",
}[config.logging.level]
-
cwd = str(os.getcwd())
return SupervisordConfig( # type: ignore[call-arg]
unix_http_server=supervisord_sock_file(config),
pid_file=supervisord_pid_file(config),
workdir=cwd,
- logfile="syslog" if config.logging.target == "syslog" else "/dev/null",
- loglevel=loglevel,
+ logfile=Path("syslog" if config.logging.target == "syslog" else "/dev/null"),
+ loglevel=loglevel, # type: ignore[arg-type]
target=config.logging.target,
)
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/fast_rpcinterface.py b/python/knot_resolver/controller/supervisord/plugin/fast_rpcinterface.py
index c3834784..c3834784 100644
--- a/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/fast_rpcinterface.py
+++ b/python/knot_resolver/controller/supervisord/plugin/fast_rpcinterface.py
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/manager_integration.py b/python/knot_resolver/controller/supervisord/plugin/manager_integration.py
index 81115617..2fc8cf94 100644
--- a/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/manager_integration.py
+++ b/python/knot_resolver/controller/supervisord/plugin/manager_integration.py
@@ -12,7 +12,7 @@ from supervisor.process import Subprocess
from supervisor.states import SupervisorStates
from supervisor.supervisord import Supervisor
-from knot_resolver_manager.utils.systemd_notify import systemd_notify
+from knot_resolver.utils.systemd_notify import systemd_notify
superd: Optional[Supervisor] = None
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/notifymodule.c b/python/knot_resolver/controller/supervisord/plugin/notifymodule.c
index d56ee7d2..d56ee7d2 100644
--- a/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/notifymodule.c
+++ b/python/knot_resolver/controller/supervisord/plugin/notifymodule.c
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/patch_logger.py b/python/knot_resolver/controller/supervisord/plugin/patch_logger.py
index 411f232e..b5f96617 100644
--- a/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/patch_logger.py
+++ b/python/knot_resolver/controller/supervisord/plugin/patch_logger.py
@@ -4,12 +4,11 @@
import os
import sys
import traceback
-from typing import Any
+from typing import Any, Literal
from supervisor.dispatchers import POutputDispatcher
from supervisor.loggers import LevelsByName, StreamHandler, SyslogHandler
from supervisor.supervisord import Supervisor
-from typing_extensions import Literal
FORWARD_LOG_LEVEL = LevelsByName.CRIT # to make sure it's always printed
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/sd_notify.py b/python/knot_resolver/controller/supervisord/plugin/sd_notify.py
index cffe6fd3..ff32828b 100644
--- a/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/sd_notify.py
+++ b/python/knot_resolver/controller/supervisord/plugin/sd_notify.py
@@ -13,7 +13,7 @@ from supervisor.process import Subprocess
from supervisor.states import ProcessStates
from supervisor.supervisord import Supervisor
-from knot_resolver_manager.kresd_controller.supervisord.plugin import notify
+from knot_resolver.controller.supervisord.plugin import notify
starting_processes: List[Subprocess] = []
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/supervisord.conf.j2 b/python/knot_resolver/controller/supervisord/supervisord.conf.j2
index b1fed1aa..4179d522 100644
--- a/manager/knot_resolver_manager/kresd_controller/supervisord/supervisord.conf.j2
+++ b/python/knot_resolver/controller/supervisord/supervisord.conf.j2
@@ -20,21 +20,21 @@ serverurl = unix://{{ config.unix_http_server }}
{# Extensions to changing the supervisord behavior #}
[rpcinterface:patch_logger]
-supervisor.rpcinterface_factory = knot_resolver_manager.kresd_controller.supervisord.plugin.patch_logger:inject
+supervisor.rpcinterface_factory = knot_resolver.controller.supervisord.plugin.patch_logger:inject
target = {{ config.target }}
[rpcinterface:manager_integration]
-supervisor.rpcinterface_factory = knot_resolver_manager.kresd_controller.supervisord.plugin.manager_integration:inject
+supervisor.rpcinterface_factory = knot_resolver.controller.supervisord.plugin.manager_integration:inject
[rpcinterface:sd_notify]
-supervisor.rpcinterface_factory = knot_resolver_manager.kresd_controller.supervisord.plugin.sd_notify:inject
+supervisor.rpcinterface_factory = knot_resolver.controller.supervisord.plugin.sd_notify:inject
{# Extensions for actual API control #}
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[rpcinterface:fast]
-supervisor.rpcinterface_factory = knot_resolver_manager.kresd_controller.supervisord.plugin.fast_rpcinterface:make_main_rpcinterface
+supervisor.rpcinterface_factory = knot_resolver.controller.supervisord.plugin.fast_rpcinterface:make_main_rpcinterface
[program:manager]
redirect_stderr=false
diff --git a/python/knot_resolver/datamodel/__init__.py b/python/knot_resolver/datamodel/__init__.py
new file mode 100644
index 00000000..81fd1ee9
--- /dev/null
+++ b/python/knot_resolver/datamodel/__init__.py
@@ -0,0 +1,3 @@
+from .config_schema import KresConfig, kres_config_json_schema
+
+__all__ = ["KresConfig", "kres_config_json_schema"]
diff --git a/manager/knot_resolver_manager/datamodel/cache_schema.py b/python/knot_resolver/datamodel/cache_schema.py
index ac30f0d0..d40ee2a0 100644
--- a/manager/knot_resolver_manager/datamodel/cache_schema.py
+++ b/python/knot_resolver/datamodel/cache_schema.py
@@ -1,22 +1,21 @@
-from typing import List, Optional, Union
+from typing import List, Literal, Optional, Union
-from typing_extensions import Literal
-
-from knot_resolver_manager.datamodel.templates import template_from_str
-from knot_resolver_manager.datamodel.types import (
- Dir,
+from knot_resolver.constants import CACHE_DIR
+from knot_resolver.datamodel.templates import template_from_str
+from knot_resolver.datamodel.types import (
DNSRecordTypeEnum,
DomainName,
EscapedStr,
- File,
IntNonNegative,
IntPositive,
Percent,
+ ReadableFile,
SizeUnit,
TimeUnit,
+ WritableDir,
)
-from knot_resolver_manager.utils.modeling import ConfigSchema
-from knot_resolver_manager.utils.modeling.base_schema import lazy_default
+from knot_resolver.utils.modeling import ConfigSchema
+from knot_resolver.utils.modeling.base_schema import lazy_default
_CACHE_CLEAR_TEMPLATE = template_from_str(
"{% from 'macros/cache_macros.lua.j2' import cache_clear %} {{ cache_clear(params) }}"
@@ -51,7 +50,7 @@ class PrefillSchema(ConfigSchema):
origin: DomainName
url: EscapedStr
refresh_interval: TimeUnit = TimeUnit("1d")
- ca_file: Optional[File] = None
+ ca_file: Optional[ReadableFile] = None
def _validate(self) -> None:
if str(self.origin) != ".":
@@ -125,7 +124,7 @@ class CacheSchema(ConfigSchema):
prefetch: These options help keep the cache hot by prefetching expiring records or learning usage patterns and repetitive queries.
"""
- storage: Dir = lazy_default(Dir, "/var/cache/knot-resolver")
+ storage: WritableDir = lazy_default(WritableDir, str(CACHE_DIR))
size_max: SizeUnit = SizeUnit("100M")
garbage_collector: Union[GarbageCollectorSchema, Literal[False]] = GarbageCollectorSchema()
ttl_min: TimeUnit = TimeUnit("5s")
@@ -135,5 +134,5 @@ class CacheSchema(ConfigSchema):
prefetch: PrefetchSchema = PrefetchSchema()
def _validate(self):
- if self.ttl_min.seconds() >= self.ttl_max.seconds():
- raise ValueError("'ttl-max' must be larger then 'ttl-min'")
+ if self.ttl_min.seconds() > self.ttl_max.seconds():
+ raise ValueError("'ttl-max' can't be smaller than 'ttl-min'")
diff --git a/manager/knot_resolver_manager/datamodel/config_schema.py b/python/knot_resolver/datamodel/config_schema.py
index d80f664a..7942eb73 100644
--- a/manager/knot_resolver_manager/datamodel/config_schema.py
+++ b/python/knot_resolver/datamodel/config_schema.py
@@ -1,34 +1,31 @@
import logging
import os
import socket
-from typing import Any, Dict, List, Optional, Tuple, Union
-
-from typing_extensions import Literal
-
-from knot_resolver_manager.constants import MAX_WORKERS
-from knot_resolver_manager.datamodel.cache_schema import CacheSchema
-from knot_resolver_manager.datamodel.dns64_schema import Dns64Schema
-from knot_resolver_manager.datamodel.dnssec_schema import DnssecSchema
-from knot_resolver_manager.datamodel.forward_schema import ForwardSchema
-from knot_resolver_manager.datamodel.local_data_schema import LocalDataSchema, RPZSchema, RuleSchema
-from knot_resolver_manager.datamodel.logging_schema import LoggingSchema
-from knot_resolver_manager.datamodel.lua_schema import LuaSchema
-from knot_resolver_manager.datamodel.management_schema import ManagementSchema
-from knot_resolver_manager.datamodel.monitoring_schema import MonitoringSchema
-from knot_resolver_manager.datamodel.network_schema import NetworkSchema
-from knot_resolver_manager.datamodel.options_schema import OptionsSchema
-from knot_resolver_manager.datamodel.templates import POLICY_CONFIG_TEMPLATE, WORKER_CONFIG_TEMPLATE
-from knot_resolver_manager.datamodel.types import Dir, EscapedStr, IntPositive
-from knot_resolver_manager.datamodel.view_schema import ViewSchema
-from knot_resolver_manager.datamodel.webmgmt_schema import WebmgmtSchema
+from typing import Any, Dict, List, Literal, Optional, Tuple, Union
from knot_resolver_manager.datamodel.rate_limiting_schema import RateLimitingSchema
-from knot_resolver_manager.utils.modeling import ConfigSchema
-from knot_resolver_manager.utils.modeling.base_schema import lazy_default
-from knot_resolver_manager.utils.modeling.exceptions import AggregateDataValidationError, DataValidationError
-_DEFAULT_RUNDIR = "/var/run/knot-resolver"
-
-DEFAULT_MANAGER_API_SOCK = _DEFAULT_RUNDIR + "/manager.sock"
+from knot_resolver.constants import API_SOCK_FILE, RUN_DIR, VERSION
+from knot_resolver.datamodel.cache_schema import CacheSchema
+from knot_resolver.datamodel.dns64_schema import Dns64Schema
+from knot_resolver.datamodel.dnssec_schema import DnssecSchema
+from knot_resolver.datamodel.forward_schema import ForwardSchema
+from knot_resolver.datamodel.globals import Context, get_global_validation_context, set_global_validation_context
+from knot_resolver.datamodel.local_data_schema import LocalDataSchema, RPZSchema, RuleSchema
+from knot_resolver.datamodel.logging_schema import LoggingSchema
+from knot_resolver.datamodel.lua_schema import LuaSchema
+from knot_resolver.datamodel.management_schema import ManagementSchema
+from knot_resolver.datamodel.monitoring_schema import MonitoringSchema
+from knot_resolver.datamodel.network_schema import NetworkSchema
+from knot_resolver.datamodel.options_schema import OptionsSchema
+from knot_resolver.datamodel.templates import POLICY_CONFIG_TEMPLATE, WORKER_CONFIG_TEMPLATE
+from knot_resolver.datamodel.types import EscapedStr, IntPositive, WritableDir
+from knot_resolver.datamodel.view_schema import ViewSchema
+from knot_resolver.datamodel.webmgmt_schema import WebmgmtSchema
+from knot_resolver.utils.modeling import ConfigSchema
+from knot_resolver.utils.modeling.base_schema import lazy_default
+from knot_resolver.utils.modeling.exceptions import AggregateDataValidationError, DataValidationError
+
+WORKERS_MAX = 256
logger = logging.getLogger(__name__)
@@ -44,11 +41,11 @@ def _cpu_count() -> Optional[int]:
return cpus
-def _default_max_worker_count() -> int:
+def _workers_max_count() -> int:
c = _cpu_count()
if c:
return c * 10
- return MAX_WORKERS
+ return WORKERS_MAX
def _get_views_tags(views: List[ViewSchema]) -> List[str]:
@@ -116,10 +113,10 @@ class KresConfig(ConfigSchema):
version: int = 1
nsid: Optional[EscapedStr] = None
hostname: Optional[EscapedStr] = None
- rundir: Dir = lazy_default(Dir, _DEFAULT_RUNDIR)
+ rundir: WritableDir = lazy_default(WritableDir, str(RUN_DIR))
workers: Union[Literal["auto"], IntPositive] = IntPositive(1)
- max_workers: IntPositive = IntPositive(_default_max_worker_count())
- management: ManagementSchema = lazy_default(ManagementSchema, {"unix-socket": DEFAULT_MANAGER_API_SOCK})
+ max_workers: IntPositive = IntPositive(WORKERS_MAX)
+ management: ManagementSchema = lazy_default(ManagementSchema, {"unix-socket": str(API_SOCK_FILE)})
webmgmt: Optional[WebmgmtSchema] = None
options: OptionsSchema = OptionsSchema()
network: NetworkSchema = NetworkSchema()
@@ -138,7 +135,7 @@ class KresConfig(ConfigSchema):
nsid: Optional[EscapedStr]
hostname: EscapedStr
- rundir: Dir
+ rundir: WritableDir
workers: IntPositive
max_workers: IntPositive
management: ManagementSchema
@@ -184,8 +181,11 @@ class KresConfig(ConfigSchema):
def _validate(self) -> None:
# enforce max-workers config
- if int(self.workers) > int(self.max_workers):
- raise ValueError(f"can't run with more workers then the configured maximum {self.max_workers}")
+ workers_max = _workers_max_count()
+ if int(self.workers) > workers_max:
+ raise ValueError(
+ f"can't run with more workers then the recommended maximum {workers_max} or hardcoded {WORKERS_MAX}"
+ )
# sanity check
cpu_count = _cpu_count()
@@ -235,7 +235,7 @@ class KresConfig(ConfigSchema):
return POLICY_CONFIG_TEMPLATE.render(cfg=self, cwd=os.getcwd())
-def get_rundir_without_validation(data: Dict[str, Any]) -> Dir:
+def get_rundir_without_validation(data: Dict[str, Any]) -> WritableDir:
"""
Without fully parsing, try to get a rundir from a raw config data, otherwise use default.
Attempts a dir validation to produce a good error message.
@@ -243,4 +243,26 @@ def get_rundir_without_validation(data: Dict[str, Any]) -> Dir:
Used for initial manager startup.
"""
- return Dir(data["rundir"] if "rundir" in data else _DEFAULT_RUNDIR, object_path="/rundir")
+ return WritableDir(data["rundir"] if "rundir" in data else str(RUN_DIR), object_path="/rundir")
+
+
+def kres_config_json_schema() -> Dict[str, Any]:
+ """
+    At this moment, creating any instance of 'ConfigSchema', even with default values, requires setting the global context.
+    When generating the JSON schema, strict validation must be turned off; otherwise schema creation may fail
+    because a referenced directory/file does not exist or has insufficient permissions.
+    This should be fixed in the future. For more info, see the 'datamodel/globals.py' module.
+ """
+
+ context = get_global_validation_context()
+ set_global_validation_context(Context(None, False))
+
+ schema = KresConfig.json_schema(
+ schema_id=f"https://www.knot-resolver.cz/documentation/v{VERSION}/_static/config.schema.json",
+ title="Knot Resolver configuration JSON schema",
+ description=f"Version Knot Resolver {VERSION}",
+ )
+ # setting back to previous values
+ set_global_validation_context(context)
+
+ return schema
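
With strict validation temporarily disabled as above, the helper can be used exactly the way the reworked 'kresctl schema' command does. A minimal usage sketch, assuming the knot_resolver package is importable:

import json

from knot_resolver.datamodel import kres_config_json_schema

# dump the generated configuration JSON schema, as `kresctl schema` does
print(json.dumps(kres_config_json_schema(), indent=4))
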
diff --git a/manager/knot_resolver_manager/datamodel/design-notes.yml b/python/knot_resolver/datamodel/design-notes.yml
index e4424bc8..e4424bc8 100644
--- a/manager/knot_resolver_manager/datamodel/design-notes.yml
+++ b/python/knot_resolver/datamodel/design-notes.yml
diff --git a/manager/knot_resolver_manager/datamodel/dns64_schema.py b/python/knot_resolver/datamodel/dns64_schema.py
index 60b92f2b..cc0fa06a 100644
--- a/manager/knot_resolver_manager/datamodel/dns64_schema.py
+++ b/python/knot_resolver/datamodel/dns64_schema.py
@@ -1,7 +1,7 @@
from typing import List, Optional
-from knot_resolver_manager.datamodel.types import IPv6Network, IPv6Network96, TimeUnit
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.datamodel.types import IPv6Network, IPv6Network96, TimeUnit
+from knot_resolver.utils.modeling import ConfigSchema
class Dns64Schema(ConfigSchema):
diff --git a/manager/knot_resolver_manager/datamodel/dnssec_schema.py b/python/knot_resolver/datamodel/dnssec_schema.py
index 5e274c9a..6f51d5eb 100644
--- a/manager/knot_resolver_manager/datamodel/dnssec_schema.py
+++ b/python/knot_resolver/datamodel/dnssec_schema.py
@@ -1,7 +1,7 @@
from typing import List, Optional
-from knot_resolver_manager.datamodel.types import DomainName, EscapedStr, File, IntNonNegative, TimeUnit
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.datamodel.types import DomainName, EscapedStr, IntNonNegative, ReadableFile, TimeUnit
+from knot_resolver.utils.modeling import ConfigSchema
class TrustAnchorFileSchema(ConfigSchema):
@@ -14,7 +14,7 @@ class TrustAnchorFileSchema(ConfigSchema):
"""
- file: File
+ file: ReadableFile
read_only: bool = False
diff --git a/manager/knot_resolver_manager/datamodel/forward_schema.py b/python/knot_resolver/datamodel/forward_schema.py
index ee5206c2..3e3af21d 100644
--- a/manager/knot_resolver_manager/datamodel/forward_schema.py
+++ b/python/knot_resolver/datamodel/forward_schema.py
@@ -1,9 +1,7 @@
-from typing import Any, List, Optional, Union
+from typing import Any, List, Literal, Optional, Union
-from typing_extensions import Literal
-
-from knot_resolver_manager.datamodel.types import DomainName, File, IPAddressOptionalPort, ListOrItem, PinSha256
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.datamodel.types import DomainName, IPAddressOptionalPort, ListOrItem, PinSha256, ReadableFile
+from knot_resolver.utils.modeling import ConfigSchema
class ForwardServerSchema(ConfigSchema):
@@ -22,7 +20,7 @@ class ForwardServerSchema(ConfigSchema):
transport: Optional[Literal["tls"]] = None
pin_sha256: Optional[ListOrItem[PinSha256]] = None
hostname: Optional[DomainName] = None
- ca_file: Optional[File] = None
+ ca_file: Optional[ReadableFile] = None
def _validate(self) -> None:
if self.pin_sha256 and (self.hostname or self.ca_file):
diff --git a/manager/knot_resolver_manager/datamodel/globals.py b/python/knot_resolver/datamodel/globals.py
index 610323fa..88f95c2a 100644
--- a/manager/knot_resolver_manager/datamodel/globals.py
+++ b/python/knot_resolver/datamodel/globals.py
@@ -38,6 +38,10 @@ def set_global_validation_context(context: Context) -> None:
_global_context = context
+def get_global_validation_context() -> Context:
+ return _global_context
+
+
def reset_global_validation_context() -> None:
global _global_context
_global_context = Context(None)
diff --git a/manager/knot_resolver_manager/datamodel/local_data_schema.py b/python/knot_resolver/datamodel/local_data_schema.py
index e891601c..ee229778 100644
--- a/manager/knot_resolver_manager/datamodel/local_data_schema.py
+++ b/python/knot_resolver/datamodel/local_data_schema.py
@@ -1,17 +1,15 @@
-from typing import Dict, List, Optional
+from typing import Dict, List, Literal, Optional
-from typing_extensions import Literal
-
-from knot_resolver_manager.datamodel.types import (
+from knot_resolver.datamodel.types import (
DomainName,
EscapedStr,
- File,
IDPattern,
IPAddress,
ListOrItem,
+ ReadableFile,
TimeUnit,
)
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.utils.modeling import ConfigSchema
class RuleSchema(ConfigSchema):
@@ -32,7 +30,7 @@ class RuleSchema(ConfigSchema):
name: Optional[ListOrItem[DomainName]] = None
subtree: Optional[Literal["empty", "nxdomain", "redirect"]] = None
address: Optional[ListOrItem[IPAddress]] = None
- file: Optional[ListOrItem[File]] = None
+ file: Optional[ListOrItem[ReadableFile]] = None
records: Optional[EscapedStr] = None
tags: Optional[List[IDPattern]] = None
ttl: Optional[TimeUnit] = None
@@ -64,7 +62,7 @@ class RPZSchema(ConfigSchema):
tags: Tags to link with other policy rules.
"""
- file: File
+ file: ReadableFile
tags: Optional[List[IDPattern]] = None
@@ -87,9 +85,9 @@ class LocalDataSchema(ConfigSchema):
ttl: Optional[TimeUnit] = None
nodata: bool = True
root_fallback_addresses: Optional[Dict[DomainName, ListOrItem[IPAddress]]] = None
- root_fallback_addresses_files: Optional[List[File]] = None
+ root_fallback_addresses_files: Optional[List[ReadableFile]] = None
addresses: Optional[Dict[DomainName, ListOrItem[IPAddress]]] = None
- addresses_files: Optional[List[File]] = None
+ addresses_files: Optional[List[ReadableFile]] = None
records: Optional[EscapedStr] = None
rules: Optional[List[RuleSchema]] = None
rpz: Optional[List[RPZSchema]] = None
diff --git a/manager/knot_resolver_manager/datamodel/logging_schema.py b/python/knot_resolver/datamodel/logging_schema.py
index d2b7b7e7..9961eb6c 100644
--- a/manager/knot_resolver_manager/datamodel/logging_schema.py
+++ b/python/knot_resolver/datamodel/logging_schema.py
@@ -1,24 +1,13 @@
import os
-from typing import Any, List, Optional, Set, Type, Union, cast
-
-from typing_extensions import Literal
-
-from knot_resolver_manager.datamodel.types import FilePath, TimeUnit
-from knot_resolver_manager.utils.modeling import ConfigSchema
-from knot_resolver_manager.utils.modeling.base_schema import is_obj_type_valid
-
-try:
- # On Debian 10, the typing_extensions library does not contain TypeAlias.
- # We don't strictly need the import for anything except for type checking,
- # so this try-except makes sure it works either way.
- from typing_extensions import TypeAlias # pylint: disable=ungrouped-imports
-except ImportError:
- TypeAlias = None # type: ignore
+from typing import Any, List, Literal, Optional, Set, Type, Union, cast
+from knot_resolver.datamodel.types import TimeUnit, WritableFilePath
+from knot_resolver.utils.modeling import ConfigSchema
+from knot_resolver.utils.modeling.base_schema import is_obj_type_valid
LogLevelEnum = Literal["crit", "err", "warning", "notice", "info", "debug"]
LogTargetEnum = Literal["syslog", "stderr", "stdout"]
-LogGroupsEnum: TypeAlias = Literal[
+LogGroupsEnum = Literal[
"manager",
"supervisord",
"cache-gc",
@@ -84,7 +73,7 @@ class DnstapSchema(ConfigSchema):
log_tcp_rtt: Log TCP RTT (Round-trip time).
"""
- unix_socket: FilePath
+ unix_socket: WritableFilePath
log_queries: bool = True
log_responses: bool = True
log_tcp_rtt: bool = True
diff --git a/manager/knot_resolver_manager/datamodel/lua_schema.py b/python/knot_resolver/datamodel/lua_schema.py
index cf49b712..56e8ee09 100644
--- a/manager/knot_resolver_manager/datamodel/lua_schema.py
+++ b/python/knot_resolver/datamodel/lua_schema.py
@@ -1,7 +1,7 @@
from typing import Optional
-from knot_resolver_manager.datamodel.types import File
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.datamodel.types import ReadableFile
+from knot_resolver.utils.modeling import ConfigSchema
class LuaSchema(ConfigSchema):
@@ -16,7 +16,7 @@ class LuaSchema(ConfigSchema):
script_only: bool = False
script: Optional[str] = None
- script_file: Optional[File] = None
+ script_file: Optional[ReadableFile] = None
def _validate(self) -> None:
if self.script and self.script_file:
diff --git a/manager/knot_resolver_manager/datamodel/management_schema.py b/python/knot_resolver/datamodel/management_schema.py
index 09daa3ff..1ad01d37 100644
--- a/manager/knot_resolver_manager/datamodel/management_schema.py
+++ b/python/knot_resolver/datamodel/management_schema.py
@@ -1,7 +1,7 @@
from typing import Optional
-from knot_resolver_manager.datamodel.types import FilePath, IPAddressPort
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.datamodel.types import IPAddressPort, WritableFilePath
+from knot_resolver.utils.modeling import ConfigSchema
class ManagementSchema(ConfigSchema):
@@ -13,7 +13,7 @@ class ManagementSchema(ConfigSchema):
interface: IP address and port number to listen to.
"""
- unix_socket: Optional[FilePath] = None
+ unix_socket: Optional[WritableFilePath] = None
interface: Optional[IPAddressPort] = None
def _validate(self) -> None:
diff --git a/manager/knot_resolver_manager/datamodel/monitoring_schema.py b/python/knot_resolver/datamodel/monitoring_schema.py
index dfc4a116..f7a49f22 100644
--- a/manager/knot_resolver_manager/datamodel/monitoring_schema.py
+++ b/python/knot_resolver/datamodel/monitoring_schema.py
@@ -1,9 +1,7 @@
-from typing import Union
+from typing import Literal, Union
-from typing_extensions import Literal
-
-from knot_resolver_manager.datamodel.types import DomainName, EscapedStr, IPAddress, PortNumber, TimeUnit
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.datamodel.types import DomainName, EscapedStr, IPAddress, PortNumber, TimeUnit
+from knot_resolver.utils.modeling import ConfigSchema
class GraphiteSchema(ConfigSchema):
diff --git a/manager/knot_resolver_manager/datamodel/network_schema.py b/python/knot_resolver/datamodel/network_schema.py
index 289104b8..3063cef2 100644
--- a/manager/knot_resolver_manager/datamodel/network_schema.py
+++ b/python/knot_resolver/datamodel/network_schema.py
@@ -1,11 +1,7 @@
-from typing import List, Optional, Union
+from typing import List, Literal, Optional, Union
-from typing_extensions import Literal
-
-from knot_resolver_manager.datamodel.types import (
+from knot_resolver.datamodel.types import (
EscapedStr32B,
- File,
- FilePath,
Int0_512,
Int0_65535,
InterfaceOptionalPort,
@@ -16,9 +12,11 @@ from knot_resolver_manager.datamodel.types import (
IPv6Address,
ListOrItem,
PortNumber,
+ ReadableFile,
SizeUnit,
+ WritableFilePath,
)
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.utils.modeling import ConfigSchema
KindEnum = Literal["dns", "xdp", "dot", "doh-legacy", "doh2"]
@@ -62,10 +60,10 @@ class TLSSchema(ConfigSchema):
padding: EDNS(0) padding of queries and answers sent over an encrypted channel.
"""
- cert_file: Optional[File] = None
- key_file: Optional[File] = None
+ cert_file: Optional[ReadableFile] = None
+ key_file: Optional[ReadableFile] = None
sticket_secret: Optional[EscapedStr32B] = None
- sticket_secret_file: Optional[File] = None
+ sticket_secret_file: Optional[ReadableFile] = None
auto_discovery: bool = False
padding: Union[bool, Int0_512] = True
@@ -88,7 +86,7 @@ class ListenSchema(ConfigSchema):
"""
interface: Optional[ListOrItem[InterfaceOptionalPort]] = None
- unix_socket: Optional[ListOrItem[FilePath]] = None
+ unix_socket: Optional[ListOrItem[WritableFilePath]] = None
port: Optional[PortNumber] = None
kind: KindEnum = "dns"
freebind: bool = False
@@ -96,7 +94,7 @@ class ListenSchema(ConfigSchema):
_LAYER = Raw
interface: Optional[ListOrItem[InterfaceOptionalPort]]
- unix_socket: Optional[ListOrItem[FilePath]]
+ unix_socket: Optional[ListOrItem[WritableFilePath]]
port: Optional[PortNumber]
kind: KindEnum
freebind: bool
diff --git a/manager/knot_resolver_manager/datamodel/options_schema.py b/python/knot_resolver/datamodel/options_schema.py
index d0bb0399..a9936eb4 100644
--- a/manager/knot_resolver_manager/datamodel/options_schema.py
+++ b/python/knot_resolver/datamodel/options_schema.py
@@ -1,6 +1,6 @@
-from typing_extensions import Literal
+from typing import Literal
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.utils.modeling import ConfigSchema
GlueCheckingEnum = Literal["normal", "strict", "permissive"]
diff --git a/manager/knot_resolver_manager/datamodel/policy_schema.py b/python/knot_resolver/datamodel/policy_schema.py
index bbc61cd1..8f9d8b26 100644
--- a/manager/knot_resolver_manager/datamodel/policy_schema.py
+++ b/python/knot_resolver/datamodel/policy_schema.py
@@ -1,15 +1,15 @@
from typing import List, Optional, Union
-from knot_resolver_manager.datamodel.forward_schema import ForwardServerSchema
-from knot_resolver_manager.datamodel.network_schema import AddressRenumberingSchema
-from knot_resolver_manager.datamodel.types import (
+from knot_resolver.datamodel.forward_schema import ForwardServerSchema
+from knot_resolver.datamodel.network_schema import AddressRenumberingSchema
+from knot_resolver.datamodel.types import (
DNSRecordTypeEnum,
IPAddressOptionalPort,
PolicyActionEnum,
PolicyFlagEnum,
TimeUnit,
)
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.utils.modeling import ConfigSchema
class FilterSchema(ConfigSchema):
diff --git a/manager/knot_resolver_manager/datamodel/rate_limiting_schema.py b/python/knot_resolver/datamodel/rate_limiting_schema.py
index 4733223f..4733223f 100644
--- a/manager/knot_resolver_manager/datamodel/rate_limiting_schema.py
+++ b/python/knot_resolver/datamodel/rate_limiting_schema.py
diff --git a/manager/knot_resolver_manager/datamodel/rpz_schema.py b/python/knot_resolver/datamodel/rpz_schema.py
index 633e34a5..96d79293 100644
--- a/manager/knot_resolver_manager/datamodel/rpz_schema.py
+++ b/python/knot_resolver/datamodel/rpz_schema.py
@@ -1,7 +1,7 @@
from typing import List, Optional
-from knot_resolver_manager.datamodel.types import File, PolicyActionEnum, PolicyFlagEnum
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.datamodel.types import PolicyActionEnum, PolicyFlagEnum, ReadableFile
+from knot_resolver.utils.modeling import ConfigSchema
class RPZSchema(ConfigSchema):
@@ -18,7 +18,7 @@ class RPZSchema(ConfigSchema):
"""
action: PolicyActionEnum
- file: File
+ file: ReadableFile
watch: bool = True
views: Optional[List[str]] = None
options: Optional[List[PolicyFlagEnum]] = None
diff --git a/manager/knot_resolver_manager/datamodel/slice_schema.py b/python/knot_resolver/datamodel/slice_schema.py
index 0c7cdea1..f807c298 100644
--- a/manager/knot_resolver_manager/datamodel/slice_schema.py
+++ b/python/knot_resolver/datamodel/slice_schema.py
@@ -1,9 +1,7 @@
-from typing import List, Optional
+from typing import List, Literal, Optional
-from typing_extensions import Literal
-
-from knot_resolver_manager.datamodel.policy_schema import ActionSchema
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.datamodel.policy_schema import ActionSchema
+from knot_resolver.utils.modeling import ConfigSchema
class SliceSchema(ConfigSchema):
diff --git a/manager/knot_resolver_manager/datamodel/static_hints_schema.py b/python/knot_resolver/datamodel/static_hints_schema.py
index 7d39fcf4..ac64c311 100644
--- a/manager/knot_resolver_manager/datamodel/static_hints_schema.py
+++ b/python/knot_resolver/datamodel/static_hints_schema.py
@@ -1,7 +1,7 @@
from typing import Dict, List, Optional
-from knot_resolver_manager.datamodel.types import DomainName, File, IPAddress, TimeUnit
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.datamodel.types import DomainName, IPAddress, ReadableFile, TimeUnit
+from knot_resolver.utils.modeling import ConfigSchema
class StaticHintsSchema(ConfigSchema):
@@ -22,6 +22,6 @@ class StaticHintsSchema(ConfigSchema):
nodata: bool = True
etc_hosts: bool = False
root_hints: Optional[Dict[DomainName, List[IPAddress]]] = None
- root_hints_file: Optional[File] = None
+ root_hints_file: Optional[ReadableFile] = None
hints: Optional[Dict[DomainName, List[IPAddress]]] = None
- hints_files: Optional[List[File]] = None
+ hints_files: Optional[List[ReadableFile]] = None
diff --git a/manager/knot_resolver_manager/datamodel/stub_zone_schema.py b/python/knot_resolver/datamodel/stub_zone_schema.py
index b9945ecc..afd1cc79 100644
--- a/manager/knot_resolver_manager/datamodel/stub_zone_schema.py
+++ b/python/knot_resolver/datamodel/stub_zone_schema.py
@@ -1,7 +1,7 @@
from typing import List, Optional, Union
-from knot_resolver_manager.datamodel.types import DomainName, IPAddressOptionalPort, PolicyFlagEnum
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.datamodel.types import DomainName, IPAddressOptionalPort, PolicyFlagEnum
+from knot_resolver.utils.modeling import ConfigSchema
class StubServerSchema(ConfigSchema):
diff --git a/manager/knot_resolver_manager/datamodel/templates/__init__.py b/python/knot_resolver/datamodel/templates/__init__.py
index 832503b7..fdb91dd2 100644
--- a/manager/knot_resolver_manager/datamodel/templates/__init__.py
+++ b/python/knot_resolver/datamodel/templates/__init__.py
@@ -5,13 +5,13 @@ from jinja2 import Environment, FileSystemLoader, Template
def _get_templates_dir() -> str:
- module = sys.modules["knot_resolver_manager.datamodel"].__file__
+ module = sys.modules["knot_resolver.datamodel"].__file__
if module:
templates_dir = os.path.join(os.path.dirname(module), "templates")
if os.path.isdir(templates_dir):
return templates_dir
raise NotADirectoryError(f"the templates dir '{templates_dir}' is not a directory or does not exist")
- raise OSError("package 'knot_resolver_manager.datamodel' cannot be located or loaded")
+ raise OSError("package 'knot_resolver.datamodel' cannot be located or loaded")
_TEMPLATES_DIR = _get_templates_dir()
diff --git a/manager/knot_resolver_manager/datamodel/templates/cache.lua.j2 b/python/knot_resolver/datamodel/templates/cache.lua.j2
index f0176a59..f0176a59 100644
--- a/manager/knot_resolver_manager/datamodel/templates/cache.lua.j2
+++ b/python/knot_resolver/datamodel/templates/cache.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/dns64.lua.j2 b/python/knot_resolver/datamodel/templates/dns64.lua.j2
index c5239f00..c5239f00 100644
--- a/manager/knot_resolver_manager/datamodel/templates/dns64.lua.j2
+++ b/python/knot_resolver/datamodel/templates/dns64.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/dnssec.lua.j2 b/python/knot_resolver/datamodel/templates/dnssec.lua.j2
index 05d1fa68..05d1fa68 100644
--- a/manager/knot_resolver_manager/datamodel/templates/dnssec.lua.j2
+++ b/python/knot_resolver/datamodel/templates/dnssec.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/forward.lua.j2 b/python/knot_resolver/datamodel/templates/forward.lua.j2
index 24311da1..24311da1 100644
--- a/manager/knot_resolver_manager/datamodel/templates/forward.lua.j2
+++ b/python/knot_resolver/datamodel/templates/forward.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/local_data.lua.j2 b/python/knot_resolver/datamodel/templates/local_data.lua.j2
index 8882471f..8882471f 100644
--- a/manager/knot_resolver_manager/datamodel/templates/local_data.lua.j2
+++ b/python/knot_resolver/datamodel/templates/local_data.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/logging.lua.j2 b/python/knot_resolver/datamodel/templates/logging.lua.j2
index 2d5937a8..2d5937a8 100644
--- a/manager/knot_resolver_manager/datamodel/templates/logging.lua.j2
+++ b/python/knot_resolver/datamodel/templates/logging.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/macros/cache_macros.lua.j2 b/python/knot_resolver/datamodel/templates/macros/cache_macros.lua.j2
index 51df48da..51df48da 100644
--- a/manager/knot_resolver_manager/datamodel/templates/macros/cache_macros.lua.j2
+++ b/python/knot_resolver/datamodel/templates/macros/cache_macros.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/macros/common_macros.lua.j2 b/python/knot_resolver/datamodel/templates/macros/common_macros.lua.j2
index 4c2ba11a..4c2ba11a 100644
--- a/manager/knot_resolver_manager/datamodel/templates/macros/common_macros.lua.j2
+++ b/python/knot_resolver/datamodel/templates/macros/common_macros.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/macros/forward_macros.lua.j2 b/python/knot_resolver/datamodel/templates/macros/forward_macros.lua.j2
index b7723fb0..b7723fb0 100644
--- a/manager/knot_resolver_manager/datamodel/templates/macros/forward_macros.lua.j2
+++ b/python/knot_resolver/datamodel/templates/macros/forward_macros.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/macros/local_data_macros.lua.j2 b/python/knot_resolver/datamodel/templates/macros/local_data_macros.lua.j2
index 0898571c..0898571c 100644
--- a/manager/knot_resolver_manager/datamodel/templates/macros/local_data_macros.lua.j2
+++ b/python/knot_resolver/datamodel/templates/macros/local_data_macros.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/macros/network_macros.lua.j2 b/python/knot_resolver/datamodel/templates/macros/network_macros.lua.j2
index 79800f7d..79800f7d 100644
--- a/manager/knot_resolver_manager/datamodel/templates/macros/network_macros.lua.j2
+++ b/python/knot_resolver/datamodel/templates/macros/network_macros.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/macros/policy_macros.lua.j2 b/python/knot_resolver/datamodel/templates/macros/policy_macros.lua.j2
index 347532e6..347532e6 100644
--- a/manager/knot_resolver_manager/datamodel/templates/macros/policy_macros.lua.j2
+++ b/python/knot_resolver/datamodel/templates/macros/policy_macros.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/macros/view_macros.lua.j2 b/python/knot_resolver/datamodel/templates/macros/view_macros.lua.j2
index 2f1a7964..2f1a7964 100644
--- a/manager/knot_resolver_manager/datamodel/templates/macros/view_macros.lua.j2
+++ b/python/knot_resolver/datamodel/templates/macros/view_macros.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/monitoring.lua.j2 b/python/knot_resolver/datamodel/templates/monitoring.lua.j2
index 624b59ab..624b59ab 100644
--- a/manager/knot_resolver_manager/datamodel/templates/monitoring.lua.j2
+++ b/python/knot_resolver/datamodel/templates/monitoring.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/network.lua.j2 b/python/knot_resolver/datamodel/templates/network.lua.j2
index 665ee454..665ee454 100644
--- a/manager/knot_resolver_manager/datamodel/templates/network.lua.j2
+++ b/python/knot_resolver/datamodel/templates/network.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/options.lua.j2 b/python/knot_resolver/datamodel/templates/options.lua.j2
index 8210fb6d..8210fb6d 100644
--- a/manager/knot_resolver_manager/datamodel/templates/options.lua.j2
+++ b/python/knot_resolver/datamodel/templates/options.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/policy-config.lua.j2 b/python/knot_resolver/datamodel/templates/policy-config.lua.j2
index 4c5c9048..4c5c9048 100644
--- a/manager/knot_resolver_manager/datamodel/templates/policy-config.lua.j2
+++ b/python/knot_resolver/datamodel/templates/policy-config.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/rate_limiting.lua.j2 b/python/knot_resolver/datamodel/templates/rate_limiting.lua.j2
index 096c7f3c..096c7f3c 100644
--- a/manager/knot_resolver_manager/datamodel/templates/rate_limiting.lua.j2
+++ b/python/knot_resolver/datamodel/templates/rate_limiting.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/static_hints.lua.j2 b/python/knot_resolver/datamodel/templates/static_hints.lua.j2
index 130facf9..130facf9 100644
--- a/manager/knot_resolver_manager/datamodel/templates/static_hints.lua.j2
+++ b/python/knot_resolver/datamodel/templates/static_hints.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/views.lua.j2 b/python/knot_resolver/datamodel/templates/views.lua.j2
index 81de8c7b..81de8c7b 100644
--- a/manager/knot_resolver_manager/datamodel/templates/views.lua.j2
+++ b/python/knot_resolver/datamodel/templates/views.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/webmgmt.lua.j2 b/python/knot_resolver/datamodel/templates/webmgmt.lua.j2
index 938ea8da..938ea8da 100644
--- a/manager/knot_resolver_manager/datamodel/templates/webmgmt.lua.j2
+++ b/python/knot_resolver/datamodel/templates/webmgmt.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/worker-config.lua.j2 b/python/knot_resolver/datamodel/templates/worker-config.lua.j2
index c97f0820..c97f0820 100644
--- a/manager/knot_resolver_manager/datamodel/templates/worker-config.lua.j2
+++ b/python/knot_resolver/datamodel/templates/worker-config.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/types/__init__.py b/python/knot_resolver/datamodel/types/__init__.py
index 350cf213..a3d7db3e 100644
--- a/manager/knot_resolver_manager/datamodel/types/__init__.py
+++ b/python/knot_resolver/datamodel/types/__init__.py
@@ -1,5 +1,5 @@
from .enums import DNSRecordTypeEnum, PolicyActionEnum, PolicyFlagEnum
-from .files import AbsoluteDir, Dir, File, FilePath
+from .files import AbsoluteDir, Dir, File, FilePath, ReadableFile, WritableDir, WritableFilePath
from .generic_types import ListOrItem
from .types import (
DomainName,
@@ -60,6 +60,9 @@ __all__ = [
"SizeUnit",
"TimeUnit",
"AbsoluteDir",
+ "ReadableFile",
+ "WritableDir",
+ "WritableFilePath",
"File",
"FilePath",
"Dir",
diff --git a/manager/knot_resolver_manager/datamodel/types/base_types.py b/python/knot_resolver/datamodel/types/base_types.py
index 91f53406..c2d60312 100644
--- a/manager/knot_resolver_manager/datamodel/types/base_types.py
+++ b/python/knot_resolver/datamodel/types/base_types.py
@@ -1,7 +1,7 @@
import re
from typing import Any, Dict, Pattern, Type
-from knot_resolver_manager.utils.modeling import BaseValueType
+from knot_resolver.utils.modeling import BaseValueType
class IntBase(BaseValueType):
diff --git a/manager/knot_resolver_manager/datamodel/types/enums.py b/python/knot_resolver/datamodel/types/enums.py
index bc93ae2f..aca1b433 100644
--- a/manager/knot_resolver_manager/datamodel/types/enums.py
+++ b/python/knot_resolver/datamodel/types/enums.py
@@ -1,4 +1,4 @@
-from typing_extensions import Literal
+from typing import Literal
# Policy actions
PolicyActionEnum = Literal[
diff --git a/manager/knot_resolver_manager/datamodel/types/files.py b/python/knot_resolver/datamodel/types/files.py
index 49b51f71..c2962729 100644
--- a/manager/knot_resolver_manager/datamodel/types/files.py
+++ b/python/knot_resolver/datamodel/types/files.py
@@ -1,8 +1,14 @@
+import os
+import stat
+from enum import Flag, auto
+from grp import getgrnam
from pathlib import Path
+from pwd import getpwnam
from typing import Any, Dict, Tuple, Type, TypeVar
-from knot_resolver_manager.datamodel.globals import get_resolve_root, get_strict_validation
-from knot_resolver_manager.utils.modeling.base_value_type import BaseValueType
+from knot_resolver.constants import GROUP, USER
+from knot_resolver.datamodel.globals import get_resolve_root, get_strict_validation
+from knot_resolver.utils.modeling.base_value_type import BaseValueType
class UncheckedPath(BaseValueType):
@@ -133,5 +139,97 @@ class FilePath(UncheckedPath):
p = self._value.parent
if self.strict_validation and (not p.exists() or not p.is_dir()):
raise ValueError(f"path '{self._value}' does not point inside an existing directory")
+
if self.strict_validation and self._value.is_dir():
raise ValueError(f"path '{self._value}' points to a directory when we expected a file")
+
+
+class _PermissionMode(Flag):
+ READ = auto()
+ WRITE = auto()
+ EXECUTE = auto()
+
+
+def _kres_accessible(dest_path: Path, perm_mode: _PermissionMode) -> bool:
+ chflags = {
+ _PermissionMode.READ: [stat.S_IRUSR, stat.S_IRGRP, stat.S_IROTH],
+ _PermissionMode.WRITE: [stat.S_IWUSR, stat.S_IWGRP, stat.S_IWOTH],
+ _PermissionMode.EXECUTE: [stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH],
+ }
+
+ user_uid = getpwnam(USER).pw_uid
+ user_gid = getgrnam(GROUP).gr_gid
+
+ dest_stat = os.stat(dest_path)
+ dest_uid = dest_stat.st_uid
+ dest_gid = dest_stat.st_gid
+ dest_mode = dest_stat.st_mode
+
+ def accessible(perm: _PermissionMode) -> bool:
+ if user_uid == dest_uid:
+ return bool(dest_mode & chflags[perm][0])
+ b_groups = os.getgrouplist(os.getlogin(), user_gid)
+ if user_gid == dest_gid or dest_gid in b_groups:
+ return bool(dest_mode & chflags[perm][1])
+ return bool(dest_mode & chflags[perm][2])
+
+ # __iter__ for enum.Flag was added in python 3.11,
+ # so 'for perm in perm_mode:' fails for python < 3.11
+ for perm in _PermissionMode:
+ if perm in perm_mode:
+ if not accessible(perm):
+ return False
+ return True
+
+
+class ReadableFile(File):
+ """
+ Path that is enforced to be:
+ - an existing file
+ - readable by knot-resolver processes
+ """
+
+ def __init__(
+ self, source_value: Any, parents: Tuple["UncheckedPath", ...] = tuple(), object_path: str = "/"
+ ) -> None:
+ super().__init__(source_value, parents=parents, object_path=object_path)
+
+ if self.strict_validation and not _kres_accessible(self._value, _PermissionMode.READ):
+ raise ValueError(f"{USER}:{GROUP} has insufficient permissions to read '{self._value}'")
+
+
+class WritableDir(Dir):
+ """
+ Path that is enforced to be:
+ - an existing directory
+ - writable/executable by knot-resolver processes
+ """
+
+ def __init__(
+ self, source_value: Any, parents: Tuple["UncheckedPath", ...] = tuple(), object_path: str = "/"
+ ) -> None:
+ super().__init__(source_value, parents=parents, object_path=object_path)
+
+ if self.strict_validation and not _kres_accessible(
+ self._value, _PermissionMode.WRITE | _PermissionMode.EXECUTE
+ ):
+ raise ValueError(f"{USER}:{GROUP} has insufficient permissions to write/execute '{self._value}'")
+
+
+class WritableFilePath(FilePath):
+ """
+ Path that is enforced to:
+ - have a parent that is an existing directory
+ - not point to a directory itself
+ - have a parent directory writable/executable by knot-resolver processes
+ """
+
+ def __init__(
+ self, source_value: Any, parents: Tuple["UncheckedPath", ...] = tuple(), object_path: str = "/"
+ ) -> None:
+ super().__init__(source_value, parents=parents, object_path=object_path)
+
+ if self.strict_validation and not _kres_accessible(
+ self._value.parent, _PermissionMode.WRITE | _PermissionMode.EXECUTE
+ ):
+ raise ValueError(f"{USER}:{GROUP} has insufficient permissions to write/execute '{self._value.parent}'")
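For illustration, a minimal standalone sketch of the read-permission branch that _kres_accessible implements above; readable_by and the use of the current process's uid/gid/groups are hypothetical stand-ins for the configured knot-resolver user and group, added only for this example (POSIX-only):

import os
import stat
from pathlib import Path

def readable_by(path: Path, uid: int, gid: int, groups) -> bool:
    # mirror the owner/group/other branching of _kres_accessible,
    # restricted to the READ permission bits
    st = os.stat(path)
    if uid == st.st_uid:
        return bool(st.st_mode & stat.S_IRUSR)
    if gid == st.st_gid or st.st_gid in groups:
        return bool(st.st_mode & stat.S_IRGRP)
    return bool(st.st_mode & stat.S_IROTH)

print(readable_by(Path("/etc/hosts"), os.getuid(), os.getgid(), os.getgroups()))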
diff --git a/manager/knot_resolver_manager/datamodel/types/generic_types.py b/python/knot_resolver/datamodel/types/generic_types.py
index 549b11a8..8649a0f0 100644
--- a/manager/knot_resolver_manager/datamodel/types/generic_types.py
+++ b/python/knot_resolver/datamodel/types/generic_types.py
@@ -1,6 +1,6 @@
from typing import Any, List, TypeVar, Union
-from knot_resolver_manager.utils.modeling import BaseGenericTypeWrapper
+from knot_resolver.utils.modeling import BaseGenericTypeWrapper
T = TypeVar("T")
diff --git a/manager/knot_resolver_manager/datamodel/types/types.py b/python/knot_resolver/datamodel/types/types.py
index fa0d2793..1a125720 100644
--- a/manager/knot_resolver_manager/datamodel/types/types.py
+++ b/python/knot_resolver/datamodel/types/types.py
@@ -2,14 +2,8 @@ import ipaddress
import re
from typing import Any, Dict, Optional, Type, Union
-from knot_resolver_manager.datamodel.types.base_types import (
- IntRangeBase,
- PatternBase,
- StrBase,
- StringLengthBase,
- UnitBase,
-)
-from knot_resolver_manager.utils.modeling import BaseValueType
+from knot_resolver.datamodel.types.base_types import IntRangeBase, PatternBase, StrBase, StringLengthBase, UnitBase
+from knot_resolver.utils.modeling import BaseValueType
class IntNonNegative(IntRangeBase):
@@ -195,7 +189,7 @@ class PinSha256(PatternBase):
A string that stores base64 encoded sha256.
"""
- _re = re.compile(r"^[A-Za-z\d+/]{86}==$")
+ _re = re.compile(r"^[A-Za-z\d+/]{43}=$")
class InterfacePort(StrBase):
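A quick sanity check of the corrected PinSha256 pattern: base64 of a 32-byte SHA-256 digest is always 44 characters with exactly one trailing '=', i.e. 43 base64 characters plus the padding. The input bytes below are arbitrary and used only to produce a digest:

import base64
import hashlib
import re

pin = base64.b64encode(hashlib.sha256(b"example public key").digest()).decode()
print(len(pin), pin[-1])                              # 44 =
print(bool(re.fullmatch(r"[A-Za-z\d+/]{43}=", pin)))  # True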
diff --git a/manager/knot_resolver_manager/datamodel/view_schema.py b/python/knot_resolver/datamodel/view_schema.py
index ad44eb3b..be678e68 100644
--- a/manager/knot_resolver_manager/datamodel/view_schema.py
+++ b/python/knot_resolver/datamodel/view_schema.py
@@ -1,9 +1,7 @@
-from typing import List, Optional
+from typing import List, Literal, Optional
-from typing_extensions import Literal
-
-from knot_resolver_manager.datamodel.types import IDPattern, IPNetwork
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.datamodel.types import IDPattern, IPNetwork
+from knot_resolver.utils.modeling import ConfigSchema
class ViewOptionsSchema(ConfigSchema):
diff --git a/manager/knot_resolver_manager/datamodel/webmgmt_schema.py b/python/knot_resolver/datamodel/webmgmt_schema.py
index 41cc3387..c39f84d2 100644
--- a/manager/knot_resolver_manager/datamodel/webmgmt_schema.py
+++ b/python/knot_resolver/datamodel/webmgmt_schema.py
@@ -1,7 +1,7 @@
from typing import Optional
-from knot_resolver_manager.datamodel.types import File, FilePath, InterfacePort
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.datamodel.types import InterfacePort, ReadableFile, WritableFilePath
+from knot_resolver.utils.modeling import ConfigSchema
class WebmgmtSchema(ConfigSchema):
@@ -16,11 +16,11 @@ class WebmgmtSchema(ConfigSchema):
key_file: Path to certificate key.
"""
- unix_socket: Optional[FilePath] = None
+ unix_socket: Optional[WritableFilePath] = None
interface: Optional[InterfacePort] = None
tls: bool = False
- cert_file: Optional[File] = None
- key_file: Optional[File] = None
+ cert_file: Optional[ReadableFile] = None
+ key_file: Optional[ReadableFile] = None
def _validate(self) -> None:
if bool(self.unix_socket) == bool(self.interface):
diff --git a/python/knot_resolver/exceptions.py b/python/knot_resolver/exceptions.py
new file mode 100644
index 00000000..3e90b0bc
--- /dev/null
+++ b/python/knot_resolver/exceptions.py
@@ -0,0 +1,4 @@
+class KresBaseException(Exception):
+ """
+ Base class for all custom exceptions we use in Knot Resolver.
+ """
diff --git a/python/knot_resolver/manager/__init__.py b/python/knot_resolver/manager/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/python/knot_resolver/manager/__init__.py
diff --git a/python/knot_resolver/manager/__main__.py b/python/knot_resolver/manager/__main__.py
new file mode 100644
index 00000000..1063c0a5
--- /dev/null
+++ b/python/knot_resolver/manager/__main__.py
@@ -0,0 +1,4 @@
+from knot_resolver.manager.main import main
+
+if __name__ == "__main__":
+ main()
diff --git a/manager/knot_resolver_manager/config_store.py b/python/knot_resolver/manager/config_store.py
index e5fbaf60..214062b2 100644
--- a/manager/knot_resolver_manager/config_store.py
+++ b/python/knot_resolver/manager/config_store.py
@@ -2,11 +2,12 @@ import asyncio
from asyncio import Lock
from typing import Any, Awaitable, Callable, List, Tuple
-from knot_resolver_manager.datamodel import KresConfig
-from knot_resolver_manager.exceptions import KresManagerException
-from knot_resolver_manager.utils.functional import Result
-from knot_resolver_manager.utils.modeling.exceptions import DataParsingError
-from knot_resolver_manager.utils.modeling.types import NoneType
+from knot_resolver.datamodel import KresConfig
+from knot_resolver.utils.functional import Result
+from knot_resolver.utils.modeling.exceptions import DataParsingError
+from knot_resolver.utils.modeling.types import NoneType
+
+from .exceptions import KresManagerException
VerifyCallback = Callable[[KresConfig, KresConfig], Awaitable[Result[None, str]]]
UpdateCallback = Callable[[KresConfig], Awaitable[None]]
diff --git a/manager/knot_resolver_manager/constants.py b/python/knot_resolver/manager/constants.py
index 90ceed9f..43b1c04b 100644
--- a/manager/knot_resolver_manager/constants.py
+++ b/python/knot_resolver/manager/constants.py
@@ -1,41 +1,22 @@
-import importlib.util
import logging
from pathlib import Path
from typing import TYPE_CHECKING, Optional
-# Install config is semi-optional - only needed to actually run Manager, but not
-# for its unit tests.
-if importlib.util.find_spec("knot_resolver"):
- import knot_resolver # type: ignore[import-not-found]
-else:
- knot_resolver = None
-
if TYPE_CHECKING:
- from knot_resolver_manager.config_store import ConfigStore
- from knot_resolver_manager.datamodel.config_schema import KresConfig
- from knot_resolver_manager.kresd_controller.interface import KresID
-
-STARTUP_LOG_LEVEL = logging.DEBUG
-DEFAULT_MANAGER_CONFIG_FILE = Path("/etc/knot-resolver/config.yaml")
-CONFIG_FILE_ENV_VAR = "KRES_MANAGER_CONFIG"
-API_SOCK_ENV_VAR = "KRES_MANAGER_API_SOCK"
-MANAGER_FIX_ATTEMPT_MAX_COUNTER = 2
-FIX_COUNTER_DECREASE_INTERVAL_SEC = 30 * 60
-PID_FILE_NAME = "manager.pid"
-MAX_WORKERS = 256
-
+ from knot_resolver.controller.interface import KresID
+ from knot_resolver.datamodel.config_schema import KresConfig
+ from knot_resolver.manager.config_store import ConfigStore
-def kresd_executable() -> Path:
- assert knot_resolver is not None
- return knot_resolver.sbin_dir / "kresd"
+LOGGING_LEVEL_STARTUP = logging.DEBUG
+PID_FILE_NAME = "knot-resolver.pid"
-def kres_gc_executable() -> Path:
- assert knot_resolver is not None
- return knot_resolver.sbin_dir / "kres-cache-gc"
+FIX_COUNTER_ATTEMPTS_MAX = 2
+FIX_COUNTER_DECREASE_INTERVAL_SEC = 30 * 60
+WATCHDOG_INTERVAL_SEC: float = 5
-def kresd_cache_dir(config: "KresConfig") -> Path:
+def kres_cache_dir(config: "KresConfig") -> Path:
return config.cache.storage.to_path()
@@ -71,12 +52,6 @@ def supervisord_subprocess_log_dir(_config: "KresConfig") -> Path:
return Path("logs")
-WATCHDOG_INTERVAL: float = 5
-"""
-Used in KresdManager. It's a number of seconds in between system health checks.
-"""
-
-
class _UserConstants:
"""
Class for accessing constants, which are technically not constants as they are user configurable.
diff --git a/python/knot_resolver/manager/exceptions.py b/python/knot_resolver/manager/exceptions.py
new file mode 100644
index 00000000..77bc4d9f
--- /dev/null
+++ b/python/knot_resolver/manager/exceptions.py
@@ -0,0 +1,5 @@
+from knot_resolver import KresBaseException
+
+
+class KresManagerException(KresBaseException):
+ pass
diff --git a/manager/knot_resolver_manager/log.py b/python/knot_resolver/manager/logging.py
index 19271c52..c9b44653 100644
--- a/manager/knot_resolver_manager/log.py
+++ b/python/knot_resolver/manager/logging.py
@@ -4,10 +4,11 @@ import os
import sys
from typing import Optional
-from knot_resolver_manager.config_store import ConfigStore, only_on_real_changes_update
-from knot_resolver_manager.constants import STARTUP_LOG_LEVEL
-from knot_resolver_manager.datamodel.config_schema import KresConfig
-from knot_resolver_manager.datamodel.logging_schema import LogTargetEnum
+from knot_resolver.datamodel.config_schema import KresConfig
+from knot_resolver.datamodel.logging_schema import LogTargetEnum
+from knot_resolver.manager.config_store import ConfigStore, only_on_real_changes_update
+
+from .constants import LOGGING_LEVEL_STARTUP
logger = logging.getLogger(__name__)
@@ -99,7 +100,7 @@ async def logger_init(config_store: ConfigStore) -> None:
def logger_startup() -> None:
- logging.getLogger().setLevel(STARTUP_LOG_LEVEL)
+ logging.getLogger().setLevel(LOGGING_LEVEL_STARTUP)
err_handler = logging.StreamHandler(sys.stderr)
err_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logging.getLogger().addHandler(logging.handlers.MemoryHandler(10_000, logging.ERROR, err_handler))
diff --git a/manager/knot_resolver_manager/main.py b/python/knot_resolver/manager/main.py
index 9428c880..dac47bed 100644
--- a/manager/knot_resolver_manager/main.py
+++ b/python/knot_resolver/manager/main.py
@@ -4,23 +4,29 @@ file to allow us to exclude the __main__.py file from black's autoformatting
"""
import argparse
-import os
import sys
from pathlib import Path
from typing import NoReturn
-from knot_resolver_manager import compat
-from knot_resolver_manager.constants import CONFIG_FILE_ENV_VAR, DEFAULT_MANAGER_CONFIG_FILE
-from knot_resolver_manager.log import logger_startup
-from knot_resolver_manager.server import start_server
+from knot_resolver.constants import CONFIG_FILE, VERSION
+from knot_resolver.manager.logging import logger_startup
+from knot_resolver.manager.server import start_server
+from knot_resolver.utils import compat
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Knot Resolver - caching DNS resolver")
parser.add_argument(
+ "-V",
+ "--version",
+ help="Get version",
+ action="version",
+ version=VERSION,
+ )
+ parser.add_argument(
"-c",
"--config",
- help="Config file to load. Overrides default config location at '" + str(DEFAULT_MANAGER_CONFIG_FILE) + "'",
+ help="Config file to load. Overrides default config location at '" + str(CONFIG_FILE) + "'",
type=str,
nargs=1,
required=False,
@@ -37,13 +43,10 @@ def main() -> NoReturn:
args = parse_args()
# where to look for config
- config_env = os.getenv(CONFIG_FILE_ENV_VAR)
if args.config is not None:
config_path = Path(args.config[0])
- elif config_env is not None:
- config_path = Path(config_env)
else:
- config_path = DEFAULT_MANAGER_CONFIG_FILE
+ config_path = CONFIG_FILE
exit_code = compat.asyncio.run(start_server(config=config_path))
sys.exit(exit_code)
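A short sketch of the simplified config-path resolution after dropping the KRES_MANAGER_CONFIG environment variable; the default path assigned to CONFIG_FILE here is an assumption standing in for the value generated into knot_resolver.constants:

from pathlib import Path
from typing import Optional

CONFIG_FILE = Path("/etc/knot-resolver/config.yaml")  # assumed packaged default

def resolve_config_path(cli_value: Optional[str]) -> Path:
    # -c/--config wins; otherwise fall back to the packaged default
    return Path(cli_value) if cli_value is not None else CONFIG_FILE

print(resolve_config_path(None))
print(resolve_config_path("./config.dev.yaml"))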
diff --git a/manager/knot_resolver_manager/kres_manager.py b/python/knot_resolver/manager/manager.py
index dbe56817..f295de25 100644
--- a/manager/knot_resolver_manager/kres_manager.py
+++ b/python/knot_resolver/manager/manager.py
@@ -7,28 +7,16 @@ from secrets import token_hex
from subprocess import SubprocessError
from typing import Any, Callable, List, Optional
-from knot_resolver_manager.compat.asyncio import create_task
-from knot_resolver_manager.config_store import ConfigStore, only_on_real_changes_update, only_on_real_changes_verifier
-from knot_resolver_manager.constants import (
- FIX_COUNTER_DECREASE_INTERVAL_SEC,
- MANAGER_FIX_ATTEMPT_MAX_COUNTER,
- WATCHDOG_INTERVAL,
-)
-from knot_resolver_manager.exceptions import SubprocessControllerException
-from knot_resolver_manager.kresd_controller.interface import (
- Subprocess,
- SubprocessController,
- SubprocessStatus,
- SubprocessType,
-)
-from knot_resolver_manager.kresd_controller.registered_workers import (
- command_registered_workers,
- get_registered_workers_kresids,
-)
-from knot_resolver_manager.utils.functional import Result
-from knot_resolver_manager.utils.modeling.types import NoneType
-
-from .datamodel import KresConfig
+from knot_resolver.controller.exceptions import SubprocessControllerException
+from knot_resolver.controller.interface import Subprocess, SubprocessController, SubprocessStatus, SubprocessType
+from knot_resolver.controller.registered_workers import command_registered_workers, get_registered_workers_kresids
+from knot_resolver.datamodel import KresConfig
+from knot_resolver.manager.config_store import ConfigStore, only_on_real_changes_update, only_on_real_changes_verifier
+from knot_resolver.utils.compat.asyncio import create_task
+from knot_resolver.utils.functional import Result
+from knot_resolver.utils.modeling.types import NoneType
+
+from .constants import FIX_COUNTER_ATTEMPTS_MAX, FIX_COUNTER_DECREASE_INTERVAL_SEC, WATCHDOG_INTERVAL_SEC
logger = logging.getLogger(__name__)
@@ -55,7 +43,7 @@ class _FixCounter:
return str(self._counter)
def is_too_high(self) -> bool:
- return self._counter >= MANAGER_FIX_ATTEMPT_MAX_COUNTER
+ return self._counter >= FIX_COUNTER_ATTEMPTS_MAX
async def _deny_max_worker_changes(config_old: KresConfig, config_new: KresConfig) -> Result[None, str]:
@@ -372,7 +360,7 @@ class KresManager: # pylint: disable=too-many-instance-attributes
async def _watchdog(self) -> None: # pylint: disable=too-many-branches
while True:
- await asyncio.sleep(WATCHDOG_INTERVAL)
+ await asyncio.sleep(WATCHDOG_INTERVAL_SEC)
self._fix_counter.try_decrease()
diff --git a/python/knot_resolver/manager/metrics/__init__.py b/python/knot_resolver/manager/metrics/__init__.py
new file mode 100644
index 00000000..7e3a968d
--- /dev/null
+++ b/python/knot_resolver/manager/metrics/__init__.py
@@ -0,0 +1,4 @@
+from .collect import report_json
+from .prometheus import init_prometheus, report_prometheus
+
+__all__ = ["init_prometheus", "report_json", "report_prometheus"]
diff --git a/python/knot_resolver/manager/metrics/collect.py b/python/knot_resolver/manager/metrics/collect.py
new file mode 100644
index 00000000..cc9a0712
--- /dev/null
+++ b/python/knot_resolver/manager/metrics/collect.py
@@ -0,0 +1,38 @@
+import logging
+from typing import Dict, Optional
+
+from knot_resolver.controller.interface import KresID
+from knot_resolver.controller.registered_workers import command_registered_workers, get_registered_workers_kresids
+from knot_resolver.datamodel import KresConfig
+from knot_resolver.utils.modeling.parsing import DataFormat
+
+logger = logging.getLogger(__name__)
+
+
+async def collect_kresd_workers_metrics(config: KresConfig) -> Optional[Dict[KresID, object]]:
+ if config.monitoring.enabled == "manager-only":
+ logger.debug("Skipping kresd stat collection due to configuration")
+ return None
+
+ cmd = "collect_statistics()"
+ if config.monitoring.enabled == "lazy":
+ cmd = "collect_lazy_statistics()"
+ logger.debug(f"Collecting stats from all kresd workers using method '{cmd}'")
+
+ metrics_dict = await command_registered_workers(cmd)
+ return metrics_dict
+
+
+async def report_json(config: KresConfig) -> bytes:
+ metrics_raw = await collect_kresd_workers_metrics(config)
+ metrics_dict: Dict[str, Optional[object]] = {}
+
+ if metrics_raw:
+ for kresd_id, kresd_metrics in metrics_raw.items():
+ metrics_dict[str(kresd_id)] = kresd_metrics
+ else:
+ # if we have no metrics, return None for every kresd worker
+ for kresd_id in get_registered_workers_kresids():
+ metrics_dict[str(kresd_id)] = None
+
+ return DataFormat.JSON.dict_dump(metrics_dict).encode()
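A standalone sketch of the command selection performed by collect_kresd_workers_metrics(); pick_stats_command is a hypothetical helper added only for this example, and the "always" value is an assumption about the remaining monitoring.enabled variant:

from typing import Optional

def pick_stats_command(enabled: str) -> Optional[str]:
    # "manager-only" skips worker collection entirely,
    # "lazy" uses the cheaper Lua call, anything else collects full statistics
    if enabled == "manager-only":
        return None
    return "collect_lazy_statistics()" if enabled == "lazy" else "collect_statistics()"

assert pick_stats_command("manager-only") is None
assert pick_stats_command("lazy") == "collect_lazy_statistics()"
assert pick_stats_command("always") == "collect_statistics()"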
diff --git a/manager/knot_resolver_manager/statistics.py b/python/knot_resolver/manager/metrics/prometheus.py
index 4a0eb783..ba5f6334 100644
--- a/manager/knot_resolver_manager/statistics.py
+++ b/python/knot_resolver/manager/metrics/prometheus.py
@@ -1,36 +1,34 @@
import asyncio
import importlib
-import json
import logging
-from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Tuple
+from typing import Any, Dict, Generator, List, Optional, Tuple
-from knot_resolver_manager import compat
-from knot_resolver_manager.config_store import ConfigStore, only_on_real_changes_update
-from knot_resolver_manager.datamodel.config_schema import KresConfig
-from knot_resolver_manager.kresd_controller.registered_workers import (
- command_registered_workers,
- get_registered_workers_kresids,
-)
-from knot_resolver_manager.utils.functional import Result
-from knot_resolver_manager.utils.modeling.parsing import DataFormat
+from knot_resolver.controller.interface import KresID
+from knot_resolver.controller.registered_workers import get_registered_workers_kresids
+from knot_resolver.datamodel.config_schema import KresConfig
+from knot_resolver.manager.config_store import ConfigStore, only_on_real_changes_update
+from knot_resolver.utils import compat
+from knot_resolver.utils.functional import Result
-if TYPE_CHECKING:
- from knot_resolver_manager.kresd_controller.interface import KresID
+from .collect import collect_kresd_workers_metrics
-logger = logging.getLogger(__name__)
-
-
-_prometheus_support = False
+_prometheus_client = False
if importlib.util.find_spec("prometheus_client"):
- _prometheus_support = True
+ _prometheus_client = True
+logger = logging.getLogger(__name__)
+
+if _prometheus_client:
-if _prometheus_support:
from prometheus_client import exposition # type: ignore
from prometheus_client.bridge.graphite import GraphiteBridge # type: ignore
from prometheus_client.core import GaugeMetricFamily # type: ignore
from prometheus_client.core import REGISTRY, CounterMetricFamily, HistogramMetricFamily, Metric
+ _graphite_bridge: Optional[GraphiteBridge] = None
+
+ _metrics_collector: Optional["KresPrometheusMetricsCollector"] = None
+
def _counter(name: str, description: str, label: Tuple[str, str], value: float) -> CounterMetricFamily:
c = CounterMetricFamily(name, description, labels=(label[0],))
c.add_metric((label[1],), value) # type: ignore
@@ -120,6 +118,12 @@ if _prometheus_support:
value=metrics["answer"]["cached"],
)
yield _counter(
+ "resolver_answer_stale",
+ "number of queries that utilized stale data",
+ label=("instance_id", sid),
+ value=metrics["answer"]["stale"],
+ )
+ yield _counter(
"resolver_answer_rcode_noerror",
"number of NOERROR answers",
label=("instance_id", sid),
@@ -233,51 +237,12 @@ if _prometheus_support:
value=int(loaded),
)
- async def _deny_turning_off_graphite_bridge(old_config: KresConfig, new_config: KresConfig) -> Result[None, str]:
- if old_config.monitoring.graphite and not new_config.monitoring.graphite:
- return Result.err(
- "You can't turn off graphite monitoring dynamically. If you really want this feature, please let the developers know."
- )
-
- if (
- old_config.monitoring.graphite is not None
- and new_config.monitoring.graphite is not None
- and old_config.monitoring.graphite != new_config.monitoring.graphite
- ):
- return Result.err("Changing graphite exporter configuration in runtime is not allowed.")
-
- return Result.ok(None)
-
- _graphite_bridge: Optional[GraphiteBridge] = None
-
- @only_on_real_changes_update(lambda c: c.monitoring.graphite)
- async def _configure_graphite_bridge(config: KresConfig) -> None:
- """
- Starts graphite bridge if required
- """
- global _graphite_bridge
- if config.monitoring.graphite is not False and _graphite_bridge is None:
- logger.info(
- "Starting Graphite metrics exporter for [%s]:%d",
- str(config.monitoring.graphite.host),
- int(config.monitoring.graphite.port),
- )
- _graphite_bridge = GraphiteBridge(
- (str(config.monitoring.graphite.host), int(config.monitoring.graphite.port))
- )
- _graphite_bridge.start( # type: ignore
- interval=config.monitoring.graphite.interval.seconds(), prefix=str(config.monitoring.graphite.prefix)
- )
-
-
-class ResolverCollector:
- def __init__(self, config_store: ConfigStore) -> None:
- self._stats_raw: "Optional[Dict[KresID, object]]" = None
- self._config_store: ConfigStore = config_store
- self._collection_task: "Optional[asyncio.Task[None]]" = None
- self._skip_immediate_collection: bool = False
-
- if _prometheus_support:
+ class KresPrometheusMetricsCollector:
+ def __init__(self, config_store: ConfigStore) -> None:
+ self._stats_raw: "Optional[Dict[KresID, object]]" = None
+ self._config_store: ConfigStore = config_store
+ self._collection_task: "Optional[asyncio.Task[None]]" = None
+ self._skip_immediate_collection: bool = False
def collect(self) -> Generator[Metric, None, None]:
# schedule new stats collection
@@ -297,10 +262,6 @@ class ResolverCollector:
metrics = self._stats_raw[kresid]
yield from _parse_resolver_metrics(kresid, metrics)
success = True
- except json.JSONDecodeError:
- logger.warning(
- "Failed to load metrics from resolver instance %s: failed to parse statistics", str(kresid)
- )
except KeyError as e:
logger.warning(
"Failed to load metrics from resolver instance %s: attempted to read missing statistic %s",
@@ -314,115 +275,103 @@ class ResolverCollector:
# this function prevents the collector registry from invoking the collect function on startup
return []
- def report_json(self) -> str:
- # schedule new stats collection
- self._trigger_stats_collection()
-
- # if we have no data, return metrics with information about it and exit
- if self._stats_raw is None:
- no_stats_dict: Dict[str, None] = {}
- for kresid in get_registered_workers_kresids():
- no_stats_dict[str(kresid)] = None
- return DataFormat.JSON.dict_dump(no_stats_dict)
+ async def collect_kresd_stats(self, _triggered_from_prometheus_library: bool = False) -> None:
+ if self._skip_immediate_collection:
+ # this would happen because we are calling this function first manually before stat generation,
+ # and once again immediately afterwards caused by the prometheus library's stat collection
+ #
+ # this code works around the problem of calling async functions from sync methods
+ self._skip_immediate_collection = False
+ return
- stats_dict: Dict[str, object] = {}
- for kresid, stats in self._stats_raw.items():
- stats_dict[str(kresid)] = stats
+ config = self._config_store.get()
+ self._stats_raw = await collect_kresd_workers_metrics(config)
- return DataFormat.JSON.dict_dump(stats_dict)
+ # if this function was not called by the prometheus library and calling collect() is imminent,
+ # we should block the next collection cycle as it would be useless
+ if not _triggered_from_prometheus_library:
+ self._skip_immediate_collection = True
- async def collect_kresd_stats(self, _triggered_from_prometheus_library: bool = False) -> None:
- if self._skip_immediate_collection:
- # this would happen because we are calling this function first manually before stat generation,
- # and once again immediately afterwards caused by the prometheus library's stat collection
+ def _trigger_stats_collection(self) -> None:
+ # we are running inside an event loop, but in a synchronous function and that sucks a lot
+ # it means that we shouldn't block the event loop by performing a blocking stats collection
+ # but it also means that we can't yield to the event loop as this function is synchronous
+ # therefore we can only start a new task, but we can't wait for it
+ # which causes the metrics to be delayed by one collection pass (not the best, but probably good enough)
#
- # this is a code made to solve problem with calling async functions from sync methods
- self._skip_immediate_collection = False
- return
-
- config = self._config_store.get()
-
- if config.monitoring.enabled == "manager-only":
- logger.debug("Skipping kresd stat collection due to configuration")
- self._stats_raw = None
- return
-
- lazy = config.monitoring.enabled == "lazy"
- cmd = "collect_lazy_statistics()" if lazy else "collect_statistics()"
- logger.debug("Collecting kresd stats with method '%s'", cmd)
- stats_raw = await command_registered_workers(cmd)
- self._stats_raw = stats_raw
-
- # if this function was not called by the prometheus library and calling collect() is imminent,
- # we should block the next collection cycle as it would be useless
- if not _triggered_from_prometheus_library:
- self._skip_immediate_collection = True
-
- def _trigger_stats_collection(self) -> None:
- # we are running inside an event loop, but in a synchronous function and that sucks a lot
- # it means that we shouldn't block the event loop by performing a blocking stats collection
- # but it also means that we can't yield to the event loop as this function is synchronous
- # therefore we can only start a new task, but we can't wait for it
- # which causes the metrics to be delayed by one collection pass (not the best, but probably good enough)
- #
- # this issue can be prevented by calling the `collect_kresd_stats()` function manually before entering
- # the Prometheus library. We just have to prevent the library from invoking it again. See the mentioned
- # function for details
-
- if compat.asyncio.is_event_loop_running():
- # when running, we can schedule the new data collection
- if self._collection_task is not None and not self._collection_task.done():
- logger.warning("Statistics collection task is still running. Skipping scheduling of a new one!")
- else:
- self._collection_task = compat.asyncio.create_task(
- self.collect_kresd_stats(_triggered_from_prometheus_library=True)
- )
-
- else:
- # when not running, we can start a new loop (we are not in the manager's main thread)
- compat.asyncio.run(self.collect_kresd_stats(_triggered_from_prometheus_library=True))
-
-
-_resolver_collector: Optional[ResolverCollector] = None
-
+ # this issue can be prevented by calling the `collect_kresd_stats()` function manually before entering
+ # the Prometheus library. We just have to prevent the library from invoking it again. See the mentioned
+ # function for details
+
+ if compat.asyncio.is_event_loop_running():
+ # when running, we can schedule the new data collection
+ if self._collection_task is not None and not self._collection_task.done():
+ logger.warning("Statistics collection task is still running. Skipping scheduling of a new one!")
+ else:
+ self._collection_task = compat.asyncio.create_task(
+ self.collect_kresd_stats(_triggered_from_prometheus_library=True)
+ )
-async def _collect_stats() -> None:
- # manually trigger stat collection so that we do not have to wait for it
- if _resolver_collector is not None:
- await _resolver_collector.collect_kresd_stats()
- else:
- raise RuntimeError("Function invoked before initializing the module!")
+ else:
+ # when not running, we can start a new loop (we are not in the manager's main thread)
+ compat.asyncio.run(self.collect_kresd_stats(_triggered_from_prometheus_library=True))
+ @only_on_real_changes_update(lambda c: c.monitoring.graphite)
+ async def _init_graphite_bridge(config: KresConfig) -> None:
+ """
+ Starts graphite bridge if required
+ """
+ global _graphite_bridge
+ if config.monitoring.graphite is not False and _graphite_bridge is None:
+ logger.info(
+ "Starting Graphite metrics exporter for [%s]:%d",
+ str(config.monitoring.graphite.host),
+ int(config.monitoring.graphite.port),
+ )
+ _graphite_bridge = GraphiteBridge(
+ (str(config.monitoring.graphite.host), int(config.monitoring.graphite.port))
+ )
+ _graphite_bridge.start( # type: ignore
+ interval=config.monitoring.graphite.interval.seconds(), prefix=str(config.monitoring.graphite.prefix)
+ )
-async def report_stats(prometheus_format: bool = False) -> Optional[bytes]:
- """
- Collects metrics from everything, returns data string in JSON (default) or Prometheus format.
- """
+ async def _deny_turning_off_graphite_bridge(old_config: KresConfig, new_config: KresConfig) -> Result[None, str]:
+ if old_config.monitoring.graphite and not new_config.monitoring.graphite:
+ return Result.err(
+ "You can't turn off graphite monitoring dynamically. If you really want this feature, please let the developers know."
+ )
- # manually trigger stat collection so that we do not have to wait for it
- if _resolver_collector is not None:
- await _resolver_collector.collect_kresd_stats()
- else:
- raise RuntimeError("Function invoked before initializing the module!")
+ if (
+ old_config.monitoring.graphite is not None
+ and new_config.monitoring.graphite is not None
+ and old_config.monitoring.graphite != new_config.monitoring.graphite
+ ):
+ return Result.err("Changing graphite exporter configuration in runtime is not allowed.")
- if prometheus_format:
- if _prometheus_support:
- return exposition.generate_latest() # type: ignore
- return None
- return _resolver_collector.report_json().encode()
+ return Result.ok(None)
-async def init_monitoring(config_store: ConfigStore) -> None:
+async def init_prometheus(config_store: ConfigStore) -> None:
"""
- Initialize monitoring. Must be called before any other function from this module.
+ Initialize metrics collection. Must be called before any other function from this module.
"""
- global _resolver_collector
- _resolver_collector = ResolverCollector(config_store)
-
- if _prometheus_support:
- # register metrics collector
- REGISTRY.register(_resolver_collector) # type: ignore
+ if _prometheus_client:
+ # init and register metrics collector
+ global _metrics_collector
+ _metrics_collector = KresPrometheusMetricsCollector(config_store)
+ REGISTRY.register(_metrics_collector) # type: ignore
# register graphite bridge
await config_store.register_verifier(_deny_turning_off_graphite_bridge)
- await config_store.register_on_change_callback(_configure_graphite_bridge)
+ await config_store.register_on_change_callback(_init_graphite_bridge)
+
+
+async def report_prometheus() -> Optional[bytes]:
+ if _prometheus_client:
+ # manually trigger stat collection so that we do not have to wait for it
+ if _metrics_collector is not None:
+ await _metrics_collector.collect_kresd_stats()
+ else:
+ raise RuntimeError("Function invoked before initializing the module!")
+ return exposition.generate_latest() # type: ignore
+ return None
diff --git a/manager/knot_resolver_manager/server.py b/python/knot_resolver/manager/server.py
index b27cadb3..972b167f 100644
--- a/manager/knot_resolver_manager/server.py
+++ b/python/knot_resolver/manager/server.py
@@ -9,42 +9,41 @@ from functools import partial
from http import HTTPStatus
from pathlib import Path
from time import time
-from typing import Any, Dict, List, Optional, Set, Union, cast
+from typing import Any, Dict, List, Literal, Optional, Set, Union, cast
from aiohttp import web
from aiohttp.web import middleware
from aiohttp.web_app import Application
from aiohttp.web_response import json_response
from aiohttp.web_runner import AppRunner, TCPSite, UnixSite
-from typing_extensions import Literal
-
-import knot_resolver_manager.utils.custom_atexit as atexit
-from knot_resolver_manager import log, statistics
-from knot_resolver_manager.compat import asyncio as asyncio_compat
-from knot_resolver_manager.config_store import ConfigStore
-from knot_resolver_manager.constants import DEFAULT_MANAGER_CONFIG_FILE, PID_FILE_NAME, init_user_constants
-from knot_resolver_manager.datamodel.cache_schema import CacheClearRPCSchema
-from knot_resolver_manager.datamodel.config_schema import KresConfig, get_rundir_without_validation
-from knot_resolver_manager.datamodel.globals import Context, set_global_validation_context
-from knot_resolver_manager.datamodel.management_schema import ManagementSchema
-from knot_resolver_manager.exceptions import CancelStartupExecInsteadException, KresManagerException
-from knot_resolver_manager.kresd_controller import get_best_controller_implementation
-from knot_resolver_manager.kresd_controller.registered_workers import command_single_registered_worker
-from knot_resolver_manager.utils import ignore_exceptions_optional
-from knot_resolver_manager.utils.async_utils import readfile
-from knot_resolver_manager.utils.etag import structural_etag
-from knot_resolver_manager.utils.functional import Result
-from knot_resolver_manager.utils.modeling.exceptions import (
- AggregateDataValidationError,
- DataParsingError,
- DataValidationError,
-)
-from knot_resolver_manager.utils.modeling.parsing import DataFormat, try_to_parse
-from knot_resolver_manager.utils.modeling.query import query
-from knot_resolver_manager.utils.modeling.types import NoneType
-from knot_resolver_manager.utils.systemd_notify import systemd_notify
-
-from .kres_manager import KresManager
+
+from knot_resolver.constants import CONFIG_FILE
+from knot_resolver.controller import get_best_controller_implementation
+from knot_resolver.controller.exceptions import SubprocessControllerExecException
+from knot_resolver.controller.registered_workers import command_single_registered_worker
+from knot_resolver.datamodel import kres_config_json_schema
+from knot_resolver.datamodel.cache_schema import CacheClearRPCSchema
+from knot_resolver.datamodel.config_schema import KresConfig, get_rundir_without_validation
+from knot_resolver.datamodel.globals import Context, set_global_validation_context
+from knot_resolver.datamodel.management_schema import ManagementSchema
+from knot_resolver.manager import metrics
+from knot_resolver.utils import custom_atexit as atexit
+from knot_resolver.utils import ignore_exceptions_optional
+from knot_resolver.utils.async_utils import readfile
+from knot_resolver.utils.compat import asyncio as asyncio_compat
+from knot_resolver.utils.etag import structural_etag
+from knot_resolver.utils.functional import Result
+from knot_resolver.utils.modeling.exceptions import AggregateDataValidationError, DataParsingError, DataValidationError
+from knot_resolver.utils.modeling.parsing import DataFormat, try_to_parse
+from knot_resolver.utils.modeling.query import query
+from knot_resolver.utils.modeling.types import NoneType
+from knot_resolver.utils.systemd_notify import systemd_notify
+
+from .config_store import ConfigStore
+from .constants import PID_FILE_NAME, init_user_constants
+from .exceptions import KresManagerException
+from .logging import logger_init
+from .manager import KresManager
logger = logging.getLogger(__name__)
@@ -107,7 +106,7 @@ class Server:
async def _deny_management_changes(self, config_old: KresConfig, config_new: KresConfig) -> Result[None, str]:
if config_old.management != config_new.management:
return Result.err(
- "/server/management: Changing management API address/unix-socket dynamically is not allowed as it's really dangerous."
+ "/server/management: Changing management API address/uTruenix-socket dynamically is not allowed as it's really dangerous."
" If you really need this feature, please contact the developers and explain why. Technically,"
" there are no problems in supporting it. We are only blocking the dynamic changes because"
" we think the consequences of leaving this footgun unprotected are worse than its usefulness."
@@ -241,15 +240,18 @@ class Server:
raise web.HTTPMovedPermanently("/metrics/json")
async def _handler_metrics_json(self, _request: web.Request) -> web.Response:
+
+ config = self.config_store.get()
+
return web.Response(
- body=await statistics.report_stats(),
+ body=await metrics.report_json(config),
content_type="application/json",
charset="utf8",
)
async def _handler_metrics_prometheus(self, _request: web.Request) -> web.Response:
- metrics_report = await statistics.report_stats(prometheus_format=True)
+ metrics_report = await metrics.report_prometheus()
if not metrics_report:
raise web.HTTPNotFound()
@@ -281,7 +283,7 @@ class Server:
async def _handler_schema(self, _request: web.Request) -> web.Response:
return web.json_response(
- KresConfig.json_schema(), headers={"Access-Control-Allow-Origin": "*"}, dumps=partial(json.dumps, indent=4)
+ kres_config_json_schema(), headers={"Access-Control-Allow-Origin": "*"}, dumps=partial(json.dumps, indent=4)
)
async def _handle_view_schema(self, _request: web.Request) -> web.Response:
@@ -504,7 +506,7 @@ async def _sigterm_while_shutting_down():
sys.exit(128 + signal.SIGTERM)
-async def start_server(config: Path = DEFAULT_MANAGER_CONFIG_FILE) -> int:
+async def start_server(config: Path = CONFIG_FILE) -> int:
# This function is quite long, but it describes how the manager runs. So let's silence pylint
# pylint: disable=too-many-statements
@@ -553,11 +555,11 @@ async def start_server(config: Path = DEFAULT_MANAGER_CONFIG_FILE) -> int:
# Up to this point, we have been logging to memory buffer. But now, when we have the configuration loaded, we
# can flush the buffer into the proper place
- await log.logger_init(config_store)
+ await logger_init(config_store)
# With configuration on hand, we can initialize monitoring. We want to do this before any subprocesses are
# started, therefore before initializing manager
- await statistics.init_monitoring(config_store)
+ await metrics.init_prometheus(config_store)
# prepare instance of the server (no side effects)
server = Server(config_store, config)
@@ -565,7 +567,7 @@ async def start_server(config: Path = DEFAULT_MANAGER_CONFIG_FILE) -> int:
# After we have loaded the configuration, we can start worrying about subprocess management.
manager = await _init_manager(config_store, server)
- except CancelStartupExecInsteadException as e:
+ except SubprocessControllerExecException as e:
# if we caught this exception, some component wants to perform a reexec during startup. Most likely, it would
# be a subprocess manager like supervisord, which wants to make sure the manager runs under supervisord in
# the process tree. So now we stop everything, and exec what we are told to. We are assuming, that the thing
diff --git a/python/knot_resolver/meson.build b/python/knot_resolver/meson.build
new file mode 100644
index 00000000..38794daa
--- /dev/null
+++ b/python/knot_resolver/meson.build
@@ -0,0 +1,17 @@
+# python
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+constants_config = configuration_data()
+constants_config.set('version', meson.project_version())
+constants_config.set('user', user)
+constants_config.set('group', group)
+constants_config.set('run_dir', run_dir)
+constants_config.set('etc_dir', etc_dir)
+constants_config.set('sbin_dir', sbin_dir)
+constants_config.set('cache_dir', systemd_cache_dir)
+
+configure_file(
+ input: 'constants.py.in',
+ output: 'constants.py',
+ configuration: constants_config,
+)
diff --git a/manager/knot_resolver_manager/utils/__init__.py b/python/knot_resolver/utils/__init__.py
index edc36fca..edc36fca 100644
--- a/manager/knot_resolver_manager/utils/__init__.py
+++ b/python/knot_resolver/utils/__init__.py
diff --git a/manager/knot_resolver_manager/utils/async_utils.py b/python/knot_resolver/utils/async_utils.py
index 1cd7303e..c530c68b 100644
--- a/manager/knot_resolver_manager/utils/async_utils.py
+++ b/python/knot_resolver/utils/async_utils.py
@@ -9,7 +9,7 @@ from pathlib import PurePath
from threading import Thread
from typing import Any, Dict, Generic, List, Optional, TypeVar, Union
-from knot_resolver_manager.compat.asyncio import to_thread
+from knot_resolver.utils.compat.asyncio import to_thread
def unblock_signals():
diff --git a/python/knot_resolver/utils/compat/__init__.py b/python/knot_resolver/utils/compat/__init__.py
new file mode 100644
index 00000000..53993f6c
--- /dev/null
+++ b/python/knot_resolver/utils/compat/__init__.py
@@ -0,0 +1,3 @@
+from . import asyncio
+
+__all__ = ["asyncio"]
diff --git a/manager/knot_resolver_manager/compat/asyncio.py b/python/knot_resolver/utils/compat/asyncio.py
index 9e10e6c6..9e10e6c6 100644
--- a/manager/knot_resolver_manager/compat/asyncio.py
+++ b/python/knot_resolver/utils/compat/asyncio.py
diff --git a/manager/knot_resolver_manager/utils/custom_atexit.py b/python/knot_resolver/utils/custom_atexit.py
index 2fe55433..2fe55433 100644
--- a/manager/knot_resolver_manager/utils/custom_atexit.py
+++ b/python/knot_resolver/utils/custom_atexit.py
diff --git a/manager/knot_resolver_manager/utils/etag.py b/python/knot_resolver/utils/etag.py
index bb80700b..bb80700b 100644
--- a/manager/knot_resolver_manager/utils/etag.py
+++ b/python/knot_resolver/utils/etag.py
diff --git a/manager/knot_resolver_manager/utils/functional.py b/python/knot_resolver/utils/functional.py
index 43abd705..43abd705 100644
--- a/manager/knot_resolver_manager/utils/functional.py
+++ b/python/knot_resolver/utils/functional.py
diff --git a/manager/knot_resolver_manager/utils/modeling/README.md b/python/knot_resolver/utils/modeling/README.md
index 97c68b54..97c68b54 100644
--- a/manager/knot_resolver_manager/utils/modeling/README.md
+++ b/python/knot_resolver/utils/modeling/README.md
diff --git a/manager/knot_resolver_manager/utils/modeling/__init__.py b/python/knot_resolver/utils/modeling/__init__.py
index d16f6c12..d16f6c12 100644
--- a/manager/knot_resolver_manager/utils/modeling/__init__.py
+++ b/python/knot_resolver/utils/modeling/__init__.py
diff --git a/manager/knot_resolver_manager/utils/modeling/base_generic_type_wrapper.py b/python/knot_resolver/utils/modeling/base_generic_type_wrapper.py
index 1f2c1767..1f2c1767 100644
--- a/manager/knot_resolver_manager/utils/modeling/base_generic_type_wrapper.py
+++ b/python/knot_resolver/utils/modeling/base_generic_type_wrapper.py
diff --git a/manager/knot_resolver_manager/utils/modeling/base_schema.py b/python/knot_resolver/utils/modeling/base_schema.py
index 78fe187a..13539fe0 100644
--- a/manager/knot_resolver_manager/utils/modeling/base_schema.py
+++ b/python/knot_resolver/utils/modeling/base_schema.py
@@ -5,7 +5,7 @@ from typing import Any, Callable, Dict, Generic, List, Optional, Set, Tuple, Typ
import yaml
-from knot_resolver_manager.utils.functional import all_matches
+from knot_resolver.utils.functional import all_matches
from .base_generic_type_wrapper import BaseGenericTypeWrapper
from .base_value_type import BaseValueType
@@ -474,8 +474,9 @@ class ObjectMapper:
return obj
# when the specified type is Any, just return the given value
- # (pylint does something weird on the following line and it happens only on python 3.10)
- elif tp == Any: # pylint: disable=comparison-with-callable
+ # mypy 1.11.0 started reporting a comparison-overlap error on the following line
+ # https://github.com/python/mypy/issues/17665
+ elif tp == Any: # type: ignore[comparison-overlap]
return obj
# BaseValueType subclasses
@@ -753,14 +754,31 @@ class BaseSchema(Serializable):
return True
@classmethod
- def json_schema(cls: Type["BaseSchema"], include_schema_definition: bool = True) -> Dict[Any, Any]:
+ def json_schema(
+ cls: Type["BaseSchema"],
+ schema_id: Optional[str] = None,
+ title: Optional[str] = None,
+ description: Optional[str] = None,
+ include_schema_definition: bool = True,
+ ) -> Dict[Any, Any]:
if cls._LAYER is not None:
- return cls._LAYER.json_schema(include_schema_definition=include_schema_definition)
+ return cls._LAYER.json_schema(
+ schema_id=schema_id,
+ title=title,
+ description=description,
+ include_schema_definition=include_schema_definition,
+ )
schema: Dict[Any, Any] = {}
if include_schema_definition:
schema["$schema"] = "https://json-schema.org/draft/2020-12/schema"
- if cls.__doc__ is not None:
+ if schema_id is not None:
+ schema["$id"] = schema_id
+ if title is not None:
+ schema["title"] = title
+ if description is not None:
+ schema["description"] = description
+ elif cls.__doc__ is not None:
schema["description"] = _split_docstring(cls.__doc__)[0]
schema["type"] = "object"
schema["properties"] = _get_properties_schema(cls)
diff --git a/manager/knot_resolver_manager/utils/modeling/base_value_type.py b/python/knot_resolver/utils/modeling/base_value_type.py
index dff4a3fe..dff4a3fe 100644
--- a/manager/knot_resolver_manager/utils/modeling/base_value_type.py
+++ b/python/knot_resolver/utils/modeling/base_value_type.py
diff --git a/manager/knot_resolver_manager/utils/modeling/exceptions.py b/python/knot_resolver/utils/modeling/exceptions.py
index c2a28817..478f5488 100644
--- a/manager/knot_resolver_manager/utils/modeling/exceptions.py
+++ b/python/knot_resolver/utils/modeling/exceptions.py
@@ -1,9 +1,9 @@
from typing import Iterable, List
-from knot_resolver_manager.exceptions import KresManagerException
+from knot_resolver import KresBaseException
-class DataModelingBaseException(KresManagerException):
+class DataModelingBaseException(KresBaseException):
"""
Base class for all exceptions used in modelling.
"""
diff --git a/manager/knot_resolver_manager/utils/modeling/json_pointer.py b/python/knot_resolver/utils/modeling/json_pointer.py
index a60ba5d1..a60ba5d1 100644
--- a/manager/knot_resolver_manager/utils/modeling/json_pointer.py
+++ b/python/knot_resolver/utils/modeling/json_pointer.py
diff --git a/manager/knot_resolver_manager/utils/modeling/parsing.py b/python/knot_resolver/utils/modeling/parsing.py
index 185a53a1..185a53a1 100644
--- a/manager/knot_resolver_manager/utils/modeling/parsing.py
+++ b/python/knot_resolver/utils/modeling/parsing.py
diff --git a/manager/knot_resolver_manager/utils/modeling/query.py b/python/knot_resolver/utils/modeling/query.py
index cfea82f6..2e378609 100644
--- a/manager/knot_resolver_manager/utils/modeling/query.py
+++ b/python/knot_resolver/utils/modeling/query.py
@@ -1,11 +1,9 @@
import copy
from abc import ABC, abstractmethod # pylint: disable=[no-name-in-module]
-from typing import Any, List, Optional, Tuple, Union
+from typing import Any, List, Literal, Optional, Tuple, Union
-from typing_extensions import Literal
-
-from knot_resolver_manager.utils.modeling.base_schema import BaseSchema, map_object
-from knot_resolver_manager.utils.modeling.json_pointer import json_ptr_resolve
+from knot_resolver.utils.modeling.base_schema import BaseSchema, map_object
+from knot_resolver.utils.modeling.json_pointer import json_ptr_resolve
class PatchError(Exception):
diff --git a/manager/knot_resolver_manager/utils/modeling/renaming.py b/python/knot_resolver/utils/modeling/renaming.py
index 2420ed04..2420ed04 100644
--- a/manager/knot_resolver_manager/utils/modeling/renaming.py
+++ b/python/knot_resolver/utils/modeling/renaming.py
diff --git a/manager/knot_resolver_manager/utils/modeling/types.py b/python/knot_resolver/utils/modeling/types.py
index 4ce9aecc..c7452672 100644
--- a/manager/knot_resolver_manager/utils/modeling/types.py
+++ b/python/knot_resolver/utils/modeling/types.py
@@ -4,9 +4,7 @@
import enum
import inspect
import sys
-from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union
-
-from typing_extensions import Literal
+from typing import Any, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union
from .base_generic_type_wrapper import BaseGenericTypeWrapper
diff --git a/manager/knot_resolver_manager/utils/requests.py b/python/knot_resolver/utils/requests.py
index e52e54a3..72d54e13 100644
--- a/manager/knot_resolver_manager/utils/requests.py
+++ b/python/knot_resolver/utils/requests.py
@@ -2,13 +2,11 @@ import errno
import socket
import sys
from http.client import HTTPConnection
-from typing import Any, Optional, Union
+from typing import Any, Literal, Optional, Union
from urllib.error import HTTPError, URLError
from urllib.parse import quote, unquote, urlparse
from urllib.request import AbstractHTTPHandler, Request, build_opener, install_opener, urlopen
-from typing_extensions import Literal
-
class SocketDesc:
def __init__(self, socket_def: str, source: str):
diff --git a/manager/knot_resolver_manager/utils/systemd_notify.py b/python/knot_resolver/utils/systemd_notify.py
index 44e8dee1..44e8dee1 100644
--- a/manager/knot_resolver_manager/utils/systemd_notify.py
+++ b/python/knot_resolver/utils/systemd_notify.py
diff --git a/manager/knot_resolver_manager/utils/which.py b/python/knot_resolver/utils/which.py
index 450102f3..450102f3 100644
--- a/manager/knot_resolver_manager/utils/which.py
+++ b/python/knot_resolver/utils/which.py
diff --git a/python/meson.build b/python/meson.build
deleted file mode 100644
index e209df54..00000000
--- a/python/meson.build
+++ /dev/null
@@ -1,23 +0,0 @@
-# python
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-python_config = configuration_data()
-python_config.set('kres_version', meson.project_version())
-python_config.set('sbin_dir', sbin_dir)
-python_config.set('bin_dir', bin_dir)
-python_config.set('etc_dir', etc_dir)
-python_config.set('run_dir', run_dir)
-python_config.set('lib_dir', lib_dir)
-python_config.set('modules_dir', modules_dir)
-
-configure_file(
- input: 'knot_resolver.py.in',
- output: 'knot_resolver.py',
- configuration: python_config,
-)
-
-configure_file(
- input: 'setup.py.in',
- output: 'setup.py',
- configuration: python_config,
-)
diff --git a/python/setup.py.in b/python/setup.py.in
deleted file mode 100644
index 07b71454..00000000
--- a/python/setup.py.in
+++ /dev/null
@@ -1,12 +0,0 @@
-from setuptools import setup
-
-# TODO: Migrate this to a pyproject.toml once Debian 11 support is dropped.
-setup(
- name="knot_resolver",
- version="@kres_version@",
- description="Knot Resolver helper data for Python",
- author="Oto Šťáva",
- author_email="oto.stava@nic.cz",
- python_requires=">=3.8,<4.0",
- py_modules=["knot_resolver"],
-)
diff --git a/scripts/README.md b/scripts/README.md
new file mode 100644
index 00000000..dcfe8c68
--- /dev/null
+++ b/scripts/README.md
@@ -0,0 +1,18 @@
+# Knot Resolver scripts
+
+These are auxiliary scripts used for Knot Resolver development.
+
+The scripts in the root of this directory are meant to be executed directly by
+developers. Some may also be run by automated tools.
+
+There are also the following subdirectories. The scripts in these are *only
+ever* meant to be run by automated tools:
+
+- `ci`: specific to the CI/CD pipeline
+- `lib`: (potentially) generally useful scripts to be called by other scripts
+- `meson`: specific to the build system
+- `poe-tasks`: run by the `poe` script in the repository root
+ - `utils`: scripts additionally called by the `poe` tasks
+
+For more information about each script, see its content for explanatory
+comments.
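As a rough sketch of how the layout above is used in practice: the `poe-tasks` scripts are meant to be invoked through the `poe` wrapper in the repository root rather than executed directly. The task names below are taken from scripts introduced elsewhere in this diff; the exact invocations are assumptions for illustration, not documented usage.

    # assumed workflow: ./poe dispatches to scripts/poe-tasks/<task>
    ./poe configure   # configure the Meson development build directory
    ./poe check       # run black, isort, pylint, flake8, mypy and schema checks
    ./poe test        # run the manager unit tests (pytest) from tests/manager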
diff --git a/scripts/bugreport-journals.py b/scripts/bugreport-journals.py
index d66ddfba..bb4d9f24 100755
--- a/scripts/bugreport-journals.py
+++ b/scripts/bugreport-journals.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
"""
Collect systemd-journal log entries around time of daemon exit and coredumps.
"""
diff --git a/scripts/build-in-obs.sh b/scripts/ci/build-in-obs.sh
index 3256ddee..eaa54671 100755
--- a/scripts/build-in-obs.sh
+++ b/scripts/ci/build-in-obs.sh
@@ -1,11 +1,11 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
#
# Push packaging files to OBS
#
# Example usage:
-# 1. ./scripts/make-obs.sh
-# 2. ./scripts/build-in-obs.sh knot-resolver-latest
+# 1. ./scripts/ci/make-obs.sh
+# 2. ./scripts/ci/build-in-obs.sh knot-resolver-latest
set -o errexit -o nounset -o xtrace
pkgdir='pkg/obs'
diff --git a/scripts/enable-repo-cznic-labs.sh b/scripts/ci/enable-repo-cznic-labs.sh
index cbc64c68..e7c53a82 100755
--- a/scripts/enable-repo-cznic-labs.sh
+++ b/scripts/ci/enable-repo-cznic-labs.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# enable CZ.NIC Labs Debian/Ubuntu repos - see https://pkg.labs.nic.cz/doc/
set -e
diff --git a/scripts/make-obs.sh b/scripts/ci/make-obs.sh
index abe96701..4d0666cb 100755
--- a/scripts/make-obs.sh
+++ b/scripts/ci/make-obs.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
#
# create OpenSUSE Build System (OBS) source package
@@ -9,7 +9,7 @@
# * dpkg-buildpackage
#
# usage:
-# ./scripts/make-obs.sh [path.to.archive.xz] [1]
+# ./scripts/ci/make-obs.sh [path.to.archive.xz] [1]
#
# supply archives as optional arguments to build from,
# otherwise archive will be built from sources by apkg
@@ -18,7 +18,7 @@
# output at pkg/obs/ (removed on each run)
set -o errexit -o nounset
-pushd "$(dirname ${0})/.."
+pushd "$(dirname ${0})/../.."
OUTDIR="pkg/obs"
APKG_OPTS="-O $OUTDIR"
diff --git a/scripts/coverage_c_combine.sh b/scripts/coverage_c_combine.sh
deleted file mode 100755
index a891ded5..00000000
--- a/scripts/coverage_c_combine.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# $1 = top source directory
-# $2 = coverage data directory path
-# $3 = output directory for *.info files
-
-set -o errexit -o nounset
-shopt -s nullglob
-IFS=$'\n'
-
-TOPSRCDIR="$1"
-DATAROOT="$2"
-OUTDIR="$3"
-
-cd "${TOPSRCDIR}"
-for COVNAME in $(find "${DATAROOT}" -name .topdir_kresd_coverage)
-do
- find "${DATAROOT}" -name '*.gcda' -not -path "${DATAROOT}/*" -delete
- COVDIR="$(dirname "${COVNAME}")"
- COVDATA_FILENAMES=("${COVDIR}"/*) # filenames in BASH array
- (( ${#COVDATA_FILENAMES[*]} )) || continue # skip empty dirs
-
- cp -r -t ${TOPSRCDIR} "${COVDIR}"/*
- ${LCOV} -q --no-external --capture -d lib -d daemon -d modules -o "$(mktemp -p "${OUTDIR}" -t XXXXXXXX.c.info)" > /dev/null
-done
diff --git a/scripts/coverage_env.sh b/scripts/coverage_env.sh
deleted file mode 100755
index 0f6810f7..00000000
--- a/scripts/coverage_env.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# generate variables for coverage testing
-# $1 = top source directory
-# $2 = coverage data directory path
-# $3 = name of test/new subdirectory name
-# $4 = [optional] --export to generate export commands
-
-set -o errexit -o nounset
-shopt -s nullglob
-
-test -z "${COVERAGE:-}" && exit 0 # not enabled, do nothing
-test ! -z "${V:-}" && set -o xtrace # verbose mode
-
-EXPORT=""
-test "${4:-}" == "--export" && EXPORT="export "
-TOPSRCDIR="$1"
-DATAROOT="$2"
-OUTPATH="$2/$3"
-
-# check that output directory is empty
-# beware: Makefile will always call coverage_env.sh for all targets
-# so directories get created but not populated
-# i.e. test -d is not sufficient check
-OUTPATH_FILENAMES=("${OUTPATH}"/*) # filenames in BASH array
-(( ${#OUTPATH_FILENAMES[*]} )) && echo "false" && >&2 echo "fatal: output directory ${OUTPATH} must be empty (or non-existent)" && exit 1
-
-mkdir -p "${OUTPATH}"
-# convert paths to absolute
-pushd "${OUTPATH}" &> /dev/null
-touch .topdir_kresd_coverage
-OUTPATH="$(pwd -P)"
-popd &> /dev/null
-
-# determine GCOV_PREFIX_STRIP value for current source directory
-TOPSRCDIR_SLASHES="${TOPSRCDIR//[^\/]/}" # remove everything except /
-GCOV_PREFIX_STRIP="${#TOPSRCDIR_SLASHES}" # number of / == number of components
-
-KRESD_COVERAGE_STATS="${OUTPATH}/luacov.stats.out"
-GCOV_PREFIX="${OUTPATH}"
-echo "${EXPORT}KRESD_COVERAGE_STATS=\"${KRESD_COVERAGE_STATS}\" ${EXPORT}GCOV_PREFIX=\"${GCOV_PREFIX}\" ${EXPORT}GCOV_PREFIX_STRIP=\"${GCOV_PREFIX_STRIP}\""
diff --git a/scripts/gen-pgp-keyblock.sh b/scripts/gen-pgp-keyblock.sh
index 29855312..bfdb2349 100755
--- a/scripts/gen-pgp-keyblock.sh
+++ b/scripts/gen-pgp-keyblock.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# Script to create/update Knot Resolver PGP keyring
set -o errexit -o nounset
diff --git a/scripts/get-date.sh b/scripts/lib/get-date.sh
index 36531558..1480f984 100755
--- a/scripts/get-date.sh
+++ b/scripts/lib/get-date.sh
@@ -1,7 +1,7 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
set -o nounset
-cd "$(dirname $0)/.."
+cd "$(dirname $0)/../.."
# Get date from NEWS if possible (regular release)
DATE=$(head -n1 < NEWS | sed 's/.*(\(.*\)).*/\1/' | grep -E '^[0-9]{4}-[0-9]{2}-[0-9]{2}$$')
diff --git a/scripts/upstream-version.sh b/scripts/lib/upstream-version.sh
index 77613b70..4efa42db 100755
--- a/scripts/upstream-version.sh
+++ b/scripts/lib/upstream-version.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
#
# return latest upstream version of Knot Resolver
diff --git a/scripts/luacov_gen_empty.sh b/scripts/luacov_gen_empty.sh
deleted file mode 100755
index 127734df..00000000
--- a/scripts/luacov_gen_empty.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-# Generate stats file in luacov format indicating that files named on stdin
-# were not processed.
-#
-# Normally luacov does not know about files which were not loaded so
-# without this manual addition the files are missing in coverage report.
-
-# Usage:
-# $ luacov_gen_empty.sh < list_of_lua_files > luacov.empty_stats.out
-
-set -o errexit -o nounset
-IFS=$'\n'
-
-while read FILENAME
-do
- echo -e "0:${FILENAME}\n "
-done
diff --git a/scripts/luacov_to_info.lua b/scripts/luacov_to_info.lua
deleted file mode 100755
index b27ba999..00000000
--- a/scripts/luacov_to_info.lua
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/env luajit
--- SPDX-License-Identifier: GPL-3.0-or-later
-
-local luacov = require('luacov')
-local ReporterBase = require('luacov.reporter').ReporterBase
-local LcovReporter = setmetatable({}, ReporterBase)
-LcovReporter.__index = LcovReporter
-
-function LcovReporter:on_new_file(filename)
- self.finfo = self.current_files[filename] or {name=filename, coverage={}}
-end
-
-function LcovReporter:on_mis_line(_, lineno, _)
- self.finfo.coverage[lineno] = self.finfo.coverage[lineno] or 0
-end
-
-function LcovReporter:on_hit_line(_, lineno, _, hits)
- self.finfo.coverage[lineno] = (self.finfo.coverage[lineno] or 0) + hits
-end
-
-function LcovReporter:on_end_file()
- self.current_files[self.finfo.name] = self.finfo
- self.finfo = nil
-end
-
--- Write out results in lcov format
-local function write_lcov_info(files)
- for fname, finfo in pairs(files) do
- local instrumented, nonzero = 0, 0
- print('TN:')
- print(string.format('SF:%s', fname))
- for i, hits in pairs(finfo.coverage) do
- print(string.format('DA:%d,%d', i, hits))
- instrumented = instrumented + 1
- if hits > 0 then
- nonzero = nonzero + 1
- end
- end
- print(string.format('LH:%d', nonzero))
- print(string.format('LF:%d', instrumented))
- print('end_of_record')
- end
-end
-
--- Accumulate total coverage
-local all_files = {}
-for _, fname in ipairs(arg) do
- local conf = luacov.load_config()
- conf.statsfile = fname
- local reporter = assert(LcovReporter:new(conf))
- reporter.current_files = all_files
- reporter:run()
- reporter:close()
-end
-
--- Write results
-write_lcov_info(all_files)
diff --git a/scripts/make-archive.sh b/scripts/make-archive.sh
index 9f2be0ea..b4cd3367 100755
--- a/scripts/make-archive.sh
+++ b/scripts/make-archive.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
# Create a development tarball
set -o errexit -o nounset -o xtrace
diff --git a/scripts/make-doc.sh b/scripts/make-doc.sh
deleted file mode 100755
index 1723ada7..00000000
--- a/scripts/make-doc.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-set -o errexit -o nounset
-cd "$(dirname "${0}")/.."
-
-# generate JSON schema for the manager's declarative config
-pushd manager
-## the following python command should hopefully run without any dependencies except for standard python
-mkdir -p ../doc/_static/
-python3 -m knot_resolver_manager.cli schema > ../doc/_static/config.schema.json
-generate-schema-doc --config expand_buttons=true ../doc/_static/config.schema.json ../doc/_static/schema_doc.html
-popd
-
-# generating the user documentation
-SPHINX=$(type -P sphinx-build-3 sphinx-build | head -n1)
-rm -rf doc/html
-"$SPHINX" "$@" -b html -d doc/user/.doctrees doc/user doc/html
-
-pushd doc/dev
-doxygen
-popd
-
-# generating the developer documentation
-rm -rf doc/html/dev
-"$SPHINX" "$@" -b html -d doc/dev/.doctrees doc/dev doc/html/dev
diff --git a/scripts/map_install_src.lua b/scripts/map_install_src.lua
deleted file mode 100755
index ffc9a300..00000000
--- a/scripts/map_install_src.lua
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/usr/bin/env luajit
--- SPDX-License-Identifier: GPL-3.0-or-later
-
--- parse install commands from stdin
--- input: PREFIX=... make install --dry-run --always-make
--- output: <install path> <source path>
--- (or sed commands if --sed was specified)
-
-output = 'list'
-if #arg > 1 or arg[1] == '-h' or arg[1] == '--help' then
- print(string.format([[
-Read install commands and map install paths to paths in source directory.
-
-Usage:
-$ PREFIX=... make install --dry-run --always-make | %s
-
-Example output:
-/kresd/git/.local/lib/kdns_modules/policy.lua modules/policy/policy.lua
-
-Option --sed will produce output suitable as input suitable for sed.]],
- arg[0]))
- os.exit(1)
-elseif #arg == 0 then
- output = 'list'
-elseif arg[1] == '--sed' then
- output = 'sed'
-else
- print('Invalid arguments. See --help.')
- os.exit(2)
-end
-
--- remove double // from paths and remove trailing /
-function normalize_path(path)
- assert(path)
- repeat
- path, changes = path:gsub('//', '/')
- until changes == 0
- return path:gsub('/$', '')
-end
-
-function is_opt(word)
- return word:match('^-')
-end
-
--- opts requiring additional argument to be skipped
-local ignored_opts_with_arg = {
- ['--backup'] = true,
- ['-g'] = true,
- ['--group'] = true,
- ['-m'] = true,
- ['--mode'] = true,
- ['-o'] = true,
- ['--owner'] = true,
- ['--strip-program'] = true,
- ['--suffix'] = true,
-}
-
--- state machine junctions caused by --opts
--- returns: new state (expect, mode) and target name if any
-function parse_opts(word, expect, mode)
- if word == '--' then
- return 'names', mode, nil -- no options anymore
- elseif word == '-d' or word == '--directory' then
- return 'opt_or_name', 'newdir', nil
- elseif word == '-t' or word == '--target-directory' then
- return 'targetdir', mode, nil
- elseif word:match('^--target-directory=') then
- return 'opt_or_name', mode, string.sub(word, 20)
- elseif ignored_opts_with_arg[word] then
- return 'ignore', mode, nil -- ignore next word
- else
- return expect, mode, nil -- unhandled opt
- end
-end
-
-
--- cmd: complete install command line: install -m 0644 -t dest src1 src2
--- dirs: names known to be directories: name => true
--- returns: updated dirs
-function process_cmd(cmd, dirs)
- -- print('# ' .. cmd)
- sanity_check(cmd)
- local expect = 'install'
- local mode = 'copy' -- copy or newdir
- local target -- last argument or argument for install -t
- local names = {} -- non-option arguments
-
- for word in cmd:gmatch('%S+') do
- if expect == 'install' then -- parsing 'install'
- assert(word == 'install')
- expect = 'opt_or_name'
- elseif expect == 'opt_or_name' then
- if is_opt(word) then
- expect, mode, newtarget = parse_opts(word, expect, mode)
- target = newtarget or target
- else
- if mode == 'copy' then
- table.insert(names, word)
- elseif mode == 'newdir' then
- local path = normalize_path(word)
- dirs[path] = true
- else
- assert(false, 'bad mode')
- end
- end
- elseif expect == 'targetdir' then
- local path = normalize_path(word)
- dirs[path] = true
- target = word
- expect = 'opt_or_name'
- elseif expect == 'names' then
- table.insert(names, word)
- elseif expect == 'ignore' then
- expect = 'opt_or_name'
- else
- assert(false, 'bad expect')
- end
- end
- if mode == 'newdir' then
- -- no mapping to print, this cmd just created directory
- return dirs
- end
-
- if not target then -- last argument is the target
- target = table.remove(names)
- end
- assert(target, 'fatal: no target in install cmd')
- target = normalize_path(target)
-
- for _, name in pairs(names) do
- basename = string.gsub(name, "(.*/)(.*)", "%2")
- if not dirs[target] then
- print('fatal: target directory "' .. target .. '" was not created yet!')
- os.exit(2)
- end
- -- mapping installed name -> source name
- if output == 'list' then
- print(target .. '/' .. basename, name)
- elseif output == 'sed' then
- print(string.format([[s`%s`%s`g]],
- target .. '/' .. basename, name))
- else
- assert(false, 'unsupported output')
- end
- end
- return dirs
-end
-
-function sanity_check(cmd)
- -- shell quotation is not supported
- assert(not cmd:match('"'), 'quotes " are not supported')
- assert(not cmd:match("'"), "quotes ' are not supported")
- assert(not cmd:match('\\'), "escapes like \\ are not supported")
- assert(cmd:match('^install%s'), 'not an install command')
-end
-
--- remember directories created by install -d so we can expand relative paths
-local dirs = {}
-while true do
- local cmd = io.read("*line")
- if not cmd then
- break
- end
- local isinstall = cmd:match('^install%s')
- if isinstall then
- dirs = process_cmd(cmd, dirs)
- end
-end
diff --git a/scripts/bench.sh b/scripts/meson/bench.sh
index 232c5231..d5a9f38e 100755
--- a/scripts/bench.sh
+++ b/scripts/meson/bench.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
set -o errexit -o nounset
diff --git a/scripts/gen-cdefs.sh b/scripts/meson/gen-cdefs.sh
index d56ab86d..968f40b9 100755
--- a/scripts/gen-cdefs.sh
+++ b/scripts/meson/gen-cdefs.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
set -o pipefail -o errexit
diff --git a/scripts/meson/make-doc.sh b/scripts/meson/make-doc.sh
new file mode 100755
index 00000000..1bea3df9
--- /dev/null
+++ b/scripts/meson/make-doc.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: GPL-3.0-or-later
+set -o errexit -o nounset
+cd "$(dirname "${0}")/../.."
+
+# convert JSON schema to html
+generate-schema-doc --config expand_buttons=true doc/_static/config.schema.json doc/_static/schema_doc.html
+
+# generating the user documentation
+SPHINX=$(type -P sphinx-build-3 sphinx-build | head -n1)
+rm -rf doc/html
+"$SPHINX" "$@" -b html -d doc/user/.doctrees doc/user doc/html
+
+pushd doc/dev
+doxygen
+popd
+
+# generating the developer documentation
+rm -rf doc/html/dev
+"$SPHINX" "$@" -b html -d doc/dev/.doctrees doc/dev doc/html/dev
diff --git a/scripts/run-pylint.sh b/scripts/meson/run-pylint.sh
index 92413826..ac23f5c4 100755
--- a/scripts/run-pylint.sh
+++ b/scripts/meson/run-pylint.sh
@@ -1,8 +1,8 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
set -o errexit -o nounset
-cd "$(dirname ${0})/.."
+cd "$(dirname ${0})/../.."
# Find Python modules and standalone Python scripts
FILES=$(find ./tests/pytests \
diff --git a/scripts/test-config.sh b/scripts/meson/test-config.sh
index 695e5182..2b55066d 100755
--- a/scripts/test-config.sh
+++ b/scripts/meson/test-config.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
# Utility script used by meson to run config tests post installation
set -o nounset -o errexit
diff --git a/scripts/test-integration-prepare.sh b/scripts/meson/test-integration-prepare.sh
index 13db1438..17f08df3 100755
--- a/scripts/test-integration-prepare.sh
+++ b/scripts/meson/test-integration-prepare.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
set -o errexit -o nounset
diff --git a/manager/scripts/codecheck b/scripts/poe-tasks/check
index 3045d156..8cae7fdc 100755
--- a/manager/scripts/codecheck
+++ b/scripts/poe-tasks/check
@@ -1,8 +1,8 @@
-#!/bin/bash
+#!/usr/bin/env bash
# ensure consistent behaviour
src_dir="$(dirname "$(realpath "$0")")"
-source $src_dir/_env.sh
+source $src_dir/utils/_env.sh
aggregate_rv=0
function check_rv {
@@ -32,36 +32,55 @@ fi
# check formatting using black
echo -e "${yellow}Checking formatting using black...${reset}"
-black knot_resolver_manager tests scripts --check --diff
+black python/knot_resolver tests/manager scripts/poe-tasks/utils/create_setup.py --check --diff
+check_rv $?
+echo
+
+# check imports formatting using isort
+echo -e "${yellow}Checking imports formatting using isort...${reset}"
+isort python/knot_resolver tests/manager scripts/poe-tasks/utils/create_setup.py --check --diff
check_rv $?
echo
# check code with pylint
echo -e "${yellow}Linting using pylint...${reset}"
-pylint knot_resolver_manager
+pylint python/knot_resolver
check_rv $?
echo
# check code with flake8
echo -e "${yellow}Linting using flake8...${reset}"
-flake8 --ignore=E266,W503 knot_resolver_manager
+flake8 --max-line-length=200 --ignore=E266,W503 --extend-ignore=E203 python/knot_resolver
check_rv $?
echo
# check types with mypy
echo -e "${yellow}Type checking using mypy...${reset}"
-mypy knot_resolver_manager
+mypy python/knot_resolver
check_rv $?
echo
# check that setup.py is not behind pyproject.toml
echo -e "${yellow}Checking setup.py${reset}"
-python scripts/create_setup.py | diff - setup.py
+python scripts/poe-tasks/utils/create_setup.py | diff - setup.py
check_rv $?
python setup.py --help > /dev/null
check_rv $?
echo
+# check python/knot_resolver/constants.py
+echo -e "${yellow}python/knot_resolver/constants.py${reset}"
+meson_setup_configure > /dev/null
+diff python/knot_resolver/constants.py $build_dir/python/knot_resolver/constants.py
+check_rv $?
+echo
+
+# check that doc/_static/config.schema.json is the latest
+echo -e "${yellow}Checking doc/_static/config.schema.json${reset}"
+python -m knot_resolver.client schema | diff - doc/_static/config.schema.json
+check_rv $?
+echo
+
# fancy messages at the end :)
if test "$aggregate_rv" -eq "0"; then
echo -e "${green}Everything looks great!${reset}"
@@ -70,6 +89,8 @@ else
echo -e "${red}These commands might help you:${reset}"
echo -e "${red}\tpoe format${reset}"
echo -e "${red}\tpoe gen-setuppy${reset}"
+ echo -e "${red}\tpoe gen-constantspy${reset}"
+ echo -e "${red}\tpoe doc-schema${reset}"
echo -e "${red}That's not great. Could you please fix that?${reset} 😲😟"
fi
diff --git a/scripts/poe-tasks/clean b/scripts/poe-tasks/clean
new file mode 100755
index 00000000..c8f596ca
--- /dev/null
+++ b/scripts/poe-tasks/clean
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source $src_dir/utils/_env.sh
+
+echo
+echo --------------------------------------------
+echo Removing mypy, pytest and other cached files
+echo --------------------------------------------
+rm -vrf .coverage .mypy_cache .pytest_cache
+echo
+echo ------------------------------------------
+echo Removing Meson build directories and files
+echo ------------------------------------------
+rm -vrf "$build_dir" "$build_dev_dir" "$KRES_DEV_INSTALL_DIR" build dist
+echo
+echo ------------------------------------------
+echo Removing __pycache__ directories and files
+echo ------------------------------------------
+find . -type d -name '__pycache__' -prune -exec rm -vrf {} +
+echo \ No newline at end of file
diff --git a/scripts/poe-tasks/configure b/scripts/poe-tasks/configure
new file mode 100755
index 00000000..dc4aa074
--- /dev/null
+++ b/scripts/poe-tasks/configure
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source $src_dir/utils/_env.sh
+
+echo
+meson_setup_configure_dev
+echo
+echo -----------------------------------------------
+echo Copying constants.py module configured by Meson
+echo -----------------------------------------------
+cp -v $build_dev_dir/python/knot_resolver/constants.py $gitroot/python/knot_resolver/constants.py
+echo
diff --git a/scripts/poe-tasks/doc b/scripts/poe-tasks/doc
new file mode 100755
index 00000000..ed01f426
--- /dev/null
+++ b/scripts/poe-tasks/doc
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source $src_dir/utils/_env.sh
+
+meson_setup_configure -Ddoc=enabled
+
+echo ----------------------------------
+echo Building documentation using ninja
+echo ----------------------------------
+ninja -C $build_dir doc
+echo
diff --git a/scripts/poe-tasks/doc-schema b/scripts/poe-tasks/doc-schema
new file mode 100755
index 00000000..33ea3bd6
--- /dev/null
+++ b/scripts/poe-tasks/doc-schema
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source $src_dir/utils/_env.sh
+
+schema_file="$gitroot/doc/_static/config.schema.json"
+
+meson_setup_configure > /dev/null
+cp $build_dir/python/knot_resolver/constants.py $gitroot/python/knot_resolver/constants.py
+python -m knot_resolver.client schema > $schema_file
+
+echo New configuration JSON schema saved to $schema_file \ No newline at end of file
diff --git a/scripts/poe-tasks/examples b/scripts/poe-tasks/examples
new file mode 100755
index 00000000..d4437203
--- /dev/null
+++ b/scripts/poe-tasks/examples
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source $src_dir/utils/_env.sh
+
+# validate all configuration examples
+for example in $PWD/etc/config/config.example.*.yaml;
+do
+ python3 -m knot_resolver.client validate --no-strict $example;
+done
diff --git a/scripts/poe-tasks/format b/scripts/poe-tasks/format
new file mode 100755
index 00000000..4b7f39a2
--- /dev/null
+++ b/scripts/poe-tasks/format
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source $src_dir/utils/_env.sh
+
+dirs="python/knot_resolver/ tests/manager scripts/poe-tasks/utils/create_setup.py build_c_extensions.py"
+
+# run black code formater
+black $dirs
+
+# sort python imports
+isort $dirs
diff --git a/scripts/poe-tasks/gen-constantspy b/scripts/poe-tasks/gen-constantspy
new file mode 100755
index 00000000..0f3bb3b6
--- /dev/null
+++ b/scripts/poe-tasks/gen-constantspy
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source $src_dir/utils/_env.sh
+
+echo
+meson_setup_configure
+echo
+echo -----------------------------------------------
+echo Copying constants.py module configured by Meson
+echo -----------------------------------------------
+cp -v $build_dir/python/knot_resolver/constants.py $gitroot/python/knot_resolver/constants.py
+echo \ No newline at end of file
diff --git a/scripts/poe-tasks/gen-setuppy b/scripts/poe-tasks/gen-setuppy
new file mode 100755
index 00000000..8a6d543e
--- /dev/null
+++ b/scripts/poe-tasks/gen-setuppy
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source $src_dir/utils/_env.sh
+
+# create setup.py
+python scripts/poe-tasks/utils/create_setup.py > setup.py
diff --git a/scripts/poe-tasks/kresctl b/scripts/poe-tasks/kresctl
new file mode 100755
index 00000000..87a96e30
--- /dev/null
+++ b/scripts/poe-tasks/kresctl
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source $src_dir/utils/_env.sh
+
+echo
+is_build_dev_dir_configured
+echo
+
+# run knot-resolver client
+python3 -m knot_resolver.client --config "$KRES_DEV_CONFIG_FILE" "$@"
diff --git a/scripts/poe-tasks/run b/scripts/poe-tasks/run
new file mode 100755
index 00000000..4d3ebc8c
--- /dev/null
+++ b/scripts/poe-tasks/run
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source $src_dir/utils/_env.sh
+
+echo
+ninja_dev_install
+echo
+
+echo ------------------------------------
+echo Creating missing runtime directories
+echo ------------------------------------
+mkdir -vp $KRES_DEV_INSTALL_DIR/run/knot-resolver $KRES_DEV_INSTALL_DIR/var/cache/knot-resolver
+echo
+echo ---------------------------------------
+echo Building Python components using Poetry
+echo ---------------------------------------
+poetry build
+# copy native modules from build directory to source directory
+shopt -s globstar
+shopt -s nullglob
+for d in build/lib*; do
+ for f in "python/$d/"**/*.so; do
+ cp -v "$f" ${f#"python/$d/"}
+ done
+done
+shopt -u globstar
+shopt -u nullglob
+
+echo
+echo --------------------------------------
+echo Starting Knot Resolver with the Manager
+echo --------------------------------------
+python3 -m knot_resolver.manager --config "$KRES_DEV_CONFIG_FILE" "$@"
diff --git a/manager/scripts/run-debug b/scripts/poe-tasks/run-debug
index b48f2359..f577681b 100755
--- a/manager/scripts/run-debug
+++ b/scripts/poe-tasks/run-debug
@@ -1,8 +1,8 @@
-#!/bin/bash
+#!/usr/bin/env bash
# ensure consistent behaviour
src_dir="$(dirname "$(realpath "$0")")"
-source $src_dir/_env.sh
+source $src_dir/utils/_env.sh
echo The debug server will be listening on port localhost:5678
echo Use VSCode remote attach feature to connect to the debug server
@@ -10,4 +10,4 @@ echo The manager will start after you connect
echo API will be running on port 5000
echo ----------------------------------------
-KRES_DEBUG_MANAGER=1 poe run $@ \ No newline at end of file
+KRES_DEBUG_MANAGER=1 poe run $@
diff --git a/scripts/poe-tasks/test b/scripts/poe-tasks/test
new file mode 100755
index 00000000..85fd6089
--- /dev/null
+++ b/scripts/poe-tasks/test
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source $src_dir/utils/_env.sh
+
+# run pytest
+env PYTHONPATH=. pytest --junitxml=unit.junit.xml --cov=python/knot_resolver --show-capture=all tests/manager
diff --git a/scripts/poe-tasks/utils/_env.sh b/scripts/poe-tasks/utils/_env.sh
new file mode 100644
index 00000000..66cece83
--- /dev/null
+++ b/scripts/poe-tasks/utils/_env.sh
@@ -0,0 +1,104 @@
+# fail on errors
+set -o errexit
+
+# define color codes
+red="\033[0;31m"
+yellow="\033[0;33m"
+green="\033[0;32m"
+bright_black="\033[0;90m"
+blue="\033[0;34m"
+reset="\033[0m"
+
+# ensure consistent top level directory
+gitroot="$(git rev-parse --show-toplevel)"
+if test -z "$gitroot"; then
+ echo -e "${red}This command can be run only in a git repository tree.${reset}"
+ exit 1
+fi
+cd $gitroot
+
+# build dirs
+build_dir="$gitroot/.build"
+build_dev_dir="$gitroot/.build_dev"
+install_dev_dir="$gitroot/.install_dev"
+
+# ensure consistent environment with virtualenv
+if test -z "$VIRTUAL_ENV" -a "$CI" != "true" -a -z "$KNOT_ENV"; then
+ echo -e "${yellow}You are NOT running the script within the project's virtual environment.${reset}"
+ echo -e "Do you want to continue regardless? [yN]"
+ read cont
+ if test "$cont" != "y" -a "$cont" != "Y"; then
+ echo -e "${red}Exiting early...${reset}"
+ exit 1
+ fi
+fi
+
+# update PATH with node_modules
+PATH="$PATH:$gitroot/node_modules/.bin"
+
+# fail even on unbound variables
+set -o nounset
+
+# Set environment variables if not already set
+if [ -z "${KRES_DEV_INSTALL_DIR:-}" ]; then
+ KRES_DEV_INSTALL_DIR="$install_dev_dir"
+fi
+if [ -z "${KRES_DEV_CONFIG_FILE:-}" ]; then
+ KRES_DEV_CONFIG_FILE="$gitroot/etc/config/config.dev.yaml"
+fi
+export KRES_DEV_INSTALL_DIR
+export KRES_DEV_CONFIG_FILE
+
+function meson_setup_configure {
+ local reconfigure=''
+ if [ -d $build_dir ]; then
+ reconfigure='--reconfigure'
+ fi
+ echo ---------------------------------------
+ echo Configuring build directory using Meson
+ echo ---------------------------------------
+ meson setup \
+ $build_dir \
+ $reconfigure \
+ --prefix=/usr \
+ "$@"
+}
+
+function meson_setup_configure_dev {
+ local reconfigure=''
+ if [ -d $build_dev_dir ]; then
+ reconfigure='--reconfigure'
+ fi
+ echo ---------------------------------------
+ echo Configuring build directory using Meson
+ echo ---------------------------------------
+ meson setup \
+ $build_dev_dir \
+ $reconfigure \
+ --prefix=$KRES_DEV_INSTALL_DIR \
+ -D user=$(id -un) \
+ -D group=$(id -gn) \
+ "$@"
+}
+
+function is_build_dev_dir_configured {
+ if [ ! -d $build_dev_dir ]; then
+ echo
+ echo Knot Resolver build directory is not configured by Meson.
+ echo "Please run './poe configure' (optionally with additional Meson arguments)".
+ echo
+ exit 2
+ fi
+}
+
+function ninja_dev_install {
+
+ is_build_dev_dir_configured
+
+ echo
+ echo --------------------------------------------
+ echo Building/installing C components using Ninja
+ echo --------------------------------------------
+ ninja -C $build_dev_dir
+ ninja install -C $build_dev_dir
+}
diff --git a/manager/scripts/create_setup.py b/scripts/poe-tasks/utils/create_setup.py
index 087ce3b0..2240cea8 100644..100755
--- a/manager/scripts/create_setup.py
+++ b/scripts/poe-tasks/utils/create_setup.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Original source:
diff --git a/scripts/run-scanbuild-with-args.sh b/scripts/run-scanbuild-with-args.sh
deleted file mode 100755
index b2954536..00000000
--- a/scripts/run-scanbuild-with-args.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/bin/bash
-# SPDX-License-Identifier: GPL-3.0-or-later
-set -o errexit -o nounset
-
-# following checkers are disabled on purpose:
-# Clang does not support attribute cleanup and this is causing false positives in following checkers:
-# unix.Malloc
-# alpha.unix.SimpleStream
-# alpha.unix.Stream
-# https://bugs.llvm.org/show_bug.cgi?id=3888
-
-# These are disabled for other reasons:
-# alpha.clone.CloneChecker # way too many false positives
-# alpha.core.CastToStruct # we use this pattern too much, hard to avoid in many cases
-# alpha.deadcode.UnreachableCode # false positives/flags sanity checks depending on implementation details
-# alpha.security.MallocOverflow # not smart enough to infer max values from data types
-
-exec scan-build --status-bugs -no-failure-reports \
--analyzer-config aggressive-binary-operation-simplification=true \
--disable-checker unix.Malloc \
--enable-checker alpha.core.BoolAssignment \
--enable-checker alpha.core.CastSize \
--enable-checker alpha.core.Conversion \
--enable-checker alpha.core.DynamicTypeChecker \
--enable-checker alpha.core.FixedAddr \
--enable-checker alpha.core.IdenticalExpr \
--enable-checker alpha.core.PointerArithm \
--enable-checker alpha.core.PointerSub \
--enable-checker alpha.core.SizeofPtr \
--enable-checker alpha.core.TestAfterDivZero \
--enable-checker alpha.cplusplus.IteratorRange \
--enable-checker alpha.security.ArrayBound \
--enable-checker alpha.security.ArrayBoundV2 \
--enable-checker alpha.security.ReturnPtrRange \
--enable-checker alpha.security.taint.TaintPropagation \
--enable-checker alpha.unix.BlockInCriticalSection \
--enable-checker alpha.unix.Chroot \
--enable-checker alpha.unix.PthreadLock \
--enable-checker alpha.unix.cstring.BufferOverlap \
--enable-checker alpha.unix.cstring.NotNullTerminated \
--enable-checker alpha.unix.cstring.OutOfBounds \
--enable-checker nullability.NullableDereferenced \
--enable-checker nullability.NullablePassedToNonnull \
--enable-checker nullability.NullableReturnedFromNonnull \
--enable-checker optin.performance.Padding \
--enable-checker optin.portability.UnixAPI \
--enable-checker security.FloatLoopCounter \
--enable-checker valist.CopyToSelf \
--enable-checker valist.Uninitialized \
--enable-checker valist.Unterminated \
-"$@"
diff --git a/scripts/update-authors.sh b/scripts/update-authors.sh
index 8ccb77ed..4e2f6806 100755
--- a/scripts/update-authors.sh
+++ b/scripts/update-authors.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
# avoid confusing changes in ordering
diff --git a/scripts/update-root-hints.sh b/scripts/update-root-hints.sh
index 5f7a564b..862d834b 100755
--- a/scripts/update-root-hints.sh
+++ b/scripts/update-root-hints.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# SPDX-License-Identifier: GPL-3.0-or-later
set -o nounset -o xtrace
diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..4234d8b0
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+from setuptools import setup
+
+package_dir = \
+{'': 'python'}
+
+packages = \
+['knot_resolver',
+ 'knot_resolver.client',
+ 'knot_resolver.client.commands',
+ 'knot_resolver.controller',
+ 'knot_resolver.controller.supervisord',
+ 'knot_resolver.controller.supervisord.plugin',
+ 'knot_resolver.datamodel',
+ 'knot_resolver.datamodel.templates',
+ 'knot_resolver.datamodel.types',
+ 'knot_resolver.manager',
+ 'knot_resolver.manager.metrics',
+ 'knot_resolver.utils',
+ 'knot_resolver.utils.compat',
+ 'knot_resolver.utils.modeling']
+
+package_data = \
+{'': ['*'], 'knot_resolver.datamodel.templates': ['macros/*']}
+
+install_requires = \
+['aiohttp', 'jinja2', 'pyyaml', 'supervisor', 'typing-extensions']
+
+extras_require = \
+{'prometheus': ['prometheus-client']}
+
+entry_points = \
+{'console_scripts': ['knot-resolver = knot_resolver.manager.main:main',
+ 'kresctl = knot_resolver.client.main:main']}
+
+setup_kwargs = {
+ 'name': 'knot-resolver',
+ 'version': '6.0.8',
+ 'description': 'Knot Resolver Manager - a Python program that automatically manages the other components of the resolver',
+ 'long_description': "# Knot Resolver\n\n[![Build Status](https://gitlab.nic.cz/knot/knot-resolver/badges/nightly/pipeline.svg?x)](https://gitlab.nic.cz/knot/knot-resolver/commits/nightly)\n[![Coverage Status](https://gitlab.nic.cz/knot/knot-resolver/badges/nightly/coverage.svg?x)](https://www.knot-resolver.cz/documentation/latest)\n[![Packaging status](https://repology.org/badge/tiny-repos/knot-resolver.svg)](https://repology.org/project/knot-resolver/versions)\n\nKnot Resolver is a full caching DNS resolver implementation. The core architecture is tiny and efficient, written in C and [LuaJIT][luajit], providing a foundation and a state-machine-like API for extension modules. There are three built-in modules - *iterator*, *validator* and *cache* - which provide the main functionality of the resolver. A few other modules are automatically loaded by default to extend the resolver's functionality.\n\nSince Knot Resolver version 6, it also includes a so-called [manager][manager]. It is a new component written in [Python][python] that hides the complexity of older versions and makes it more user friendly. For example, new features include declarative configuration in YAML format and HTTP API for dynamic changes in the resolver and more.\n\nKnot Resolver uses a [different scaling strategy][scaling] than the rest of the DNS resolvers - no threading, shared-nothing architecture (except MVCC cache which can be shared), which allows you to pin workers to available CPU cores and grow by self-replication. You can start and stop additional workers based on the contention without downtime, which is automated by the [manager][manager] by default.\n\nThe LuaJIT modules, support for DNS privacy and DNSSEC, and persistent cache with low memory footprint make it a great personal DNS resolver or a research tool to tap into DNS data. Strong filtering rules, and auto-configuration with etcd make it a great large-scale resolver solution. It also has strong support for DNS over TCP, in particular TCP Fast-Open, query pipelining and deduplication, and response reordering.\n\nFor more on using the resolver, see the [User Documentation][doc]. 
See the [Developer Documentation][doc-dev] for detailed architecture and development.\n\n## Packages\n\nThe latest stable packages for various distributions are available in our\n[upstream repository](https://pkg.labs.nic.cz/doc/?project=knot-resolver).\nFollow the installation instructions to add this repository to your system.\n\nKnot Resolver is also available from the following distributions' repositories:\n\n* [Fedora and Fedora EPEL](https://src.fedoraproject.org/rpms/knot-resolver)\n* [Debian stable](https://packages.debian.org/stable/knot-resolver),\n [Debian testing](https://packages.debian.org/testing/knot-resolver),\n [Debian unstable](https://packages.debian.org/sid/knot-resolver)\n* [Ubuntu](https://packages.ubuntu.com/jammy/knot-resolver)\n* [Arch Linux](https://archlinux.org/packages/extra/x86_64/knot-resolver/)\n* [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=knot-resolver)\n\n### Packaging\n\nThe project uses [`apkg`](https://gitlab.nic.cz/packaging/apkg) for packaging.\nSee [`distro/README.md`](distro/README.md) for packaging specific instructions.\n\n## Building from sources\n\nKnot Resolver mainly depends on [KnotDNS][knot-dns] libraries, [LuaJIT][luajit], [libuv][libuv] and [Python][python].\n\nSee the [Building project][build] documentation page for more information.\n\n## Running\n\nBy default, Knot Resolver comes with [systemd][systemd] integration and you just need to start its service. It requires no configuration changes to run a server on localhost.\n\n```\n# systemctl start knot-resolver\n```\n\nSee the documentation at [knot-resolver.cz/documentation/latest][doc] for more information.\n\n## Running the Docker image\n\nRunning the Docker image is simple and doesn't require any dependencies or system modifications, just run:\n\n```\n$ docker run -Pit cznic/knot-resolver\n```\n\nThe images are meant as an easy way to try the resolver, and they're not designed for production use.\n\n## Contacting us\n\n- [GitLab issues](https://gitlab.nic.cz/knot/knot-resolver/issues) (you may authenticate via GitHub)\n- [mailing list](https://lists.nic.cz/postorius/lists/knot-resolver-announce.lists.nic.cz/)\n- [![Join the chat at https://gitter.im/CZ-NIC/knot-resolver](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/CZ-NIC/knot-resolver?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)\n\n[build]: https://www.knot-resolver.cz/documentation/latest/dev/build.html\n[doc]: https://www.knot-resolver.cz/documentation/latest/\n[doc-dev]: https://www.knot-resolver.cz/documentation/latest/dev\n[knot-dns]: https://www.knot-dns.cz/\n[luajit]: https://luajit.org/\n[libuv]: http://libuv.org\n[python]: https://www.python.org/\n[systemd]: https://systemd.io/\n[scaling]: https://www.knot-resolver.cz/documentation/latest/config-multiple-workers.html\n[manager]: https://www.knot-resolver.cz/documentation/latest/dev/architecture.html\n",
+ 'author': 'Aleš Mrázek',
+ 'author_email': 'ales.mrazek@nic.cz',
+ 'maintainer': 'Aleš Mrázek',
+ 'maintainer_email': 'ales.mrazek@nic.cz',
+ 'url': 'https://www.knot-resolver.cz',
+ 'package_dir': package_dir,
+ 'packages': packages,
+ 'package_data': package_data,
+ 'install_requires': install_requires,
+ 'extras_require': extras_require,
+ 'entry_points': entry_points,
+ 'python_requires': '>=3.8,<4.0',
+}
+from build_c_extensions import *
+build(setup_kwargs)
+
+setup(**setup_kwargs)
+
+
+# This setup.py was autogenerated using Poetry for backward compatibility with setuptools.
diff --git a/tests/README.rst b/tests/README.rst
index 37867b38..c2239f2f 100644
--- a/tests/README.rst
+++ b/tests/README.rst
@@ -1,13 +1,23 @@
.. SPDX-License-Identifier: GPL-3.0-or-later
-Tests
-=====
+**********************
+Testing infrastructure
+**********************
+
+The following is a non-exhaustive list of various tests that can be found in this repo.
+Some can be enabled by the meson build system and some can be performed by the Poetry tool.
+
+
+The manager unit tests
+======================
+
+The unit tests use ``pytest`` and can be run with the command ``poe test``.
+They can be run from a freshly cloned repository and should pass.
+They are located in the ``tests/manager`` subdirectory.
-The following is a non-comprehensitve lists of various tests that can be found
-in this repo. These can be enabled by the build system.
Unit tests
-----------
+==========
The unit tests depend on cmocka_ and can easily be executed after compilation.
They are enabled by default (if ``cmocka`` is found).
@@ -17,8 +27,9 @@ They are enabled by default (if ``cmocka`` is found).
$ ninja -C build_dir
$ meson test -C build_dir --suite unit
+
Postinstall tests
------------------
+=================
The following tests require a working installation of kresd. The
binary ``kresd`` found in ``$PATH`` will be tested. When testing through meson,
@@ -29,8 +40,9 @@ kresd first.
$ ninja install -C build_dir
+
Config tests
-------------
+============
Config tests utilize the kresd's lua config file to execute arbitrary tests,
typically testing various modules, their API etc.
@@ -45,8 +57,9 @@ the build dir).
$ ninja install -C build_dir
$ meson test -C build_dir --suite config
+
Extra tests
------------
+===========
The extra tests require a large set of additional dependencies and executing
them outside of upstream development is probably redundant.
@@ -82,7 +95,7 @@ example TCP, TLS and its connection management.
$ meson test -C build_dir --suite pytests
Useful meson commands
----------------------
+=====================
It's possible to run only specific test suite or a test.
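For instance, assuming a configured ``build_dir``, a single suite or an individual test can be selected with ``meson test``; the suite name comes from the sections above, while the test name placeholder is hypothetical:

    # run only one suite
    $ meson test -C build_dir --suite config
    # list available tests and run a single one by name
    $ meson test -C build_dir --list
    $ meson test -C build_dir <test-name>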
diff --git a/tests/config/meson.build b/tests/config/meson.build
index 2a9e2487..681f2cc2 100644
--- a/tests/config/meson.build
+++ b/tests/config/meson.build
@@ -10,7 +10,7 @@ config_tests += [
]
-run_configtest = find_program('../../scripts/test-config.sh')
+run_configtest = find_program('../../scripts/meson/test-config.sh')
foreach config_test : config_tests
diff --git a/tests/dnstap/src/dnstap-test/run.sh b/tests/dnstap/src/dnstap-test/run.sh
index 70d82254..f1ed9fb2 100755
--- a/tests/dnstap/src/dnstap-test/run.sh
+++ b/tests/dnstap/src/dnstap-test/run.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
set -e
KRESD_CMD=$1
MESON_BUILD_ROOT=$(pwd)
diff --git a/tests/integration/deckard b/tests/integration/deckard
-Subproject b5b338678d48a9807097000afe03ebfdf705f7a
+Subproject cc478cc04956916d1a946ae986a952732a837a8
diff --git a/tests/integration/meson.build b/tests/integration/meson.build
index 98b661f4..e9ab9e0f 100644
--- a/tests/integration/meson.build
+++ b/tests/integration/meson.build
@@ -19,7 +19,7 @@ py3_deps += [
['yaml', 'PyYAML (for deckard)'],
]
-prepare_deckard = find_program('../../scripts/test-integration-prepare.sh')
+prepare_deckard = find_program('../../scripts/meson/test-integration-prepare.sh')
deckard_env = environment()
deckard_env.prepend('PATH', sbin_dir)
diff --git a/tests/manager/__init__.py b/tests/manager/__init__.py
new file mode 100644
index 00000000..5b82d3be
--- /dev/null
+++ b/tests/manager/__init__.py
@@ -0,0 +1,5 @@
+from pathlib import Path
+
+from knot_resolver.datamodel.globals import Context, set_global_validation_context
+
+set_global_validation_context(Context(Path("."), False))
diff --git a/manager/tests/unit/datamodel/templates/test_cache_macros.py b/tests/manager/datamodel/templates/test_cache_macros.py
index 155905c6..b90bc417 100644
--- a/manager/tests/unit/datamodel/templates/test_cache_macros.py
+++ b/tests/manager/datamodel/templates/test_cache_macros.py
@@ -2,8 +2,8 @@ from typing import Any
import pytest
-from knot_resolver_manager.datamodel.cache_schema import CacheClearRPCSchema
-from knot_resolver_manager.datamodel.templates import template_from_str
+from knot_resolver.datamodel.cache_schema import CacheClearRPCSchema
+from knot_resolver.datamodel.templates import template_from_str
@pytest.mark.parametrize(
diff --git a/manager/tests/unit/datamodel/templates/test_common_macros.py b/tests/manager/datamodel/templates/test_common_macros.py
index 0e794fce..9d442ee4 100644
--- a/manager/tests/unit/datamodel/templates/test_common_macros.py
+++ b/tests/manager/datamodel/templates/test_common_macros.py
@@ -1,5 +1,5 @@
-from knot_resolver_manager.datamodel.forward_schema import ForwardServerSchema
-from knot_resolver_manager.datamodel.templates import template_from_str
+from knot_resolver.datamodel.forward_schema import ForwardServerSchema
+from knot_resolver.datamodel.templates import template_from_str
def test_boolean():
@@ -75,7 +75,7 @@ def test_tls_servers_table():
ForwardServerSchema(
{
"address": "192.0.2.1",
- "pin-sha256": "OTJmODU3ZDMyOWMwOWNlNTU4Y2M0YWNjMjI5NWE2NWJlMzY4MzRmMzY3NGU3NDAwNTI1YjMxZTMxYTgzMzQwMQ==",
+ "pin-sha256": "E9CZ9INDbd+2eRQozYqqbQ2yXLVKB9+xcprMF+44U1g=",
}
),
]
diff --git a/manager/tests/unit/datamodel/templates/test_forward_macros.py b/tests/manager/datamodel/templates/test_forward_macros.py
index 534c6007..0ed2ec9b 100644
--- a/manager/tests/unit/datamodel/templates/test_forward_macros.py
+++ b/tests/manager/datamodel/templates/test_forward_macros.py
@@ -1,6 +1,6 @@
-from knot_resolver_manager.datamodel.forward_schema import ForwardSchema
-from knot_resolver_manager.datamodel.templates import template_from_str
-from knot_resolver_manager.datamodel.types import IPAddressOptionalPort
+from knot_resolver.datamodel.forward_schema import ForwardSchema
+from knot_resolver.datamodel.templates import template_from_str
+from knot_resolver.datamodel.types import IPAddressOptionalPort
def test_policy_rule_forward_add():
diff --git a/manager/tests/unit/datamodel/templates/test_network_macros.py b/tests/manager/datamodel/templates/test_network_macros.py
index 066463df..7ff856fe 100644
--- a/manager/tests/unit/datamodel/templates/test_network_macros.py
+++ b/tests/manager/datamodel/templates/test_network_macros.py
@@ -1,5 +1,5 @@
-from knot_resolver_manager.datamodel.network_schema import ListenSchema
-from knot_resolver_manager.datamodel.templates import template_from_str
+from knot_resolver.datamodel.network_schema import ListenSchema
+from knot_resolver.datamodel.templates import template_from_str
def test_network_listen():
diff --git a/manager/tests/unit/datamodel/templates/test_policy_macros.py b/tests/manager/datamodel/templates/test_policy_macros.py
index a9804cd3..09aac85d 100644
--- a/manager/tests/unit/datamodel/templates/test_policy_macros.py
+++ b/tests/manager/datamodel/templates/test_policy_macros.py
@@ -1,9 +1,9 @@
from typing import List
-from knot_resolver_manager.datamodel.network_schema import AddressRenumberingSchema
-from knot_resolver_manager.datamodel.policy_schema import AnswerSchema
-from knot_resolver_manager.datamodel.templates import template_from_str
-from knot_resolver_manager.datamodel.types import PolicyFlagEnum
+from knot_resolver.datamodel.network_schema import AddressRenumberingSchema
+from knot_resolver.datamodel.policy_schema import AnswerSchema
+from knot_resolver.datamodel.templates import template_from_str
+from knot_resolver.datamodel.types import PolicyFlagEnum
def test_policy_add():
diff --git a/manager/tests/unit/datamodel/templates/test_types_render.py b/tests/manager/datamodel/templates/test_types_render.py
index f83b41e3..c67d72b2 100644
--- a/manager/tests/unit/datamodel/templates/test_types_render.py
+++ b/tests/manager/datamodel/templates/test_types_render.py
@@ -3,8 +3,8 @@ from typing import Any
import pytest
from jinja2 import Template
-from knot_resolver_manager.datamodel.types import EscapedStr
-from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver.datamodel.types import EscapedStr
+from knot_resolver.utils.modeling import ConfigSchema
str_template = Template("'{{ string }}'")
diff --git a/manager/tests/unit/datamodel/templates/test_view_macros.py b/tests/manager/datamodel/templates/test_view_macros.py
index 5f4956f8..f814827d 100644
--- a/manager/tests/unit/datamodel/templates/test_view_macros.py
+++ b/tests/manager/datamodel/templates/test_view_macros.py
@@ -2,8 +2,8 @@ from typing import Any
import pytest
-from knot_resolver_manager.datamodel.templates import template_from_str
-from knot_resolver_manager.datamodel.view_schema import ViewOptionsSchema, ViewSchema
+from knot_resolver.datamodel.templates import template_from_str
+from knot_resolver.datamodel.view_schema import ViewOptionsSchema, ViewSchema
def test_view_flags():
diff --git a/manager/tests/unit/datamodel/test_config_schema.py b/tests/manager/datamodel/test_config_schema.py
index 13a20f33..9ec2b31b 100644
--- a/manager/tests/unit/datamodel/test_config_schema.py
+++ b/tests/manager/datamodel/test_config_schema.py
@@ -2,10 +2,10 @@ import inspect
import json
from typing import Any, Dict, Type, cast
-from knot_resolver_manager.datamodel import KresConfig
-from knot_resolver_manager.datamodel.lua_schema import LuaSchema
-from knot_resolver_manager.utils.modeling import BaseSchema
-from knot_resolver_manager.utils.modeling.types import (
+from knot_resolver.datamodel import KresConfig
+from knot_resolver.datamodel.lua_schema import LuaSchema
+from knot_resolver.utils.modeling import BaseSchema
+from knot_resolver.utils.modeling.types import (
get_generic_type_argument,
get_generic_type_arguments,
get_optional_inner_type,
diff --git a/manager/tests/unit/datamodel/test_forward_schema.py b/tests/manager/datamodel/test_forward_schema.py
index 9ae77fe8..58ad4667 100644
--- a/manager/tests/unit/datamodel/test_forward_schema.py
+++ b/tests/manager/datamodel/test_forward_schema.py
@@ -1,8 +1,8 @@
import pytest
from pytest import raises
-from knot_resolver_manager.datamodel.forward_schema import ForwardSchema
-from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
+from knot_resolver.datamodel.forward_schema import ForwardSchema
+from knot_resolver.utils.modeling.exceptions import DataValidationError
@pytest.mark.parametrize("port,auth", [(5353, False), (53, True)])
diff --git a/manager/tests/unit/datamodel/test_local_data.py b/tests/manager/datamodel/test_local_data.py
index 9842b0b2..9edc5fba 100644
--- a/manager/tests/unit/datamodel/test_local_data.py
+++ b/tests/manager/datamodel/test_local_data.py
@@ -3,8 +3,8 @@ from typing import Any
import pytest
from pytest import raises
-from knot_resolver_manager.datamodel.local_data_schema import RuleSchema
-from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
+from knot_resolver.datamodel.local_data_schema import RuleSchema
+from knot_resolver.utils.modeling.exceptions import DataValidationError
@pytest.mark.parametrize(
diff --git a/manager/tests/unit/datamodel/test_lua_schema.py b/tests/manager/datamodel/test_lua_schema.py
index 30d69bd9..2021298a 100644
--- a/manager/tests/unit/datamodel/test_lua_schema.py
+++ b/tests/manager/datamodel/test_lua_schema.py
@@ -1,7 +1,7 @@
from pytest import raises
-from knot_resolver_manager.datamodel.lua_schema import LuaSchema
-from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
+from knot_resolver.datamodel.lua_schema import LuaSchema
+from knot_resolver.utils.modeling.exceptions import DataValidationError
def test_invalid():
diff --git a/manager/tests/unit/datamodel/test_management_schema.py b/tests/manager/datamodel/test_management_schema.py
index 870e7208..b310a2e2 100644
--- a/manager/tests/unit/datamodel/test_management_schema.py
+++ b/tests/manager/datamodel/test_management_schema.py
@@ -2,8 +2,8 @@ from typing import Any, Dict, Optional
import pytest
-from knot_resolver_manager.datamodel.management_schema import ManagementSchema
-from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
+from knot_resolver.datamodel.management_schema import ManagementSchema
+from knot_resolver.utils.modeling.exceptions import DataValidationError
@pytest.mark.parametrize("val", [{"interface": "::1@53"}, {"unix-socket": "/tmp/socket"}])
diff --git a/manager/tests/unit/datamodel/test_network_schema.py b/tests/manager/datamodel/test_network_schema.py
index 7b616f34..aed09310 100644
--- a/manager/tests/unit/datamodel/test_network_schema.py
+++ b/tests/manager/datamodel/test_network_schema.py
@@ -3,9 +3,9 @@ from typing import Any, Dict, Optional
import pytest
from pytest import raises
-from knot_resolver_manager.datamodel.network_schema import ListenSchema, NetworkSchema
-from knot_resolver_manager.datamodel.types import InterfaceOptionalPort, PortNumber
-from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
+from knot_resolver.datamodel.network_schema import ListenSchema, NetworkSchema
+from knot_resolver.datamodel.types import InterfaceOptionalPort, PortNumber
+from knot_resolver.utils.modeling.exceptions import DataValidationError
def test_listen_defaults():
diff --git a/manager/tests/unit/datamodel/test_policy_schema.py b/tests/manager/datamodel/test_policy_schema.py
index aeb98a71..8b18e42c 100644
--- a/manager/tests/unit/datamodel/test_policy_schema.py
+++ b/tests/manager/datamodel/test_policy_schema.py
@@ -3,10 +3,10 @@ from typing import Any, Dict
import pytest
from pytest import raises
-from knot_resolver_manager.datamodel.policy_schema import ActionSchema, PolicySchema
-from knot_resolver_manager.datamodel.types import PolicyActionEnum
-from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
-from knot_resolver_manager.utils.modeling.types import get_generic_type_arguments
+from knot_resolver.datamodel.policy_schema import ActionSchema, PolicySchema
+from knot_resolver.datamodel.types import PolicyActionEnum
+from knot_resolver.utils.modeling.exceptions import DataValidationError
+from knot_resolver.utils.modeling.types import get_generic_type_arguments
noconfig_actions = [
"pass",
diff --git a/manager/tests/unit/datamodel/test_rpz_schema.py b/tests/manager/datamodel/test_rpz_schema.py
index 6603deed..b656b2a3 100644
--- a/manager/tests/unit/datamodel/test_rpz_schema.py
+++ b/tests/manager/datamodel/test_rpz_schema.py
@@ -1,8 +1,8 @@
import pytest
from pytest import raises
-from knot_resolver_manager.datamodel.rpz_schema import RPZSchema
-from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
+from knot_resolver.datamodel.rpz_schema import RPZSchema
+from knot_resolver.utils.modeling.exceptions import DataValidationError
@pytest.mark.parametrize(
diff --git a/manager/tests/unit/datamodel/types/test_base_types.py b/tests/manager/datamodel/types/test_base_types.py
index 00e7bda5..210604ed 100644
--- a/manager/tests/unit/datamodel/types/test_base_types.py
+++ b/tests/manager/datamodel/types/test_base_types.py
@@ -5,8 +5,8 @@ from typing import List, Optional
import pytest
from pytest import raises
-from knot_resolver_manager.datamodel.types.base_types import IntRangeBase, StringLengthBase
-from knot_resolver_manager.exceptions import KresManagerException
+from knot_resolver import KresBaseException
+from knot_resolver.datamodel.types.base_types import IntRangeBase, StringLengthBase
@pytest.mark.parametrize("min,max", [(0, None), (None, 0), (1, 65535), (-65535, -1)])
@@ -34,7 +34,7 @@ def test_int_range_base(min: Optional[int], max: Optional[int]):
invals.extend([random.randint(-sys.maxsize - 1, rmin - 1) for _ in range(n % 2)] if max else [])
for inval in invals:
- with raises(KresManagerException):
+ with raises(KresBaseException):
Test(inval)
@@ -62,5 +62,5 @@ def test_str_bytes_length_base(min: Optional[int], max: Optional[int]):
invals.extend(["x" * random.randint(1, rmin - 1) for _ in range(n % 2)] if max else [])
for inval in invals:
- with raises(KresManagerException):
+ with raises(KresBaseException):
Test(inval)
diff --git a/manager/tests/unit/datamodel/types/test_custom_types.py b/tests/manager/datamodel/types/test_custom_types.py
index 5eebef3a..e381e918 100644
--- a/manager/tests/unit/datamodel/types/test_custom_types.py
+++ b/tests/manager/datamodel/types/test_custom_types.py
@@ -6,7 +6,7 @@ from typing import Any
import pytest
from pytest import raises
-from knot_resolver_manager.datamodel.types import (
+from knot_resolver.datamodel.types import (
Dir,
DomainName,
EscapedStr,
@@ -25,7 +25,7 @@ from knot_resolver_manager.datamodel.types import (
SizeUnit,
TimeUnit,
)
-from knot_resolver_manager.utils.modeling import BaseSchema
+from knot_resolver.utils.modeling import BaseSchema
def _rand_domain(label_chars: int, levels: int = 1) -> str:
@@ -97,8 +97,8 @@ def test_checked_path():
@pytest.mark.parametrize(
"val",
[
- "YmE3ODE2YmY4ZjAx+2ZlYTQxNDE0MGRlNWRhZTIyMjNiMDAzNjFhMzk/MTc3YTljYjQxMGZmNjFmMjAwMTVhZA==",
- "OTJmODU3ZDMyOWMwOWNlNTU4Y2M0YWNjMjI5NWE2NWJlMzY4MzRmMzY3NGU3NDAwNTI1YjMxZTMxYTgzMzQwMQ==",
+ "d6qzRu9zOECb90Uez27xWltNsj0e1Md7GkYYkVoZWmM=",
+ "E9CZ9INDbd+2eRQozYqqbQ2yXLVKB9+xcprMF+44U1g=",
],
)
def test_pin_sha256_valid(val: str):
@@ -109,9 +109,10 @@ def test_pin_sha256_valid(val: str):
@pytest.mark.parametrize(
"val",
[
- "!YmE3ODE2YmY4ZjAxY2ZlYTQxNDE0MGRlNWRhZTIyMjNiMDAzNjFhMzk2MTc3YTljjQxMGZmNjFmMjAwMTVhZA==",
- "OTJmODU3ZDMyOWMwOWNlNTU4Y2M0YWNjMjI5NWE2NWJlMzY4MzRmMzY3NGU3NDAwNTI1YjMxZTMxYTgzMzQwMQ",
- "YmFzZTY0IQ",
+ "d6qzRu9zOECb90Uez27xWltNsj0e1Md7GkYYkVoZWmM==",
+ "E9CZ9INDbd+2eRQozYqqbQ2yXLVKB9+xcprMF+44U1g",
+ "!E9CZ9INDbd+2eRQozYqqbQ2yXLVKB9+xcprMF+44U1g=",
+ "d6qzRu9zOE",
],
)
def test_pin_sha256_invalid(val: str):
diff --git a/manager/tests/unit/datamodel/types/test_generic_types.py b/tests/manager/datamodel/types/test_generic_types.py
index 40b40189..e0b40664 100644
--- a/manager/tests/unit/datamodel/types/test_generic_types.py
+++ b/tests/manager/datamodel/types/test_generic_types.py
@@ -3,10 +3,10 @@ from typing import Any, List, Optional, Union
import pytest
from pytest import raises
-from knot_resolver_manager.datamodel.types import ListOrItem
-from knot_resolver_manager.utils.modeling import BaseSchema
-from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
-from knot_resolver_manager.utils.modeling.types import get_generic_type_wrapper_argument
+from knot_resolver.datamodel.types import ListOrItem
+from knot_resolver.utils.modeling import BaseSchema
+from knot_resolver.utils.modeling.exceptions import DataValidationError
+from knot_resolver.utils.modeling.types import get_generic_type_wrapper_argument
@pytest.mark.parametrize("val", [str, int])
diff --git a/manager/tests/unit/test_config_store.py b/tests/manager/test_config_store.py
index f4fd8c03..5ddc4e07 100644
--- a/manager/tests/unit/test_config_store.py
+++ b/tests/manager/test_config_store.py
@@ -1,7 +1,7 @@
import pytest
-from knot_resolver_manager.config_store import ConfigStore, only_on_real_changes_update
-from knot_resolver_manager.datamodel.config_schema import KresConfig
+from knot_resolver.datamodel.config_schema import KresConfig
+from knot_resolver.manager.config_store import ConfigStore, only_on_real_changes_update
@pytest.mark.asyncio # type: ignore
diff --git a/tests/manager/test_knot_resolver_manager.py b/tests/manager/test_knot_resolver_manager.py
new file mode 100644
index 00000000..108b3b55
--- /dev/null
+++ b/tests/manager/test_knot_resolver_manager.py
@@ -0,0 +1,11 @@
+import toml
+
+from knot_resolver import __version__
+
+
+def test_version():
+    with open("pyproject.toml", "r") as f:
+        pyproject = toml.load(f)
+
+    version = pyproject["tool"]["poetry"]["version"]
+    assert __version__ == version
diff --git a/manager/tests/unit/utils/modeling/test_base_schema.py b/tests/manager/utils/modeling/test_base_schema.py
index 07e278bb..25fcf031 100644
--- a/manager/tests/unit/utils/modeling/test_base_schema.py
+++ b/tests/manager/utils/modeling/test_base_schema.py
@@ -1,11 +1,10 @@
-from typing import Any, Dict, List, Optional, Tuple, Type, Union
+from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union
import pytest
from pytest import raises
-from typing_extensions import Literal
-from knot_resolver_manager.utils.modeling import ConfigSchema, parse_json, parse_yaml
-from knot_resolver_manager.utils.modeling.exceptions import DataDescriptionError, DataValidationError
+from knot_resolver.utils.modeling import ConfigSchema, parse_json, parse_yaml
+from knot_resolver.utils.modeling.exceptions import DataDescriptionError, DataValidationError
class _TestBool(ConfigSchema):
diff --git a/manager/tests/unit/utils/modeling/test_etag.py b/tests/manager/utils/modeling/test_etag.py
index 25a52369..8a9e2af3 100644
--- a/manager/tests/unit/utils/modeling/test_etag.py
+++ b/tests/manager/utils/modeling/test_etag.py
@@ -1,6 +1,6 @@
from pyparsing import empty
-from knot_resolver_manager.utils.etag import structural_etag
+from knot_resolver.utils.etag import structural_etag
def test_etag():
diff --git a/manager/tests/unit/utils/modeling/test_json_pointer.py b/tests/manager/utils/modeling/test_json_pointer.py
index 532e6d5e..1566c715 100644
--- a/manager/tests/unit/utils/modeling/test_json_pointer.py
+++ b/tests/manager/utils/modeling/test_json_pointer.py
@@ -1,6 +1,6 @@
from pytest import raises
-from knot_resolver_manager.utils.modeling.json_pointer import json_ptr_resolve
+from knot_resolver.utils.modeling.json_pointer import json_ptr_resolve
# example adopted from https://www.sitepoint.com/json-server-example/
TEST = {
diff --git a/manager/tests/unit/utils/modeling/test_query.py b/tests/manager/utils/modeling/test_query.py
index 1a552b87..0e9200d1 100644
--- a/manager/tests/unit/utils/modeling/test_query.py
+++ b/tests/manager/utils/modeling/test_query.py
@@ -1,6 +1,6 @@
from pytest import raises
-from knot_resolver_manager.utils.modeling.query import query
+from knot_resolver.utils.modeling.query import query
def test_example_from_spec():
diff --git a/manager/tests/unit/utils/modeling/test_renaming.py b/tests/manager/utils/modeling/test_renaming.py
index 1a4ce89e..219e58d5 100644
--- a/manager/tests/unit/utils/modeling/test_renaming.py
+++ b/tests/manager/utils/modeling/test_renaming.py
@@ -1,4 +1,4 @@
-from knot_resolver_manager.utils.modeling.renaming import renamed
+from knot_resolver.utils.modeling.renaming import renamed
def test_all():
diff --git a/manager/tests/unit/utils/modeling/test_types.py b/tests/manager/utils/modeling/test_types.py
index 281f03a8..4b9e62fa 100644
--- a/manager/tests/unit/utils/modeling/test_types.py
+++ b/tests/manager/utils/modeling/test_types.py
@@ -1,10 +1,9 @@
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any, Dict, List, Literal, Tuple, Union
import pytest
-from typing_extensions import Literal
-from knot_resolver_manager.utils.modeling import BaseSchema
-from knot_resolver_manager.utils.modeling.types import is_list, is_literal
+from knot_resolver.utils.modeling import BaseSchema
+from knot_resolver.utils.modeling.types import is_list, is_literal
types = [
bool,
diff --git a/manager/tests/unit/utils/test_functional.py b/tests/manager/utils/test_functional.py
index 041748e4..31bd19fc 100644
--- a/manager/tests/unit/utils/test_functional.py
+++ b/tests/manager/utils/test_functional.py
@@ -1,4 +1,4 @@
-from knot_resolver_manager.utils.functional import all_matches, contains_element_matching, foldl
+from knot_resolver.utils.functional import all_matches, contains_element_matching, foldl
def test_foldl():
diff --git a/tests/meson.build b/tests/meson.build
index 818169df..3b2bb654 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -13,7 +13,6 @@ if get_option('unit_tests') != 'disabled'
message('-------------------------------')
endif
-build_extra_tests = get_option('extra_tests') == 'enabled'
build_config_tests = get_option('config_tests') == 'enabled'
if get_option('config_tests') == 'auto'
build_config_tests = build_extra_tests
diff --git a/tests/packaging/README.md b/tests/packaging/README.md
new file mode 100644
index 00000000..633bd576
--- /dev/null
+++ b/tests/packaging/README.md
@@ -0,0 +1,5 @@
+# Packaging tests
+
+## Distro tests
+
+Tests in this directory are part of the distro tests listed in the `distro/tests/extra/all/control` file; they can be run with the `apkg test` command.
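For reference, a minimal sketch of running these distro tests locally, assuming the apkg tool is installed and the command is issued from the repository root (exact prerequisites depend on the host distribution):

    # run the test suites declared under distro/tests/, which pull in tests/packaging/
    apkg test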
diff --git a/manager/tests/packaging/dependencies.py b/tests/packaging/dependencies.py
index d92be71a..1262d2e1 100755
--- a/manager/tests/packaging/dependencies.py
+++ b/tests/packaging/dependencies.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
import importlib
import importlib.util
@@ -15,7 +15,7 @@ sys.modules["setuptools"] = dummy
sys.modules["build_c_extensions"] = dummy
# load install_requires array from setup.py
-spec = importlib.util.spec_from_file_location("setup", sys.argv[1] if len(sys.argv) == 2 else "manager/setup.py")
+spec = importlib.util.spec_from_file_location("setup", sys.argv[1] if len(sys.argv) == 2 else "setup.py")
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
install_requires = mod.install_requires
diff --git a/manager/tests/packaging/interactive/cache-clear.sh b/tests/packaging/interactive/cache-clear.sh
index 377cf5d3..79d88a12 100755
--- a/manager/tests/packaging/interactive/cache-clear.sh
+++ b/tests/packaging/interactive/cache-clear.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# clear full cache
kresctl cache clear
diff --git a/manager/tests/packaging/interactive/etag.sh b/tests/packaging/interactive/etag.sh
index f14ef96a..3a9de46c 100755
--- a/manager/tests/packaging/interactive/etag.sh
+++ b/tests/packaging/interactive/etag.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
set -e
diff --git a/manager/tests/packaging/interactive/metrics.sh b/tests/packaging/interactive/metrics.sh
index 1ad48930..63ac035d 100755
--- a/manager/tests/packaging/interactive/metrics.sh
+++ b/tests/packaging/interactive/metrics.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
set -e
diff --git a/manager/tests/packaging/interactive/reload.sh b/tests/packaging/interactive/reload.sh
index 9daa1890..85bca315 100755
--- a/manager/tests/packaging/interactive/reload.sh
+++ b/tests/packaging/interactive/reload.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
set -e
diff --git a/tests/packaging/interactive/schema.sh b/tests/packaging/interactive/schema.sh
new file mode 100755
index 00000000..3ea45d52
--- /dev/null
+++ b/tests/packaging/interactive/schema.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+
+set -e
+
+if ! kresctl schema; then
+    echo "Failed to generate JSON schema with 'kresctl'"
+    exit 1
+fi
+
+if ! kresctl schema --live; then
+    echo "Failed to get JSON schema from the running resolver"
+    exit 1
+fi
diff --git a/manager/tests/packaging/interactive/workers.sh b/tests/packaging/interactive/workers.sh
index 4f54f6ae..b23afcdb 100755
--- a/manager/tests/packaging/interactive/workers.sh
+++ b/tests/packaging/interactive/workers.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
expected_workers="5"
diff --git a/manager/tests/packaging/knot-resolver.sh b/tests/packaging/knot-resolver.sh
index 6aa38bde..2c252f7e 100755
--- a/manager/tests/packaging/knot-resolver.sh
+++ b/tests/packaging/knot-resolver.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# fail fast
set -e
diff --git a/manager/tests/packaging/kresctl.sh b/tests/packaging/kresctl.sh
index 579f1a10..aff02b6b 100755
--- a/manager/tests/packaging/kresctl.sh
+++ b/tests/packaging/kresctl.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# fail fast
set -e
diff --git a/manager/tests/packaging/manpage.sh b/tests/packaging/manpage.sh
index c48d9048..c48d9048 100755
--- a/manager/tests/packaging/manpage.sh
+++ b/tests/packaging/manpage.sh
diff --git a/manager/tests/packaging/systemd_service.sh b/tests/packaging/systemd_service.sh
index c6ac826b..a4425385 100755
--- a/manager/tests/packaging/systemd_service.sh
+++ b/tests/packaging/systemd_service.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# fail fast
set -e
@@ -9,6 +9,14 @@ if test "$(id -u)" -ne 0; then
exit 1
fi
+# SKIP test when systemd isn't PID 1
+if [[ -d /run/systemd/system ]] ; then
+ echo "systemd detected -> RUN systemd tests"
+else
+ echo "systemd not detected -> SKIP systemd tests"
+ exit 77
+fi
+
# We will be starting a systemd service, but another tests might do the same
# so this makes sure there is nothing left after we exit
trap "systemctl stop knot-resolver.service" EXIT
diff --git a/manager/shell-completion/client.bash b/utils/shell-completion/client.bash
index b3c19419..b3c19419 100644
--- a/manager/shell-completion/client.bash
+++ b/utils/shell-completion/client.bash
diff --git a/manager/shell-completion/client.fish b/utils/shell-completion/client.fish
index ec3a0ab7..ec3a0ab7 100644
--- a/manager/shell-completion/client.fish
+++ b/utils/shell-completion/client.fish
diff --git a/manager/shell-completion/meson.build b/utils/shell-completion/meson.build
index 6c35ffe3..6c35ffe3 100644
--- a/manager/shell-completion/meson.build
+++ b/utils/shell-completion/meson.build