-rw-r--r--  .gitignore | 3
-rw-r--r--  .gitlab-ci.yml | 311
-rw-r--r--  Dockerfile | 99
-rw-r--r--  NEWS | 16
-rw-r--r--  ci/images/README.md | 8
-rwxr-xr-x  ci/images/build.sh | 12
-rw-r--r--  ci/images/debian-11/Dockerfile | 10
-rw-r--r--  ci/images/debian-buster/Dockerfile | 9
-rw-r--r--  ci/images/manager/Dockerfile | 20
-rw-r--r--  ci/pkgtest.yaml | 20
-rwxr-xr-x  client/.packaging/test.sh | 5
-rw-r--r--  daemon/.packaging/centos/7/builddeps | 13
-rwxr-xr-x  daemon/.packaging/centos/7/pre-build.sh | 9
-rwxr-xr-x  daemon/.packaging/centos/7/pre-run.sh | 8
-rw-r--r--  daemon/.packaging/centos/7/rundeps | 6
-rw-r--r--  daemon/.packaging/centos/8/builddeps | 14
-rwxr-xr-x  daemon/.packaging/centos/8/pre-build.sh | 9
-rwxr-xr-x  daemon/.packaging/centos/8/pre-run.sh | 7
-rw-r--r--  daemon/.packaging/centos/8/rundeps | 6
-rw-r--r--  daemon/.packaging/debian/10/builddeps | 12
-rwxr-xr-x  daemon/.packaging/debian/10/pre-build.sh | 11
-rwxr-xr-x  daemon/.packaging/debian/10/pre-run.sh | 11
-rw-r--r--  daemon/.packaging/debian/10/rundeps | 15
-rw-r--r--  daemon/.packaging/debian/9/builddeps | 12
-rwxr-xr-x  daemon/.packaging/debian/9/pre-build.sh | 11
-rwxr-xr-x  daemon/.packaging/debian/9/pre-run.sh | 11
-rw-r--r--  daemon/.packaging/debian/9/rundeps | 15
-rw-r--r--  daemon/.packaging/fedora/31/builddeps | 14
-rwxr-xr-x  daemon/.packaging/fedora/31/pre-build.sh | 7
-rwxr-xr-x  daemon/.packaging/fedora/31/pre-run.sh | 6
-rw-r--r--  daemon/.packaging/fedora/31/rundeps | 7
-rw-r--r--  daemon/.packaging/fedora/32/builddeps | 14
-rwxr-xr-x  daemon/.packaging/fedora/32/pre-build.sh | 7
-rwxr-xr-x  daemon/.packaging/fedora/32/pre-run.sh | 6
-rw-r--r--  daemon/.packaging/fedora/32/rundeps | 7
-rw-r--r--  daemon/.packaging/leap/15.2/builddeps | 14
-rwxr-xr-x  daemon/.packaging/leap/15.2/pre-build.sh | 6
-rwxr-xr-x  daemon/.packaging/leap/15.2/pre-run.sh | 3
-rw-r--r--  daemon/.packaging/leap/15.2/rundeps | 4
-rw-r--r--  daemon/.packaging/leap/docker-image-name | 1
-rw-r--r--  daemon/.packaging/test.config | 2
-rw-r--r--  daemon/.packaging/ubuntu/16.04/builddeps | 16
-rwxr-xr-x  daemon/.packaging/ubuntu/16.04/pre-build.sh | 12
-rwxr-xr-x  daemon/.packaging/ubuntu/16.04/pre-run.sh | 12
-rw-r--r--  daemon/.packaging/ubuntu/16.04/rundeps | 15
-rw-r--r--  daemon/.packaging/ubuntu/18.04/builddeps | 16
-rwxr-xr-x  daemon/.packaging/ubuntu/18.04/pre-build.sh | 12
-rwxr-xr-x  daemon/.packaging/ubuntu/18.04/pre-run.sh | 12
-rw-r--r--  daemon/.packaging/ubuntu/18.04/rundeps | 15
-rw-r--r--  daemon/.packaging/ubuntu/20.04/builddeps | 16
-rwxr-xr-x  daemon/.packaging/ubuntu/20.04/pre-build.sh | 12
-rwxr-xr-x  daemon/.packaging/ubuntu/20.04/pre-run.sh | 12
-rw-r--r--  daemon/.packaging/ubuntu/20.04/rundeps | 15
-rw-r--r--  daemon/bindings/cache.c | 5
-rw-r--r--  daemon/bindings/net.c | 2
-rw-r--r--  daemon/lua/kres-gen-30.lua | 42
-rw-r--r--  daemon/lua/kres-gen-31.lua | 42
-rw-r--r--  daemon/lua/kres-gen-32.lua | 42
-rwxr-xr-x  daemon/lua/kres-gen.sh | 17
-rw-r--r--  daemon/main.c | 21
-rw-r--r--  daemon/scripting.rst | 2
-rw-r--r--  distro/pkg/deb/clean | 2
-rw-r--r--  distro/pkg/deb/control | 32
-rw-r--r--  distro/pkg/deb/knot-resolver-doc.doc-base | 11
-rw-r--r--  distro/pkg/deb/knot-resolver-doc.docs | 1
-rw-r--r--  distro/pkg/deb/knot-resolver-doc.info | 2
-rw-r--r--  distro/pkg/deb/knot-resolver-doc.links | 2
-rw-r--r--  distro/pkg/deb/not-installed | 1
-rw-r--r--  distro/pkg/deb/python3-knot-resolver-manager.install | 4
-rw-r--r--  distro/pkg/deb/python3-knot-resolver-manager.manpages | 1
-rwxr-xr-x  distro/pkg/deb/rules | 14
-rw-r--r--  distro/pkg/rpm/knot-resolver.spec | 88
-rw-r--r--  distro/tests/.ansible.cfg | 8
-rw-r--r--  distro/tests/README.md | 42
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/defaults/main.yaml | 6
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/tasks/configure_dnstap.yaml | 10
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/tasks/configure_doh.yaml | 10
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/tasks/configure_doh2.yaml | 8
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/tasks/main.yaml | 71
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/tasks/restart_kresd.yaml | 16
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/tasks/test_dnssec.yaml | 15
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/tasks/test_doh.yaml | 9
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/tasks/test_doh2.yaml | 24
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/tasks/test_kres_cache_gc.yaml | 4
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/tasks/test_tcp.yaml | 8
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/tasks/test_tls.yaml | 8
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/tasks/test_udp.yaml | 8
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/vars/CentOS.yaml | 6
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/vars/Debian.yaml | 6
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/vars/Fedora.yaml | 6
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/vars/Rocky.yaml | 6
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/vars/Ubuntu.yaml | 6
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/vars/openSUSE_Leap.yaml | 6
-rw-r--r--  distro/tests/ansible-roles/knot_resolver/vars/openSUSE_Tumbleweed.yaml | 7
-rw-r--r--  distro/tests/ansible-roles/obs_repos/defaults/main.yaml | 4
-rw-r--r--  distro/tests/ansible-roles/obs_repos/tasks/CentOS.yaml | 18
-rw-r--r--  distro/tests/ansible-roles/obs_repos/tasks/Debian.yaml | 15
-rw-r--r--  distro/tests/ansible-roles/obs_repos/tasks/Fedora.yaml | 8
-rw-r--r--  distro/tests/ansible-roles/obs_repos/tasks/Rocky.yaml | 13
-rw-r--r--  distro/tests/ansible-roles/obs_repos/tasks/Ubuntu.yaml | 14
-rw-r--r--  distro/tests/ansible-roles/obs_repos/tasks/main.yaml | 12
-rw-r--r--  distro/tests/ansible-roles/obs_repos/tasks/openSUSE_Leap.yaml | 19
-rw-r--r--  distro/tests/ansible-roles/obs_repos/tasks/openSUSE_Tumbleweed.yaml | 13
-rw-r--r--  distro/tests/ansible-roles/obs_repos/vars/CentOS.yaml | 3
-rw-r--r--  distro/tests/ansible-roles/obs_repos/vars/Debian_10.yaml | 3
l---------  distro/tests/ansible-roles/obs_repos/vars/Debian_11.yaml | 1
-rw-r--r--  distro/tests/ansible-roles/obs_repos/vars/Debian_9.yaml | 3
-rw-r--r--  distro/tests/ansible-roles/obs_repos/vars/Fedora.yaml | 3
-rw-r--r--  distro/tests/ansible-roles/obs_repos/vars/Rocky.yaml | 3
-rw-r--r--  distro/tests/ansible-roles/obs_repos/vars/Ubuntu.yaml | 3
-rw-r--r--  distro/tests/ansible-roles/obs_repos/vars/openSUSE_Leap.yaml | 3
-rw-r--r--  distro/tests/ansible-roles/obs_repos/vars/openSUSE_Tumbleweed.yaml | 3
-rw-r--r--  distro/tests/centos7/Vagrantfile | 30
l---------  distro/tests/centos7/ansible.cfg | 1
-rw-r--r--  distro/tests/debian10/Vagrantfile | 28
l---------  distro/tests/debian10/ansible.cfg | 1
-rw-r--r--  distro/tests/debian11/Vagrantfile | 27
l---------  distro/tests/debian11/ansible.cfg | 1
-rw-r--r--  distro/tests/debian9/Vagrantfile | 27
l---------  distro/tests/debian9/ansible.cfg | 1
-rw-r--r--  distro/tests/extra/all/control | 2
-rw-r--r--  distro/tests/fedora35/Vagrantfile | 30
l---------  distro/tests/fedora35/ansible.cfg | 1
-rw-r--r--  distro/tests/fedora36/Vagrantfile | 30
l---------  distro/tests/fedora36/ansible.cfg | 1
-rw-r--r--  distro/tests/knot-resolver-pkgtest.yaml | 13
-rw-r--r--  distro/tests/leap15/Vagrantfile | 29
l---------  distro/tests/leap15/ansible.cfg | 1
-rw-r--r--  distro/tests/repos.yaml | 4
-rw-r--r--  distro/tests/rocky8/Vagrantfile | 30
l---------  distro/tests/rocky8/ansible.cfg | 1
-rwxr-xr-x  distro/tests/test-distro.sh | 26
-rw-r--r--  distro/tests/ubuntu1804/Vagrantfile | 30
l---------  distro/tests/ubuntu1804/ansible.cfg | 1
-rw-r--r--  distro/tests/ubuntu2004/Vagrantfile | 30
l---------  distro/tests/ubuntu2004/ansible.cfg | 1
-rw-r--r--  distro/tests/ubuntu2204/Vagrantfile | 30
l---------  distro/tests/ubuntu2204/ansible.cfg | 1
-rw-r--r--  doc/.packaging/centos/8/NOTSUPPORTED | 0
-rwxr-xr-x  doc/.packaging/debian/10/build.sh | 19
-rw-r--r--  doc/.packaging/debian/10/builddeps | 4
-rwxr-xr-x  doc/.packaging/debian/10/install.sh | 3
-rwxr-xr-x  doc/.packaging/debian/9/build.sh | 19
-rw-r--r--  doc/.packaging/debian/9/builddeps | 4
-rwxr-xr-x  doc/.packaging/debian/9/install.sh | 3
-rwxr-xr-x  doc/.packaging/fedora/31/build.sh | 20
-rw-r--r--  doc/.packaging/fedora/31/builddeps | 4
-rwxr-xr-x  doc/.packaging/fedora/31/install.sh | 3
-rwxr-xr-x  doc/.packaging/fedora/32/30/build.sh | 20
-rw-r--r--  doc/.packaging/fedora/32/30/builddeps | 4
-rwxr-xr-x  doc/.packaging/fedora/32/30/install.sh | 3
-rwxr-xr-x  doc/.packaging/fedora/32/build.sh | 20
-rw-r--r--  doc/.packaging/fedora/32/builddeps | 4
-rwxr-xr-x  doc/.packaging/fedora/32/install.sh | 3
-rwxr-xr-x  doc/.packaging/leap/15.2/build.sh | 20
-rw-r--r--  doc/.packaging/leap/15.2/builddeps | 4
-rwxr-xr-x  doc/.packaging/leap/15.2/install.sh | 3
-rwxr-xr-x  doc/.packaging/test.sh | 3
-rwxr-xr-x  doc/.packaging/ubuntu/16.04/build.sh | 19
-rw-r--r--  doc/.packaging/ubuntu/16.04/builddeps | 4
-rwxr-xr-x  doc/.packaging/ubuntu/16.04/install.sh | 3
-rwxr-xr-x  doc/.packaging/ubuntu/18.04/build.sh | 19
-rw-r--r--  doc/.packaging/ubuntu/18.04/builddeps | 4
-rwxr-xr-x  doc/.packaging/ubuntu/18.04/install.sh | 3
-rwxr-xr-x  doc/.packaging/ubuntu/20.04/build.sh | 19
-rw-r--r--  doc/.packaging/ubuntu/20.04/builddeps | 4
-rwxr-xr-x  doc/.packaging/ubuntu/20.04/install.sh | 3
-rw-r--r--  doc/README.md | 24
-rw-r--r--  doc/_static/.gitignore | 0
-rw-r--r--  doc/_static/package-lock.json | 2484
-rw-r--r--  doc/_static/package.json | 5
-rw-r--r--  doc/architecture-gc.rst | 12
-rw-r--r--  doc/architecture-kresd.rst | 3
-rw-r--r--  doc/architecture-manager.drawio | 1
-rw-r--r--  doc/architecture-manager.rst | 57
-rw-r--r--  doc/architecture-manager.svg | 3
-rw-r--r--  doc/architecture-schema.drawio | 1
-rw-r--r--  doc/architecture-schema.svg | 3
-rw-r--r--  doc/architecture.rst | 48
-rw-r--r--  doc/build.rst | 103
-rw-r--r--  doc/conf.py | 15
-rw-r--r--  doc/config-lua-overview.rst | 87
-rw-r--r--  doc/config-lua.rst | 23
-rw-r--r--  doc/config-overview.rst | 96
-rw-r--r--  doc/config-schema.rst | 42
-rw-r--r--  doc/deployment-advanced-no-manager.rst | 73
-rw-r--r--  doc/deployment-advanced.rst | 9
-rw-r--r--  doc/deployment-docker.rst | 21
-rw-r--r--  doc/deployment-manual.rst | 15
-rw-r--r--  doc/deployment-no-systemd-privileges.rst (renamed from doc/config-no-systemd-privileges.rst) | 2
-rw-r--r--  doc/deployment-no-systemd-processes.rst (renamed from doc/config-no-systemd-processes.rst) | 2
-rw-r--r--  doc/deployment-no-systemd.rst (renamed from doc/config-no-systemd.rst) | 14
-rw-r--r--  doc/deployment-systemd.rst | 19
-rw-r--r--  doc/deployment-warning.rst | 10
-rw-r--r--  doc/gettingstarted-config.rst | 263
-rw-r--r--  doc/gettingstarted-install.rst | 43
-rw-r--r--  doc/gettingstarted-startup.rst (renamed from doc/quickstart-startup.rst) | 24
-rw-r--r--  doc/index.rst | 62
-rw-r--r--  doc/kresctl.8.in | 67
-rw-r--r--  doc/manager-api.rst | 111
-rw-r--r--  doc/manager-client.rst | 202
-rw-r--r--  doc/manager-dev.rst | 115
-rw-r--r--  doc/manager-kresctl.rst | 20
-rw-r--r--  doc/manager-no-systemd.rst | 7
-rw-r--r--  doc/meson.build | 44
-rw-r--r--  doc/quickstart-config.rst | 209
-rw-r--r--  doc/quickstart-install.rst | 73
-rw-r--r--  doc/requirements.txt | 4
-rw-r--r--  doc/upgrading-to-6.rst | 41
-rw-r--r--  doc/upgrading.rst | 5
-rw-r--r--  doc/usecase-internal-resolver.rst | 24
-rw-r--r--  doc/usecase-isp-resolver.rst | 7
-rw-r--r--  doc/usecase-network-interfaces.rst | 42
-rw-r--r--  doc/usecase-personal-resolver.rst | 22
-rw-r--r--  etc/meson.build | 2
-rw-r--r--  lib/cache/api.c | 22
-rw-r--r--  lib/cache/api.h | 5
-rw-r--r--  lib/cache/cdb_api.h | 16
-rw-r--r--  lib/cache/cdb_lmdb.c | 151
-rw-r--r--  lib/cache/cdb_lmdb.h | 1
-rw-r--r--  lib/cache/entry_pkt.c | 9
-rw-r--r--  lib/cache/entry_rr.c | 5
-rw-r--r--  lib/cache/impl.h | 11
-rw-r--r--  lib/cache/knot_pkt.c | 8
-rw-r--r--  lib/cache/peek.c | 40
-rw-r--r--  lib/layer/iterate.c | 23
-rw-r--r--  lib/log.c | 1
-rw-r--r--  lib/log.h | 6
-rw-r--r--  lib/meson.build | 9
-rw-r--r--  lib/resolve-impl.h | 52
-rw-r--r--  lib/resolve-produce.c | 728
-rw-r--r--  lib/resolve.c | 796
-rw-r--r--  lib/resolve.h | 10
-rw-r--r--  lib/rplan.h | 22
-rw-r--r--  lib/rules/api.c | 949
-rw-r--r--  lib/rules/api.h | 159
-rw-r--r--  lib/rules/defaults.c | 210
-rw-r--r--  lib/rules/forward.c | 168
-rw-r--r--  lib/rules/impl.h | 101
-rw-r--r--  lib/rules/zonefile.c | 272
-rw-r--r--  lib/selection.c | 18
-rw-r--r--  lib/selection.h | 6
-rw-r--r--  lib/selection_forward.c | 4
-rw-r--r--  manager/.dockerignore | 8
-rw-r--r--  manager/.flake8 | 3
-rw-r--r--  manager/.gitignore | 18
-rw-r--r--  manager/.gitlab-ci.yml | 65
-rw-r--r--  manager/.python-version | 4
-rw-r--r--  manager/ARCHITECTURE.md | 41
-rw-r--r--  manager/ERROR_HANDLING.md | 60
-rw-r--r--  manager/README.md | 85
-rw-r--r--  manager/build.py | 16
-rw-r--r--  manager/etc/knot-resolver/.gitignore | 2
-rw-r--r--  manager/etc/knot-resolver/config.dev.yml | 63
-rw-r--r--  manager/etc/knot-resolver/config.docker.yml | 12
-rw-r--r--  manager/etc/knot-resolver/config.yml | 11
-rw-r--r--  manager/knot_resolver_manager/__init__.py | 1
-rw-r--r--  manager/knot_resolver_manager/__main__.py | 15
-rw-r--r--  manager/knot_resolver_manager/cli/__init__.py | 5
-rw-r--r--  manager/knot_resolver_manager/cli/__main__.py | 4
-rw-r--r--  manager/knot_resolver_manager/cli/cmd/completion.py | 95
-rw-r--r--  manager/knot_resolver_manager/cli/cmd/config.py | 255
-rw-r--r--  manager/knot_resolver_manager/cli/cmd/convert.py | 74
-rw-r--r--  manager/knot_resolver_manager/cli/cmd/help.py | 24
-rw-r--r--  manager/knot_resolver_manager/cli/cmd/metrics.py | 45
-rw-r--r--  manager/knot_resolver_manager/cli/cmd/reload.py | 36
-rw-r--r--  manager/knot_resolver_manager/cli/cmd/schema.py | 55
-rw-r--r--  manager/knot_resolver_manager/cli/cmd/stop.py | 32
-rw-r--r--  manager/knot_resolver_manager/cli/cmd/validate.py | 63
-rw-r--r--  manager/knot_resolver_manager/cli/command.py | 66
-rw-r--r--  manager/knot_resolver_manager/cli/kresctl.py | 49
-rw-r--r--  manager/knot_resolver_manager/cli/main.py | 56
-rw-r--r--  manager/knot_resolver_manager/compat/__init__.py | 3
-rw-r--r--  manager/knot_resolver_manager/compat/asyncio.py | 137
-rw-r--r--  manager/knot_resolver_manager/compat/dataclasses.py | 69
-rw-r--r--  manager/knot_resolver_manager/config_store.py | 77
-rw-r--r--  manager/knot_resolver_manager/constants.py | 86
-rw-r--r--  manager/knot_resolver_manager/datamodel/__init__.py | 3
-rw-r--r--  manager/knot_resolver_manager/datamodel/cache_schema.py | 82
-rw-r--r--  manager/knot_resolver_manager/datamodel/config_schema.py | 209
-rw-r--r--  manager/knot_resolver_manager/datamodel/design-notes.yml | 237
-rw-r--r--  manager/knot_resolver_manager/datamodel/dns64_schema.py | 13
-rw-r--r--  manager/knot_resolver_manager/datamodel/dnssec_schema.py | 45
-rw-r--r--  manager/knot_resolver_manager/datamodel/forward_schema.py | 57
-rw-r--r--  manager/knot_resolver_manager/datamodel/globals.py | 57
-rw-r--r--  manager/knot_resolver_manager/datamodel/local_data_schema.py | 82
-rw-r--r--  manager/knot_resolver_manager/datamodel/logging_schema.py | 150
-rw-r--r--  manager/knot_resolver_manager/datamodel/lua_schema.py | 22
-rw-r--r--  manager/knot_resolver_manager/datamodel/management_schema.py | 21
-rw-r--r--  manager/knot_resolver_manager/datamodel/monitoring_schema.py | 25
-rw-r--r--  manager/knot_resolver_manager/datamodel/network_schema.py | 179
-rw-r--r--  manager/knot_resolver_manager/datamodel/options_schema.py | 75
-rw-r--r--  manager/knot_resolver_manager/datamodel/policy_schema.py | 126
-rw-r--r--  manager/knot_resolver_manager/datamodel/rpz_schema.py | 29
-rw-r--r--  manager/knot_resolver_manager/datamodel/slice_schema.py | 21
-rw-r--r--  manager/knot_resolver_manager/datamodel/static_hints_schema.py | 27
-rw-r--r--  manager/knot_resolver_manager/datamodel/stub_zone_schema.py | 32
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/cache.lua.j2 | 18
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/config.lua.j2 | 70
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/dns64.lua.j2 | 7
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/dnssec.lua.j2 | 58
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/forward.lua.j2 | 7
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/forward_zones.lua.j2 | 72
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/local_data.lua.j2 | 51
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/logging.lua.j2 | 43
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/macros/common_macros.lua.j2 | 101
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/macros/forward_macros.lua.j2 | 42
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/macros/local_data_macros.lua.j2 | 75
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/macros/network_macros.lua.j2 | 55
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/macros/policy_macros.lua.j2 | 275
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/macros/view_macros.lua.j2 | 22
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/monitoring.lua.j2 | 33
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/network.lua.j2 | 102
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/options.lua.j2 | 52
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/policy.lua.j2 | 62
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/rpz.lua.j2 | 57
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/slices.lua.j2 (renamed from doc/.packaging/centos/7/NOTSUPPORTED) | 0
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/static_hints.lua.j2 | 51
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/stub_zones.lua.j2 | 58
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/views.lua.j2 | 22
-rw-r--r--  manager/knot_resolver_manager/datamodel/templates/webmgmt.lua.j2 | 25
-rw-r--r--  manager/knot_resolver_manager/datamodel/types/__init__.py | 56
-rw-r--r--  manager/knot_resolver_manager/datamodel/types/base_types.py | 193
-rw-r--r--  manager/knot_resolver_manager/datamodel/types/enums.py | 153
-rw-r--r--  manager/knot_resolver_manager/datamodel/types/files.py | 138
-rw-r--r--  manager/knot_resolver_manager/datamodel/types/generic_types.py | 33
-rw-r--r--  manager/knot_resolver_manager/datamodel/types/types.py | 411
-rw-r--r--  manager/knot_resolver_manager/datamodel/view_schema.py | 40
-rw-r--r--  manager/knot_resolver_manager/datamodel/webmgmt_schema.py | 27
-rw-r--r--  manager/knot_resolver_manager/exceptions.py | 28
-rw-r--r--  manager/knot_resolver_manager/kres_manager.py | 306
-rw-r--r--  manager/knot_resolver_manager/kresd_controller/__init__.py | 93
-rw-r--r--  manager/knot_resolver_manager/kresd_controller/interface.py | 251
-rw-r--r--  manager/knot_resolver_manager/kresd_controller/supervisord/__init__.py | 269
-rw-r--r--  manager/knot_resolver_manager/kresd_controller/supervisord/config_file.py | 182
-rw-r--r--  manager/knot_resolver_manager/kresd_controller/supervisord/plugin/fast_rpcinterface.py | 173
-rw-r--r--  manager/knot_resolver_manager/kresd_controller/supervisord/plugin/manager_integration.py | 85
-rw-r--r--  manager/knot_resolver_manager/kresd_controller/supervisord/plugin/notifymodule.c | 176
-rw-r--r--  manager/knot_resolver_manager/kresd_controller/supervisord/plugin/patch_logger.py | 97
-rw-r--r--  manager/knot_resolver_manager/kresd_controller/supervisord/plugin/sd_notify.py | 214
-rw-r--r--  manager/knot_resolver_manager/kresd_controller/supervisord/supervisord.conf.j2 | 80
-rw-r--r--  manager/knot_resolver_manager/log.py | 105
-rw-r--r--  manager/knot_resolver_manager/main.py | 42
-rw-r--r--  manager/knot_resolver_manager/server.py | 596
-rw-r--r--  manager/knot_resolver_manager/statistics.py | 413
-rw-r--r--  manager/knot_resolver_manager/utils/__init__.py | 45
-rw-r--r--  manager/knot_resolver_manager/utils/async_utils.py | 129
-rw-r--r--  manager/knot_resolver_manager/utils/custom_atexit.py | 20
-rw-r--r--  manager/knot_resolver_manager/utils/etag.py | 10
-rw-r--r--  manager/knot_resolver_manager/utils/functional.py | 72
-rw-r--r--  manager/knot_resolver_manager/utils/modeling/README.md | 155
-rw-r--r--  manager/knot_resolver_manager/utils/modeling/__init__.py | 14
-rw-r--r--  manager/knot_resolver_manager/utils/modeling/base_generic_type_wrapper.py | 9
-rw-r--r--  manager/knot_resolver_manager/utils/modeling/base_schema.py | 808
-rw-r--r--  manager/knot_resolver_manager/utils/modeling/base_value_type.py | 45
-rw-r--r--  manager/knot_resolver_manager/utils/modeling/exceptions.py | 56
-rw-r--r--  manager/knot_resolver_manager/utils/modeling/json_pointer.py | 89
-rw-r--r--  manager/knot_resolver_manager/utils/modeling/parsing.py | 91
-rw-r--r--  manager/knot_resolver_manager/utils/modeling/query.py | 183
-rw-r--r--  manager/knot_resolver_manager/utils/modeling/renaming.py | 90
-rw-r--r--  manager/knot_resolver_manager/utils/modeling/types.py | 105
-rw-r--r--  manager/knot_resolver_manager/utils/requests.py | 88
-rw-r--r--  manager/knot_resolver_manager/utils/systemd_notify.py | 54
-rw-r--r--  manager/knot_resolver_manager/utils/which.py | 22
-rw-r--r--  manager/meson.build | 37
-rwxr-xr-x  manager/poe | 3
-rw-r--r--  manager/poetry.lock | 2945
-rw-r--r--  manager/pyproject.toml | 180
-rw-r--r--  manager/scripts/_env.sh | 52
-rwxr-xr-x  manager/scripts/codecheck | 77
-rwxr-xr-x  manager/scripts/commit | 18
-rwxr-xr-x  manager/scripts/configure-vscode | 55
-rwxr-xr-x  manager/scripts/container.py | 284
-rw-r--r--  manager/scripts/create_setup.py | 66
-rw-r--r--  manager/scripts/install.sh | 11
-rw-r--r--  manager/scripts/make-package.sh | 71
-rwxr-xr-x  manager/scripts/man | 9
-rwxr-xr-x  manager/scripts/run | 29
-rwxr-xr-x  manager/scripts/run-debug | 13
-rw-r--r--  manager/setup.py | 55
-rw-r--r--  manager/shell-completion/client.bash | 33
-rw-r--r--  manager/shell-completion/client.fish | 6
-rw-r--r--  manager/shell-completion/meson.build | 13
-rw-r--r--  manager/tests/README.md | 9
-rw-r--r--  manager/tests/integration/.gitignore | 2
-rw-r--r--  manager/tests/integration/config.yml | 13
-rw-r--r--  manager/tests/integration/runner.py | 96
-rw-r--r--  manager/tests/packaging/control | 41
-rwxr-xr-x  manager/tests/packaging/dependencies.py | 33
-rwxr-xr-x  manager/tests/packaging/interactive/etag.sh | 10
-rwxr-xr-x  manager/tests/packaging/interactive/metrics.sh | 3
-rwxr-xr-x  manager/tests/packaging/interactive/reload.sh | 5
-rwxr-xr-x  manager/tests/packaging/interactive/workers.sh | 7
-rwxr-xr-x  manager/tests/packaging/knot-resolver.sh | 7
-rwxr-xr-x  manager/tests/packaging/kresctl.sh | 7
-rwxr-xr-x  manager/tests/packaging/manpage.sh | 7
-rwxr-xr-x  manager/tests/packaging/systemd_service.sh | 32
-rw-r--r--  manager/tests/unit/__init__.py | 5
-rw-r--r--  manager/tests/unit/datamodel/templates/test_common_macros.py | 82
-rw-r--r--  manager/tests/unit/datamodel/templates/test_forward_macros.py | 27
-rw-r--r--  manager/tests/unit/datamodel/templates/test_network_macros.py | 35
-rw-r--r--  manager/tests/unit/datamodel/templates/test_policy_macros.py | 132
-rw-r--r--  manager/tests/unit/datamodel/templates/test_view_macros.py | 53
-rw-r--r--  manager/tests/unit/datamodel/test_config_schema.py | 54
-rw-r--r--  manager/tests/unit/datamodel/test_local_data.py | 33
-rw-r--r--  manager/tests/unit/datamodel/test_lua_schema.py | 9
-rw-r--r--  manager/tests/unit/datamodel/test_management_schema.py | 21
-rw-r--r--  manager/tests/unit/datamodel/test_network_schema.py | 79
-rw-r--r--  manager/tests/unit/datamodel/test_options_schema.py | 7
-rw-r--r--  manager/tests/unit/datamodel/test_policy_schema.py | 89
-rw-r--r--  manager/tests/unit/datamodel/test_rpz_schema.py | 23
-rw-r--r--  manager/tests/unit/datamodel/types/test_base_types.py | 38
-rw-r--r--  manager/tests/unit/datamodel/types/test_custom_types.py | 252
-rw-r--r--  manager/tests/unit/datamodel/types/test_generic_types.py | 56
-rw-r--r--  manager/tests/unit/test_config_store.py | 31
-rw-r--r--  manager/tests/unit/test_knot_resolver_manager.py | 5
-rw-r--r--  manager/tests/unit/utils/modeling/test_base_schema.py | 205
-rw-r--r--  manager/tests/unit/utils/modeling/test_etag.py | 15
-rw-r--r--  manager/tests/unit/utils/modeling/test_json_pointer.py | 72
-rw-r--r--  manager/tests/unit/utils/modeling/test_query.py | 18
-rw-r--r--  manager/tests/unit/utils/modeling/test_renaming.py | 24
-rw-r--r--  manager/tests/unit/utils/modeling/test_types.py | 38
-rw-r--r--  manager/tests/unit/utils/test_dataclasses.py | 15
-rw-r--r--  manager/tests/unit/utils/test_functional.py | 22
-rw-r--r--  manager/typings/README.md | 8
-rw-r--r--  manager/typings/pytest/__init__.pyi | 36
-rw-r--r--  manager/typings/pytest/__main__.pyi | 9
-rw-r--r--  manager/typings/supervisor/__init__.pyi | 3
-rw-r--r--  manager/typings/supervisor/childutils.pyi | 51
-rw-r--r--  manager/typings/supervisor/compat.pyi | 39
-rw-r--r--  manager/typings/supervisor/confecho.pyi | 6
-rw-r--r--  manager/typings/supervisor/datatypes.pyi | 199
-rw-r--r--  manager/typings/supervisor/dispatchers.pyi | 158
-rw-r--r--  manager/typings/supervisor/events.pyi | 227
-rw-r--r--  manager/typings/supervisor/http.pyi | 216
-rw-r--r--  manager/typings/supervisor/http_client.pyi | 84
-rw-r--r--  manager/typings/supervisor/loggers.pyi | 233
-rw-r--r--  manager/typings/supervisor/medusa/__init__.pyi | 7
-rw-r--r--  manager/typings/supervisor/medusa/asynchat_25.pyi | 117
-rw-r--r--  manager/typings/supervisor/medusa/asyncore_25.pyi | 195
-rw-r--r--  manager/typings/supervisor/medusa/auth_handler.pyi | 42
-rw-r--r--  manager/typings/supervisor/medusa/counter.pyi | 27
-rw-r--r--  manager/typings/supervisor/medusa/default_handler.pyi | 41
-rw-r--r--  manager/typings/supervisor/medusa/filesys.pyi | 176
-rw-r--r--  manager/typings/supervisor/medusa/http_date.pyi | 37
-rw-r--r--  manager/typings/supervisor/medusa/http_server.pyi | 196
-rw-r--r--  manager/typings/supervisor/medusa/logger.pyi | 122
-rw-r--r--  manager/typings/supervisor/medusa/producers.pyi | 155
-rw-r--r--  manager/typings/supervisor/medusa/util.pyi | 18
-rw-r--r--  manager/typings/supervisor/medusa/xmlrpc_handler.pyi | 42
-rw-r--r--  manager/typings/supervisor/options.pyi | 527
-rw-r--r--  manager/typings/supervisor/pidproxy.pyi | 33
-rw-r--r--  manager/typings/supervisor/poller.pyi | 127
-rw-r--r--  manager/typings/supervisor/process.pyi | 224
-rw-r--r--  manager/typings/supervisor/rpcinterface.pyi | 336
-rw-r--r--  manager/typings/supervisor/socket_manager.pyi | 59
-rw-r--r--  manager/typings/supervisor/states.pyi | 44
-rw-r--r--  manager/typings/supervisor/supervisorctl.pyi | 280
-rw-r--r--  manager/typings/supervisor/supervisord.pyi | 102
-rw-r--r--  manager/typings/supervisor/templating.pyi | 476
-rw-r--r--  manager/typings/supervisor/tests/__init__.pyi | 3
-rw-r--r--  manager/typings/supervisor/web.pyi | 87
-rw-r--r--  manager/typings/supervisor/xmlrpc.pyi | 174
-rw-r--r--  meson.build | 7
-rw-r--r--  meson_options.txt | 12
-rw-r--r--  modules/bogus_log/.packaging/test.config | 4
-rw-r--r--  modules/daf/.packaging/test.config | 4
-rw-r--r--  modules/daf/daf.test.lua | 4
-rw-r--r--  modules/detect_time_jump/.packaging/test.config | 4
-rw-r--r--  modules/detect_time_skew/.packaging/test.config | 4
-rw-r--r--  modules/dns64/.packaging/test.config | 4
-rw-r--r--  modules/dns64/dns64.test.lua | 2
-rw-r--r--  modules/dnstap/.packaging/centos/7/builddeps | 3
-rw-r--r--  modules/dnstap/.packaging/centos/7/rundeps | 2
-rw-r--r--  modules/dnstap/.packaging/centos/8/builddeps | 3
-rw-r--r--  modules/dnstap/.packaging/centos/8/rundeps | 2
-rw-r--r--  modules/dnstap/.packaging/debian/10/builddeps | 3
-rw-r--r--  modules/dnstap/.packaging/debian/10/rundeps | 2
-rw-r--r--  modules/dnstap/.packaging/debian/9/builddeps | 3
-rw-r--r--  modules/dnstap/.packaging/debian/9/rundeps | 2
-rw-r--r--  modules/dnstap/.packaging/fedora/31/builddeps | 3
-rw-r--r--  modules/dnstap/.packaging/fedora/31/rundeps | 2
-rw-r--r--  modules/dnstap/.packaging/fedora/32/builddeps | 3
-rw-r--r--  modules/dnstap/.packaging/fedora/32/rundeps | 2
-rw-r--r--  modules/dnstap/.packaging/leap/15.2/builddeps | 3
-rw-r--r--  modules/dnstap/.packaging/leap/15.2/rundeps | 2
-rw-r--r--  modules/dnstap/.packaging/test.config | 4
-rw-r--r--  modules/dnstap/.packaging/ubuntu/16.04/builddeps | 3
-rw-r--r--  modules/dnstap/.packaging/ubuntu/16.04/rundeps | 2
-rw-r--r--  modules/dnstap/.packaging/ubuntu/18.04/builddeps | 3
-rw-r--r--  modules/dnstap/.packaging/ubuntu/18.04/rundeps | 2
-rw-r--r--  modules/dnstap/.packaging/ubuntu/20.04/builddeps | 3
-rw-r--r--  modules/dnstap/.packaging/ubuntu/20.04/rundeps | 2
-rw-r--r--  modules/edns_keepalive/.packaging/test.config | 10
-rwxr-xr-x  modules/etcd/.packaging/centos/7/pre-test.sh | 1
-rw-r--r--  modules/etcd/.packaging/centos/7/rundeps | 6
-rw-r--r--  modules/etcd/.packaging/centos/8/NOTSUPPORTED | 0
-rwxr-xr-x  modules/etcd/.packaging/debian/10/pre-test.sh | 1
-rw-r--r--  modules/etcd/.packaging/debian/10/rundeps | 4
-rwxr-xr-x  modules/etcd/.packaging/debian/9/pre-test.sh | 1
-rw-r--r--  modules/etcd/.packaging/debian/9/rundeps | 4
-rw-r--r--  modules/etcd/.packaging/fedora/31/NOTSUPPORTED | 16
-rw-r--r--  modules/etcd/.packaging/fedora/32/NOTSUPPORTED | 16
-rwxr-xr-x  modules/etcd/.packaging/leap/15.2/pre-test.sh | 1
-rw-r--r--  modules/etcd/.packaging/leap/15.2/rundeps | 6
-rw-r--r--  modules/etcd/.packaging/test.config | 4
-rwxr-xr-x  modules/etcd/.packaging/ubuntu/16.04/pre-test.sh | 1
-rw-r--r--  modules/etcd/.packaging/ubuntu/16.04/rundeps | 3
-rwxr-xr-x  modules/etcd/.packaging/ubuntu/18.04/pre-test.sh | 1
-rw-r--r--  modules/etcd/.packaging/ubuntu/18.04/rundeps | 3
-rwxr-xr-x  modules/etcd/.packaging/ubuntu/20.04/pre-test.sh | 1
-rw-r--r--  modules/etcd/.packaging/ubuntu/20.04/rundeps | 4
-rw-r--r--  modules/experimental_dot_auth/.packaging/centos/7/rundeps | 1
-rw-r--r--  modules/experimental_dot_auth/.packaging/centos/8/rundeps | 1
-rw-r--r--  modules/experimental_dot_auth/.packaging/debian/10/rundeps | 1
-rw-r--r--  modules/experimental_dot_auth/.packaging/debian/9/rundeps | 1
-rw-r--r--  modules/experimental_dot_auth/.packaging/fedora/31/rundeps | 1
-rw-r--r--  modules/experimental_dot_auth/.packaging/fedora/32/rundeps | 1
-rw-r--r--  modules/experimental_dot_auth/.packaging/leap/15.2/NOTSUPPORTED | 6
-rwxr-xr-x  modules/experimental_dot_auth/.packaging/leap/15.2/pre-test.sh | 1
-rw-r--r--  modules/experimental_dot_auth/.packaging/leap/15.2/rundeps | 4
-rw-r--r--  modules/experimental_dot_auth/.packaging/test.config | 4
-rw-r--r--  modules/experimental_dot_auth/.packaging/ubuntu/16.04/NOTSUPPORTED | 0
-rw-r--r--  modules/experimental_dot_auth/.packaging/ubuntu/18.04/rundeps | 1
-rw-r--r--  modules/experimental_dot_auth/.packaging/ubuntu/20.04/rundeps | 1
-rw-r--r--  modules/graphite/.packaging/centos/7/rundeps | 1
-rw-r--r--  modules/graphite/.packaging/centos/8/rundeps | 1
-rw-r--r--  modules/graphite/.packaging/debian/10/rundeps | 1
-rw-r--r--  modules/graphite/.packaging/debian/9/rundeps | 1
-rw-r--r--  modules/graphite/.packaging/fedora/31/rundeps | 1
-rw-r--r--  modules/graphite/.packaging/fedora/32/rundeps | 1
-rw-r--r--  modules/graphite/.packaging/leap/15.2/NOTSUPPORTED | 6
-rwxr-xr-x  modules/graphite/.packaging/leap/15.2/pre-test.sh | 1
-rw-r--r--  modules/graphite/.packaging/leap/15.2/rundeps | 6
-rw-r--r--  modules/graphite/.packaging/test.config | 4
-rw-r--r--  modules/graphite/.packaging/ubuntu/16.04/rundeps | 1
-rw-r--r--  modules/graphite/.packaging/ubuntu/18.04/rundeps | 1
-rw-r--r--  modules/graphite/.packaging/ubuntu/20.04/rundeps | 1
-rw-r--r--  modules/hints/.packaging/test.config | 4
-rw-r--r--  modules/hints/README.rst | 12
-rw-r--r--  modules/hints/hints.c | 361
-rw-r--r--  modules/http/.packaging/centos/7/rundeps | 1
-rw-r--r--  modules/http/.packaging/centos/8/rundeps | 1
-rw-r--r--  modules/http/.packaging/debian/10/rundeps | 1
-rw-r--r--  modules/http/.packaging/debian/9/rundeps | 1
-rw-r--r--  modules/http/.packaging/fedora/31/rundeps | 1
-rw-r--r--  modules/http/.packaging/fedora/32/rundeps | 1
-rw-r--r--  modules/http/.packaging/leap/15.2/NOTSUPPORTED | 5
-rwxr-xr-x  modules/http/.packaging/leap/15.2/pre-test.sh | 1
-rw-r--r--  modules/http/.packaging/leap/15.2/rundeps | 7
-rw-r--r--  modules/http/.packaging/test.config | 4
-rw-r--r--  modules/http/.packaging/ubuntu/16.04/NOTSUPPORTED | 0
-rw-r--r--  modules/http/.packaging/ubuntu/18.04/rundeps | 1
-rw-r--r--  modules/http/.packaging/ubuntu/20.04/rundeps | 1
-rw-r--r--  modules/nsid/.packaging/test.config | 4
-rw-r--r--  modules/policy/.packaging/test.config | 4
-rw-r--r--  modules/policy/policy.lua | 332
-rw-r--r--  modules/predict/.packaging/test.config | 4
-rw-r--r--  modules/prefill/.packaging/test.config | 4
-rw-r--r--  modules/priming/.packaging/test.config | 4
-rw-r--r--  modules/rebinding/.packaging/test.config | 4
-rw-r--r--  modules/refuse_nord/.packaging/test.config | 3
-rw-r--r--  modules/renumber/.packaging/test.config | 4
-rw-r--r--  modules/serve_stale/.packaging/test.config | 4
-rw-r--r--  modules/stats/.packaging/test.config | 4
-rw-r--r--  modules/stats/README.rst | 2
-rw-r--r--  modules/stats/stats.c | 2
-rw-r--r--  modules/stats/test.integr/kresd_config.j2 | 1
-rw-r--r--  modules/ta_sentinel/.packaging/test.config | 4
-rw-r--r--  modules/ta_signal_query/.packaging/test.config | 4
-rw-r--r--  modules/ta_update/.packaging/test.config | 4
-rw-r--r--  modules/view/.packaging/test.config | 4
-rw-r--r--  modules/watchdog/.packaging/test.config | 4
-rw-r--r--  modules/workarounds/.packaging/test.config | 4
-rwxr-xr-x  scripts/enable-repo.py | 132
-rwxr-xr-x  scripts/make-doc.sh | 14
-rw-r--r--  systemd/knot-resolver.service.in | 22
-rw-r--r--  systemd/meson.build | 7
-rw-r--r--  tests/dnstap/src/dnstap-test/go.mod | 9
-rw-r--r--  tests/dnstap/src/dnstap-test/go.sum | 44
-rw-r--r--  tests/dnstap/src/dnstap-test/main.go | 2
-rwxr-xr-x  tests/dnstap/src/dnstap-test/run.sh | 9
-rw-r--r--  tests/dnstap/src/dnstap-test/vendor/manifest | 55
m---------  tests/integration/deckard | 0
-rw-r--r--  tests/packaging/README.rst | 87
-rw-r--r--  tests/packaging/conftest.py | 10
-rw-r--r--  tests/packaging/test_packaging.py | 494
-rw-r--r--  tests/pytests/pylintrc | 1
-rw-r--r--  tests/pytests/test_random_close.py | 2
-rw-r--r--  utils/cache_gc/db.c | 6
590 files changed, 29299 insertions, 4631 deletions
diff --git a/.gitignore b/.gitignore
index 9f27fe44..fd217593 100644
--- a/.gitignore
+++ b/.gitignore
@@ -51,6 +51,9 @@
/doc/html
/doc/kresd.8
/doc/texinfo
+/doc/_static/config.schema.json
+/doc/_static/schema_doc*
+/doc/config-schema-body.md
/ephemeral_key.pem
/install-sh
/libkres.pc
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 6d8e769d..69371e31 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -31,7 +31,7 @@ stages:
- test
- respdiff
- deploy
- - pkgtest
+ - pkg
# https://docs.gitlab.com/ce/ci/jobs/job_control.html#select-different-runner-tags-for-each-parallel-matrix-job
.multi_platform: &multi_platform
@@ -109,7 +109,7 @@ build:
- meson build_ci --default-library=static --prefix=$PREFIX -Dmalloc=disabled -Dwerror=true -Dextra_tests=enabled
- ninja -C build_ci
- ninja -C build_ci install >/dev/null
- - ${MESON_TEST} --suite unit --suite config --no-suite snowflake
+ - ${MESON_TEST} --suite unit --suite config --suite dnstap --no-suite snowflake
build-knot32:
<<: *build
@@ -118,7 +118,7 @@ build-knot32:
- meson build_ci_knot32 --default-library=static --prefix=$PREFIX -Dmalloc=disabled -Dwerror=true -Dextra_tests=enabled
- ninja -C build_ci_knot32
- ninja -C build_ci_knot32 install >/dev/null
- - ${MESON_TEST} --suite unit --suite config --no-suite snowflake
+ - ${MESON_TEST} --suite unit --suite config --suite dnstap --no-suite snowflake
build-asan:
<<: *build
@@ -129,7 +129,7 @@ build-asan:
- ninja -C build_ci_asan install >/dev/null
# TODO _leaks: not sure what exactly is wrong in leak detection on config tests
# TODO skip_asan: all three of these disappear locally when using gcc 9.1 (except some leaks)
- - MESON_TESTTHREADS=1 ASAN_OPTIONS=detect_leaks=0 ${MESON_TEST} --suite unit --suite config --no-suite skip_asan --no-suite snowflake
+ - MESON_TESTTHREADS=1 ASAN_OPTIONS=detect_leaks=0 ${MESON_TEST} --suite unit --suite config --suite dnstap --no-suite skip_asan --no-suite snowflake
build:macOS:
<<: *nodep
@@ -157,7 +157,7 @@ docker:
DOCKER_IMAGE_NAME: knot-resolver-test:${CI_COMMIT_SHA}
script:
- docker build --no-cache -t ${DOCKER_IMAGE_NAME} .
- - echo "quit()" | docker run -i ${DOCKER_IMAGE_NAME}
+ # TODO: perhaps try if the running image answers queries
after_script: # remove dangling images to avoid running out of disk space
- docker rmi ${DOCKER_IMAGE_NAME}
- docker rmi $(docker images -f "dangling=true" -q)
@@ -209,16 +209,6 @@ trivial_checks: # aggregated to save some processing
- ci/no_assert_check.sh
- ci/deckard_commit_check.sh
-doc:
- <<: *sanity
- script:
- - meson build_doc -Ddoc=enabled
- - ninja -C build_doc doc-strict
- artifacts:
- expire_in: 6 hour
- paths:
- - doc/html
-
lint:other:
<<: *sanity
script:
@@ -253,7 +243,7 @@ lint:scan-build:
script:
- export SCANBUILD="$(realpath ./scripts/run-scanbuild-with-args.sh)"
- ninja -C build_ci* scan-build || true
- - test "$(ls build_ci*/meson-logs/scanbuild/*/report-*.html | wc -l)" = 20 # we have this many errors ATM :-)
+ - test "$(ls build_ci*/meson-logs/scanbuild/*/report-*.html | wc -l)" = 23 # we have this many errors ATM :-)
lint:tidy:
<<: *after_build
@@ -362,15 +352,12 @@ test:valgrind:
- ${MESON_TEST} --suite unit --suite config --no-suite snowflake --wrap="valgrind --leak-check=full --trace-children=yes --quiet --suppressions=/lj.supp"
- MESON_TESTTHREADS=1 ${MESON_TEST} --wrap="valgrind --leak-check=full --trace-children=yes --quiet --suppressions=/lj.supp" --suite snowflake
-pkgtest:
+manager:
stage: test
+ needs: []
trigger:
- include: ci/pkgtest.yaml
+ include: manager/.gitlab-ci.yml
strategy: depend
- needs:
- - build
- variables: # https://gitlab.nic.cz/help/ci/yaml/README.md#artifact-downloads-to-child-pipelines
- PARENT_PIPELINE_ID: $CI_PIPELINE_ID
except:
refs:
- master@knot/knot-resolver
@@ -565,7 +552,7 @@ obs:release:
obs:odvr:
<<: *obs_trigger
- stage: pkgtest # last stage to ensure it doesn't block anything
+ stage: pkg # last stage to ensure it doesn't block anything
only:
- tags
variables:
@@ -573,197 +560,103 @@ obs:odvr:
when: manual
# }}}
-# pkgtest {{{
-.deploytest: &deploytest
- stage: pkgtest
- only:
- variables:
- - $OBS_REPO =~ /^knot-resolver-devel|knot-dns-devel|knot-resolver-testing$/
- - $CI_COMMIT_TAG
- dependencies: [] # wait for previous stages to finish
- variables:
- OBS_REPO: knot-resolver-latest
- when: delayed
- start_in: 3 minutes # give OBS build some time
- tags:
- - condor
+# pkg {{{
+.pkg_deb_extras: &pkg_deb_extras
+ before_script:
+ - apt update
-obs:build:all:
- <<: *deploytest
- only:
- variables:
- - $OBS_REPO =~ /^knot-resolver-devel|knot-dns-devel|knot-resolver-testing|knot-resolver-odvr$/
- - $CI_COMMIT_TAG
- allow_failure: true
- script:
- - "osc results home:CZ-NIC:$OBS_REPO knot-resolver -w"
- - version=$(sed 's/^v//' <(git describe --exact-match HEAD || git rev-parse --short HEAD) )
- - > # check version only for one (reliable) repo to avoid false negatives
- ! osc ls -b home:CZ-NIC:$OBS_REPO knot-resolver Debian_9.0 x86_64 | \
- grep -E '(rpm|deb|tar\.xz)$' | grep -v $version || \
- (echo "ERROR: version mismatch"; exit 1)
- - >
- ! osc results home:CZ-NIC:$OBS_REPO knot-resolver --csv | \
- grep -Ev 'disabled|excluded|Rawhide|CentOS_8_EPEL' | grep -v 'succeeded' -q || \
- (echo "ERROR: build(s) failed"; exit 1)
-
-.distrotest: &distrotest
- <<: *deploytest
- # Description of the distrotest script workflow:
- # 1. wait for OBS package build to complete
- # 2. check the OBS build suceeded
- # 3. set up some variables, dir names etc.
- # 4. create a symlink with predictable name to export artifacts afterwards
- # 5. create an HTCondor job and submit it to a HTCondor cluster
- # 6. check exit code from condor, optionally display one of the logs and end the job with same exit code
+.enable_repo_build: &enable_repo_build
+ before_script:
+ - ./scripts/enable-repo.py build
+
+.pkg_test: &pkg_test
+ stage: pkg
+ needs:
+ - pkg:make-archive
+ tags:
+ - lxc
+ - amd64
script:
- - "osc results home:CZ-NIC:$OBS_REPO knot-resolver -a x86_64 -r $DISTROTEST_REPO -w"
- - >
- osc results home:CZ-NIC:$OBS_REPO knot-resolver -a x86_64 -r $DISTROTEST_REPO --csv | grep 'succeeded|$' -q || \
- (echo "ERROR: build failed"; exit 1)
- - export LABEL="gl$(date +%s)_$OBS_REPO"
- - export COMMITDIR="/var/tmp/respdiff-jobs/$(git rev-parse --short HEAD)-$LABEL"
- - export TESTDIR="$COMMITDIR/distrotest.$DISTROTEST_NAME"
- - ln -s $COMMITDIR distrotest_commitdir
- - sudo -u respdiff /var/opt/respdiff/contrib/job_manager/submit.py -w
- -p $DISTROTEST_PRIORITY
- $(sudo -u respdiff /var/opt/respdiff/contrib/job_manager/create.py
- "$(git rev-parse --short HEAD)" -l $LABEL -t distrotest.$DISTROTEST_NAME
- --obs-repo $OBS_REPO)
- - export EXITCODE=$(cat $TESTDIR/j*_exitcode)
- - if [[ "$EXITCODE" != "0" ]]; then cat $TESTDIR/j*_{vagrant.log.txt,stdout.txt}; fi
- - exit $EXITCODE
- after_script:
- - 'cp -t . distrotest_commitdir/distrotest.$DISTROTEST_NAME/j* ||:'
+ # make sure the archive from pkg:make-archive is available
+ - apkg info cache | grep archive/dev
+ - apkg install --build-dep
+ - apkg test --test-dep
+
+.pkg_test_deb: &pkg_test_deb
+ <<: *pkg_test
+ <<: *pkg_deb_extras
+
+pkg:make-archive:
+ # archive is created once and reused in other pkg jobs
+ <<: *pkg_deb_extras
+ stage: pkg
+ image: $CI_REGISTRY/packaging/apkg/full/debian-11
+ tags:
+ - lxc
+ - amd64
+ needs: []
artifacts:
- when: always
- expire_in: 1 week
paths:
- - ./j*
- retry:
- max: 1
- when:
- - script_failure
-
-obs:rocky8:x86_64:
- <<: *distrotest
- allow_failure: true
- variables:
- OBS_REPO: knot-resolver-latest
- DISTROTEST_NAME: rocky8
- DISTROTEST_REPO: CentOS_8_EPEL
-
-obs:debian9:x86_64:
- <<: *distrotest
- variables:
- OBS_REPO: knot-resolver-latest
- DISTROTEST_NAME: debian9
- DISTROTEST_REPO: Debian_9.0
-
-obs:debian10:x86_64:
- <<: *distrotest
- only:
- variables:
- - $OBS_REPO =~ /^knot-resolver-devel|knot-dns-devel|knot-resolver-testing|knot-resolver-odvr$/
- - $CI_COMMIT_TAG
- variables:
- OBS_REPO: knot-resolver-latest
- DISTROTEST_NAME: debian10
- DISTROTEST_REPO: Debian_10
-
-obs:debian11:x86_64:
- <<: *distrotest
- only:
- variables:
- - $OBS_REPO =~ /^knot-resolver-devel|knot-dns-devel|knot-resolver-testing|knot-resolver-odvr$/
- - $CI_COMMIT_TAG
- variables:
- OBS_REPO: knot-resolver-latest
- DISTROTEST_NAME: debian11
- DISTROTEST_REPO: Debian_11
-
-obs:fedora35:x86_64:
- <<: *distrotest
- allow_failure: true
- variables:
- OBS_REPO: knot-resolver-latest
- DISTROTEST_NAME: fedora35
- DISTROTEST_REPO: Fedora_35
-
-obs:fedora36:x86_64:
- <<: *distrotest
- allow_failure: true
- variables:
- OBS_REPO: knot-resolver-latest
- DISTROTEST_NAME: fedora36
- DISTROTEST_REPO: Fedora_36
-
-obs:leap15:x86_64:
- <<: *distrotest
- allow_failure: true
- variables:
- OBS_REPO: knot-resolver-latest
- DISTROTEST_NAME: leap15
- DISTROTEST_REPO: openSUSE_Leap_15.4
-
-obs:ubuntu1804:x86_64:
- <<: *distrotest
- variables:
- OBS_REPO: knot-resolver-latest
- DISTROTEST_NAME: ubuntu1804
- DISTROTEST_REPO: xUbuntu_18.04
-
-obs:ubuntu2004:x86_64:
- <<: *distrotest
- only:
- variables:
- - $OBS_REPO =~ /^knot-resolver-devel|knot-dns-devel|knot-resolver-testing|knot-resolver-odvr$/
- - $CI_COMMIT_TAG
- variables:
- OBS_REPO: knot-resolver-latest
- DISTROTEST_NAME: ubuntu2004
- DISTROTEST_REPO: xUbuntu_20.04
+ - pkg/
+ script:
+ - apkg build-dep
+ - apkg make-archive
-obs:ubuntu2204:x86_64:
- <<: *distrotest
- allow_failure: true
- variables:
- OBS_REPO: knot-resolver-latest
- DISTROTEST_NAME: ubuntu2204
- DISTROTEST_REPO: xUbuntu_22.04
+pkg:debian-12:
+ <<: *pkg_test_deb
+ image: $CI_REGISTRY/packaging/apkg/full/debian-12
+
+pkg:debian-11:
+ <<: *pkg_test_deb
+ image: $CI_REGISTRY/packaging/apkg/full/debian-11
+
+pkg:ubuntu-22.04:
+ <<: *pkg_test_deb
+ image: $CI_REGISTRY/packaging/apkg/full/ubuntu-22.04
+
+pkg:ubuntu-20.04:
+ <<: *pkg_test_deb
+ <<: *enable_repo_build
+ image: $CI_REGISTRY/packaging/apkg/full/ubuntu-20.04
+
+pkg:fedora-38:
+ <<: *pkg_test
+ image: $CI_REGISTRY/packaging/apkg/full/fedora-38
+
+pkg:fedora-37:
+ <<: *pkg_test
+ image: $CI_REGISTRY/packaging/apkg/full/fedora-37
+
+pkg:alma-9:
+ <<: *pkg_test
+ image: $CI_REGISTRY/packaging/apkg/full/alma-9
+
+# RHEL 8 derivatives would need more work due to *default* python being old
+#pkg:rocky-8:
+# <<: *pkg_test
+# image: $CI_REGISTRY/packaging/apkg/full/rocky-8
+
+# Leap 15.4 would need more work due to *default* python being old
+#pkg:opensuse-15.4:
+# <<: *pkg_test
+# <<: *enable_repo_build
+# image: $CI_REGISTRY/packaging/apkg/full/opensuse-15.4
+# allow_failure: true # SUSE is always special
+# }}}
-.packagingtest: &packagingtest
- stage: pkgtest
- only:
- refs:
- - nightly@knot/knot-resolver
+pages:
+ image: $CI_REGISTRY/packaging/apkg/lxc/fedora-36
+ stage: deploy
needs: []
- tags:
- - dind
- - amd64
- variables:
- DISTRO: debian_10
script:
- - pytest -r fEsxX tests/packaging -k $DISTRO
-
-packaging:centos_8:
- <<: *packagingtest
- variables:
- DISTRO: centos_8
-
-packaging:centos_7:
- <<: *packagingtest
- variables:
- DISTRO: centos_7
-
-packaging:fedora_31:
- <<: *packagingtest
- variables:
- DISTRO: fedora_31
-
-packaging:fedora_32:
- <<: *packagingtest
- variables:
- DISTRO: fedora_32
-
-# }}}
+ - git submodule update --init --recursive
+ - apkg build-dep -y
+ - dnf install -y python3-sphinx texinfo doxygen
+ - pip3 install -r doc/requirements.txt
+ - pip3 install sphinx_rtd_theme
+ - meson build_doc -Ddoc=enabled
+ - ninja -C build_doc doc
+ - mv doc/html public
+ artifacts:
+ paths:
+ - public
diff --git a/Dockerfile b/Dockerfile
index c82938fd..b8914057 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,78 +1,55 @@
-# Intermediate container for Knot DNS build (not persistent)
# SPDX-License-Identifier: GPL-3.0-or-later
-FROM debian:11 AS knot-dns-build
-ARG KNOT_DNS_VERSION=v3.1.1
-
-# Build dependencies
-ENV KNOT_DNS_BUILD_DEPS git-core build-essential libtool autoconf pkg-config \
- libgnutls28-dev libprotobuf-dev libprotobuf-c-dev libfstrm-dev
-ENV KNOT_RESOLVER_BUILD_DEPS build-essential pkg-config bsdmainutils liblmdb-dev \
- libluajit-5.1-dev libuv1-dev libprotobuf-dev libprotobuf-c-dev \
- libfstrm-dev luajit lua-http libssl-dev libnghttp2-dev protobuf-c-compiler \
- meson
-ENV BUILDENV_DEPS ${KNOT_DNS_BUILD_DEPS} ${KNOT_RESOLVER_BUILD_DEPS}
+
+# Intermediate container for build
+FROM debian:11 AS build
+
+ENV OBS_REPO=knot-resolver-latest
+ENV DISTROTEST_REPO=Debian_11
+
+
RUN apt-get update -qq && \
- apt-get -y -qqq install ${BUILDENV_DEPS}
+ apt-get -qqq -y install python3-pip python3-venv devscripts && \
+ pip3 install pipx && \
+ pipx install apkg
-# Install Knot DNS from sources
-RUN git clone -b $KNOT_DNS_VERSION --depth=1 https://gitlab.nic.cz/knot/knot-dns.git /tmp/knot-dns && \
- cd /tmp/knot-dns && \
- autoreconf -if && \
- ./configure --disable-static --disable-fastparser --disable-documentation \
- --disable-daemon --disable-utilities --with-lmdb=no && \
- make -j4 install && \
- ldconfig
+COPY . /source
-# Copy libknot, libdnssec, libzscanner to runtime
-RUN mkdir -p /tmp/root/usr/local/include /tmp/root/usr/local/lib /tmp/root/usr/local/lib/pkgconfig && \
- cp -rt /tmp/root/usr/local/include /usr/local/include/libknot /usr/local/include/libdnssec /usr/local/include/libzscanner && \
- cp -rt /tmp/root/usr/local/lib /usr/local/lib/libknot* /usr/local/lib/libdnssec* /usr/local/lib/libzscanner* && \
- cp -rt /tmp/root/usr/local/lib/pkgconfig /usr/local/lib/pkgconfig/libknot.pc /usr/local/lib/pkgconfig/libdnssec.pc /usr/local/lib/pkgconfig/libzscanner.pc
+RUN cd /source && \
+ export PATH="$PATH:/root/.local/bin" && \
+ git submodule update --init --recursive && \
+ git config --global user.name "Docker Build" && \
+ git config --global user.email docker-build@knot-resolver && \
+ /root/.local/bin/apkg build-dep -y && \
+ /root/.local/bin/apkg build
-# Intermediate container with runtime dependencies
+# Real container
FROM debian:11-slim AS runtime
-# Install runtime dependencies
-ENV KNOT_DNS_RUNTIME_DEPS libgnutls30
-ENV KNOT_RESOLVER_RUNTIME_DEPS liblmdb0 luajit libluajit-5.1-2 libuv1 lua-http libnghttp2-14
-ENV KNOT_RESOLVER_RUNTIME_DEPS_HTTP lua-http lua-mmdb
-ENV KNOT_RESOLVER_RUNTIME_DEPS_EXTRA lua-cqueues
-ENV KNOT_RESOLVER_RUNTIME_DEPS_DNSTAP libfstrm0 libprotobuf-c1
-ENV KNOT_RESOLVER_RUNTIME_DEPS_SSL ca-certificates
-ENV RUNTIME_DEPS ${KNOT_DNS_RUNTIME_DEPS} ${KNOT_RESOLVER_RUNTIME_DEPS} \
- ${KNOT_RESOLVER_RUNTIME_DEPS_HTTP} ${KNOT_RESOLVER_RUNTIME_DEPS_EXTRA} \
- ${KNOT_RESOLVER_RUNTIME_DEPS_SSL} ${KNOT_RESOLVER_RUNTIME_DEPS_DNSTAP}
-RUN apt-get update -qq && \
- apt-get install -y -qqq ${RUNTIME_DEPS} && \
- apt-get clean && \
- rm -rf /var/lib/apt/lists/*
-
+ENV OBS_REPO=knot-resolver-latest
+ENV DISTROTEST_REPO=Debian_11
-# Intermediate container for Knot Resolver build
-FROM knot-dns-build AS build
+RUN apt-get update -qq && \
+ apt-get update -qq
-# Get Knot Resolver code from current directory
-COPY . /tmp/knot-resolver
+COPY --from=build /source/pkg/pkgs/debian-11 /pkg
-# Build Knot Resolver
-RUN cd /tmp/knot-resolver && \
- meson build_docker --buildtype=plain --prefix=/usr --libdir=lib -Dc_args="-O2 -fstack-protector -g" && \
- DESTDIR=/tmp/root ninja -C build_docker install && \
- cp /tmp/root/usr/share/doc/knot-resolver/examples/config.docker /tmp/root/etc/knot-resolver/kresd.conf
+# install resolver, minimize image and prepare config directory
+RUN apt-get install -y /pkg/*/*.deb && \
+ rm -r /pkg && \
+ apt-get remove -y -qq curl gnupg2 && \
+ apt-get autoremove -y && \
+ apt-get clean && \
+ rm -rf /var/lib/apt/lists/* && \
+ mkdir /config
+COPY manager/etc/knot-resolver/config.docker.yml /config/config.yml
-# Final container
-FROM runtime
LABEL cz.knot-resolver.vendor="CZ.NIC"
LABEL maintainer="knot-resolver-users@lists.nic.cz"
-# Export DNS over UDP & TCP, DNS-over-HTTPS, DNS-over-TLS, web interface
-EXPOSE 53/UDP 53/TCP 443/TCP 853/TCP 8453/TCP
-
-# Fetch Knot Resolver + Knot DNS libraries from build image
-COPY --from=build /tmp/root/ /
-RUN ldconfig
+# Export plain DNS, DoT, DoH and management interface
+EXPOSE 53/UDP 53/TCP 443/TCP 853/TCP 5000/TCP
-ENTRYPOINT ["/usr/sbin/kresd"]
-CMD ["-c", "/etc/knot-resolver/kresd.conf"]
+ENTRYPOINT ["/usr/bin/knot-resolver"]
+CMD ["-c", "/config/config.yml"]
diff --git a/NEWS b/NEWS
index 134ba1f9..e2e23006 100644
--- a/NEWS
+++ b/NEWS
@@ -1,6 +1,20 @@
-Knot Resolver 5.6.1 (2023-0m-dd)
+Knot Resolver 6.0.0 (2023-mm-dd)
================================
+Improvements
+------------
+- Knot Resolver v6 alpha starts
+- 6.0.x versions are dedicated to alpha cycle
+
+
+Knot Resolver 5.7.0 (2023-0m-dd)
+================================
+
+Improvements
+------------
+- forwarding mode: tweak dealing with failures from forwarders,
+ in particular prefer sending CD=0 upstream (!1392)
+
Bugfixes
--------
- fix unusual timestamp format in debug dumps of records (!1386)
diff --git a/ci/images/README.md b/ci/images/README.md
index d9efe0e8..3d09f603 100644
--- a/ci/images/README.md
+++ b/ci/images/README.md
@@ -39,3 +39,11 @@ $ ./push.sh debian-11 # pushes the local image into target registry
$ ./update.sh debian-11 # utility wrapper that both builds and pushes the image
$ ./update.sh */ # use shell expansion of dirnames to update all images
```
+
+By default, a branch of Knot DNS deemed to be stable is selected according to
+the `vars.sh` file. To build an image for a different Knot DNS branch, set the
+`KNOT_BRANCH` environment variable to the name of the branch, e.g.:
+
+```
+$ KNOT_BRANCH='3.2' ./update.sh debian-11
+```
diff --git a/ci/images/build.sh b/ci/images/build.sh
index 39ee6171..1e9eabb5 100755
--- a/ci/images/build.sh
+++ b/ci/images/build.sh
@@ -9,5 +9,15 @@ if [ -n "$COVERITY_SCAN_TOKEN" ]; then
SECRETS="$SECRETS --secret id=coverity-token,env=COVERITY_SCAN_TOKEN"
fi
+DOCKERFILE="$(realpath "${IMAGE}")/Dockerfile"
+
+cd "$CURRENT_DIR/../.."
export DOCKER_BUILDKIT=1 # Enables using secrets in docker-build
-docker build --pull --no-cache -t "${FULL_NAME}" "${IMAGE}" --build-arg KNOT_BRANCH=${KNOT_BRANCH} $SECRETS
+docker build \
+ --pull \
+ --no-cache \
+ --tag "${FULL_NAME}" \
+ --file "${DOCKERFILE}" \
+ . \
+ --build-arg KNOT_BRANCH=${KNOT_BRANCH} \
+ $SECRETS
diff --git a/ci/images/debian-11/Dockerfile b/ci/images/debian-11/Dockerfile
index 59f170ba..9645cddb 100644
--- a/ci/images/debian-11/Dockerfile
+++ b/ci/images/debian-11/Dockerfile
@@ -21,10 +21,12 @@ RUN apt-get install -y -qqq git make cmake pkg-config meson \
# Build and testing deps for Resolver's dnstap module (go stuff is just for testing)
RUN apt-get install -y -qqq \
- protobuf-c-compiler libprotobuf-c-dev libfstrm-dev
-# Maintaining the go stuff in CI really seems more trouble than worth.
-# golang-any
-#RUN bash -c "go get github.com/{FiloSottile/gvt,cloudflare/dns,dnstap/golang-dnstap,golang/protobuf/proto}"
+ protobuf-c-compiler libprotobuf-c-dev libfstrm-dev \
+ golang-any
+COPY ./tests/dnstap /root/tests/dnstap
+WORKDIR /root/tests/dnstap/src/dnstap-test
+RUN go get .
+WORKDIR /root
# documentation dependencies
RUN apt-get install -y -qqq doxygen python3-sphinx python3-breathe python3-sphinx-rtd-theme
diff --git a/ci/images/debian-buster/Dockerfile b/ci/images/debian-buster/Dockerfile
index 4b47dda1..39f43277 100644
--- a/ci/images/debian-buster/Dockerfile
+++ b/ci/images/debian-buster/Dockerfile
@@ -25,9 +25,10 @@ RUN apt-get install -y -qqq git make cmake pkg-config meson \
RUN apt-get install -y -qqq \
protobuf-c-compiler libprotobuf-c-dev libfstrm-dev \
golang-any
-# Some stuff won't work on buster:
-# package crypto/ed25519: unrecognized import path "crypto/ed25519"
-#RUN bash -c "go get github.com/{FiloSottile/gvt,cloudflare/dns,dnstap/golang-dnstap}"
+COPY ./tests/dnstap /root/tests/dnstap
+WORKDIR /root/tests/dnstap/src/dnstap-test
+RUN go get .
+WORKDIR /root
# documentation dependencies
RUN apt-get install -y -qqq doxygen python3-sphinx python3-breathe python3-sphinx-rtd-theme
@@ -41,7 +42,7 @@ RUN pip3 install pylint
RUN pip3 install pep8
RUN pip3 install pytest-xdist
# tests/pytest dependencies: skip over broken versions
-RUN pip3 install 'dnspython != 2.0.0' jinja2 'pytest != 6.0.0' pytest-html pytest-xdist
+RUN pip3 install 'dnspython != 2.0.0' 'jinja2 == 2.11.3' 'pytest != 6.0.0' pytest-html pytest-xdist
# packet capture tools for Deckard
RUN apt-get install --no-install-suggests --no-install-recommends -y -qqq tcpdump wireshark-common
diff --git a/ci/images/manager/Dockerfile b/ci/images/manager/Dockerfile
new file mode 100644
index 00000000..519c824e
--- /dev/null
+++ b/ci/images/manager/Dockerfile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+FROM fedora:38
+LABEL Knot Resolver <knot-resolver@labs.nic.cz>
+
+WORKDIR /root
+CMD ["/bin/bash"]
+ENV PATH="/root/.local/bin:${PATH}"
+
+# Install Python and deps
+RUN dnf install -y python3.7 python3.8 python3.9 python3.10 python3.10-devel python3 python3-devel python3-gobject\
+ git which diffutils gcc pkg-config cairo-devel gobject-introspection-devel cairo-gobject-devel\
+ && dnf clean all
+
+# Install pip
+RUN python3 -m pip install -U pip\
+ # Install poetry
+ && curl -sSL https://install.python-poetry.org | python3 - --version 1.4.2\
+ # not exactly required, but helpful
+ && python3 -m pip install poethepoet
diff --git a/ci/pkgtest.yaml b/ci/pkgtest.yaml
index e841a972..b7b87c35 100644
--- a/ci/pkgtest.yaml
+++ b/ci/pkgtest.yaml
@@ -51,15 +51,6 @@ centos-7:pkgbuild:
- yum install -y rpm-build python3-pip epel-release
- *apkgbuild
-debian-9:pkgbuild:
- <<: *pkgbuild
- image: $CI_REGISTRY/labs/lxc-gitlab-runner/debian-9
- variables:
- OBS_REPO: knot-resolver-build
- DISTROTEST_REPO: Debian_9.0
- script:
- - *debpkgbuild
-
debian-10:pkgbuild:
<<: *pkgbuild
image: $CI_REGISTRY/labs/lxc-gitlab-runner/debian-10
@@ -169,17 +160,6 @@ centos-7:pkgtest:
- systemctl start kresd@1
- kdig @127.0.0.1 nic.cz | grep -qi NOERROR
-debian-9:pkgtest:
- <<: *pkgtest
- needs:
- - debian-9:pkgbuild
- image: $CI_REGISTRY/labs/lxc-gitlab-runner/debian-9
- variables:
- OBS_REPO: knot-resolver-build
- DISTROTEST_REPO: Debian_9.0
- script:
- - *debpkgtest
-
debian-10:pkgtest:
<<: *pkgtest
needs:
diff --git a/client/.packaging/test.sh b/client/.packaging/test.sh
deleted file mode 100755
index e1311c48..00000000
--- a/client/.packaging/test.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-test -e sbin/kresc
-sbin/kresc # command will fail because of invalid parameters
-test "$?" -eq 1 # linker error would have different exit code
diff --git a/daemon/.packaging/centos/7/builddeps b/daemon/.packaging/centos/7/builddeps
deleted file mode 100644
index 3247738f..00000000
--- a/daemon/.packaging/centos/7/builddeps
+++ /dev/null
@@ -1,13 +0,0 @@
-gcc
-gcc-c++
-gnutls
-knot-libs
-knot-devel
-libcmocka-devel
-libedit-devel
-libcap-ng
-libuv-devel
-lmdb-devel
-luajit-devel
-meson
-systemd-devel
diff --git a/daemon/.packaging/centos/7/pre-build.sh b/daemon/.packaging/centos/7/pre-build.sh
deleted file mode 100755
index d3a9503f..00000000
--- a/daemon/.packaging/centos/7/pre-build.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-yum update -y
-yum install -y wget epel-release
-
-# add build repository
-cd /etc/yum.repos.d/
-wget https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-build/CentOS_7_EPEL/home:CZ-NIC:knot-resolver-build.repo
-
diff --git a/daemon/.packaging/centos/7/pre-run.sh b/daemon/.packaging/centos/7/pre-run.sh
deleted file mode 100755
index ee15ec7f..00000000
--- a/daemon/.packaging/centos/7/pre-run.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-yum update -y
-yum install -y wget epel-release
-
-# add build repository
-cd /etc/yum.repos.d/
-wget https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-latest/CentOS_7_EPEL/home:CZ-NIC:knot-resolver-latest.repo
diff --git a/daemon/.packaging/centos/7/rundeps b/daemon/.packaging/centos/7/rundeps
deleted file mode 100644
index 648501ee..00000000
--- a/daemon/.packaging/centos/7/rundeps
+++ /dev/null
@@ -1,6 +0,0 @@
-knot-libs
-libedit
-libuv
-luajit
-lua-basexx
-lua-http
diff --git a/daemon/.packaging/centos/8/builddeps b/daemon/.packaging/centos/8/builddeps
deleted file mode 100644
index 984fa0b6..00000000
--- a/daemon/.packaging/centos/8/builddeps
+++ /dev/null
@@ -1,14 +0,0 @@
-gcc
-gcc-c++
-meson
-"pkgconfig(cmocka)"
-"pkgconfig(gnutls)"
-"pkgconfig(libedit)"
-"pkgconfig(libknot)"
-"pkgconfig(libzscanner)"
-"pkgconfig(libdnssec)"
-"pkgconfig(libsystemd)"
-"pkgconfig(libcap-ng)"
-"pkgconfig(libuv)"
-"pkgconfig(lmdb)"
-"pkgconfig(luajit)"
diff --git a/daemon/.packaging/centos/8/pre-build.sh b/daemon/.packaging/centos/8/pre-build.sh
deleted file mode 100755
index 31398f80..00000000
--- a/daemon/.packaging/centos/8/pre-build.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-dnf install -y wget 'dnf-command(config-manager)' epel-release centos-release
-
-dnf config-manager --enable PowerTools
-dnf config-manager --enable Devel
-dnf config-manager --add-repo https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-build/CentOS_8_EPEL/home:CZ-NIC:knot-resolver-build.repo
-dnf install -y knot
-dnf upgrade -y
diff --git a/daemon/.packaging/centos/8/pre-run.sh b/daemon/.packaging/centos/8/pre-run.sh
deleted file mode 100755
index 94f8eb02..00000000
--- a/daemon/.packaging/centos/8/pre-run.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-dnf install -y wget 'dnf-command(config-manager)' epel-release centos-release
-
-dnf config-manager --enable PowerTools
-dnf config-manager --add-repo https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-latest/CentOS_8_EPEL/home:CZ-NIC:knot-resolver-latest.repo
-dnf upgrade -y
diff --git a/daemon/.packaging/centos/8/rundeps b/daemon/.packaging/centos/8/rundeps
deleted file mode 100644
index e3779ec9..00000000
--- a/daemon/.packaging/centos/8/rundeps
+++ /dev/null
@@ -1,6 +0,0 @@
-libedit
-knot-libs
-libuv
-luajit
-lua5.1-basexx
-lua5.1-http
diff --git a/daemon/.packaging/debian/10/builddeps b/daemon/.packaging/debian/10/builddeps
deleted file mode 100644
index 604993c1..00000000
--- a/daemon/.packaging/debian/10/builddeps
+++ /dev/null
@@ -1,12 +0,0 @@
-debhelper
-libcmocka-dev
-libedit-dev
-libgnutls28-dev
-libknot-dev
-liblmdb-dev
-luajit-5.1-dev
-libsystemd-dev
-libuv1-dev
-luajit
-pkg-config
-meson
diff --git a/daemon/.packaging/debian/10/pre-build.sh b/daemon/.packaging/debian/10/pre-build.sh
deleted file mode 100755
index dc3b8019..00000000
--- a/daemon/.packaging/debian/10/pre-build.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# add debian build repository
-apt-get update
-apt-get install -y wget gnupg apt-utils
-echo 'deb http://download.opensuse.org/repositories/home:/CZ-NIC:/knot-resolver-build/Debian_10/ /' > /etc/apt/sources.list.d/home:CZ-NIC:knot-resolver-build.list
-wget -nv https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-build/Debian_10/Release.key -O Release.key
-apt-key add - < Release.key
-
-apt-get update
-apt-get upgrade -y
diff --git a/daemon/.packaging/debian/10/pre-run.sh b/daemon/.packaging/debian/10/pre-run.sh
deleted file mode 100755
index 3a3906a4..00000000
--- a/daemon/.packaging/debian/10/pre-run.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-apt-get update
-apt-get install -y wget gnupg apt-utils
-
-echo 'deb http://download.opensuse.org/repositories/home:/CZ-NIC:/knot-resolver-latest/Debian_10/ /' > /etc/apt/sources.list.d/home:CZ-NIC:knot-resolver-latest.list
-wget -nv https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-latest/Debian_10/Release.key -O Release.key
-apt-key add - < Release.key
-
-apt-get update
-apt-get upgrade -y
diff --git a/daemon/.packaging/debian/10/rundeps b/daemon/.packaging/debian/10/rundeps
deleted file mode 100644
index a0f40c1b..00000000
--- a/daemon/.packaging/debian/10/rundeps
+++ /dev/null
@@ -1,15 +0,0 @@
-adduser
-dns-root-data
-systemd
-libc6
-libdnssec7
-libedit2
-libgcc1
-libgnutls30
-libknot10
-liblmdb0
-libluajit-5.1-2
-libstdc++6
-libsystemd0
-libuv1
-libzscanner3
diff --git a/daemon/.packaging/debian/9/builddeps b/daemon/.packaging/debian/9/builddeps
deleted file mode 100644
index 604993c1..00000000
--- a/daemon/.packaging/debian/9/builddeps
+++ /dev/null
@@ -1,12 +0,0 @@
-debhelper
-libcmocka-dev
-libedit-dev
-libgnutls28-dev
-libknot-dev
-liblmdb-dev
-luajit-5.1-dev
-libsystemd-dev
-libuv1-dev
-luajit
-pkg-config
-meson
diff --git a/daemon/.packaging/debian/9/pre-build.sh b/daemon/.packaging/debian/9/pre-build.sh
deleted file mode 100755
index 953025f9..00000000
--- a/daemon/.packaging/debian/9/pre-build.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# add debian build repository
-apt-get update
-apt-get install -y wget gnupg apt-utils
-echo 'deb http://download.opensuse.org/repositories/home:/CZ-NIC:/knot-resolver-build/Debian_9.0/ /' > /etc/apt/sources.list.d/home:CZ-NIC:knot-resolver-build.list
-wget -nv https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-build/Debian_9.0/Release.key -O Release.key
-apt-key add - < Release.key
-
-apt-get update
-apt-get upgrade -y
diff --git a/daemon/.packaging/debian/9/pre-run.sh b/daemon/.packaging/debian/9/pre-run.sh
deleted file mode 100755
index fa8d3776..00000000
--- a/daemon/.packaging/debian/9/pre-run.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-apt-get update
-apt-get install -y wget gnupg apt-utils
-
-echo 'deb http://download.opensuse.org/repositories/home:/CZ-NIC:/knot-resolver-latest/Debian_9.0/ /' > /etc/apt/sources.list.d/home:CZ-NIC:knot-resolver-latest.list
-wget -nv https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-latest/Debian_9.0/Release.key -O Release.key
-apt-key add - < Release.key
-
-apt-get update
-apt-get upgrade -y
diff --git a/daemon/.packaging/debian/9/rundeps b/daemon/.packaging/debian/9/rundeps
deleted file mode 100644
index a0f40c1b..00000000
--- a/daemon/.packaging/debian/9/rundeps
+++ /dev/null
@@ -1,15 +0,0 @@
-adduser
-dns-root-data
-systemd
-libc6
-libdnssec7
-libedit2
-libgcc1
-libgnutls30
-libknot10
-liblmdb0
-libluajit-5.1-2
-libstdc++6
-libsystemd0
-libuv1
-libzscanner3
diff --git a/daemon/.packaging/fedora/31/builddeps b/daemon/.packaging/fedora/31/builddeps
deleted file mode 100644
index 984fa0b6..00000000
--- a/daemon/.packaging/fedora/31/builddeps
+++ /dev/null
@@ -1,14 +0,0 @@
-gcc
-gcc-c++
-meson
-"pkgconfig(cmocka)"
-"pkgconfig(gnutls)"
-"pkgconfig(libedit)"
-"pkgconfig(libknot)"
-"pkgconfig(libzscanner)"
-"pkgconfig(libdnssec)"
-"pkgconfig(libsystemd)"
-"pkgconfig(libcap-ng)"
-"pkgconfig(libuv)"
-"pkgconfig(lmdb)"
-"pkgconfig(luajit)"
diff --git a/daemon/.packaging/fedora/31/pre-build.sh b/daemon/.packaging/fedora/31/pre-build.sh
deleted file mode 100755
index 7e279da5..00000000
--- a/daemon/.packaging/fedora/31/pre-build.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-dnf install -y wget
-
-dnf config-manager --add-repo https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-build/Fedora_31/home:CZ-NIC:knot-resolver-build.repo
-dnf install -y knot
-dnf upgrade -y
diff --git a/daemon/.packaging/fedora/31/pre-run.sh b/daemon/.packaging/fedora/31/pre-run.sh
deleted file mode 100755
index b84b42de..00000000
--- a/daemon/.packaging/fedora/31/pre-run.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-dnf install -y wget
-
-dnf config-manager --add-repo https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-latest/Fedora_31/home:CZ-NIC:knot-resolver-latest.repo
-dnf upgrade -y
diff --git a/daemon/.packaging/fedora/31/rundeps b/daemon/.packaging/fedora/31/rundeps
deleted file mode 100644
index 7517b6b0..00000000
--- a/daemon/.packaging/fedora/31/rundeps
+++ /dev/null
@@ -1,7 +0,0 @@
-libedit
-knot-libs
-libuv
-luajit
-lua5.1-basexx
-lua5.1-psl
-lua5.1-http
diff --git a/daemon/.packaging/fedora/32/builddeps b/daemon/.packaging/fedora/32/builddeps
deleted file mode 100644
index 984fa0b6..00000000
--- a/daemon/.packaging/fedora/32/builddeps
+++ /dev/null
@@ -1,14 +0,0 @@
-gcc
-gcc-c++
-meson
-"pkgconfig(cmocka)"
-"pkgconfig(gnutls)"
-"pkgconfig(libedit)"
-"pkgconfig(libknot)"
-"pkgconfig(libzscanner)"
-"pkgconfig(libdnssec)"
-"pkgconfig(libsystemd)"
-"pkgconfig(libcap-ng)"
-"pkgconfig(libuv)"
-"pkgconfig(lmdb)"
-"pkgconfig(luajit)"
diff --git a/daemon/.packaging/fedora/32/pre-build.sh b/daemon/.packaging/fedora/32/pre-build.sh
deleted file mode 100755
index 97caeadf..00000000
--- a/daemon/.packaging/fedora/32/pre-build.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-dnf install -y wget
-
-dnf config-manager --add-repo https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-build/Fedora_32/home:CZ-NIC:knot-resolver-build.repo
-dnf install -y knot
-dnf upgrade -y
diff --git a/daemon/.packaging/fedora/32/pre-run.sh b/daemon/.packaging/fedora/32/pre-run.sh
deleted file mode 100755
index b224b7eb..00000000
--- a/daemon/.packaging/fedora/32/pre-run.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-dnf install -y wget
-
-dnf config-manager --add-repo https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-latest/Fedora_32/home:CZ-NIC:knot-resolver-latest.repo
-dnf upgrade -y
diff --git a/daemon/.packaging/fedora/32/rundeps b/daemon/.packaging/fedora/32/rundeps
deleted file mode 100644
index 7517b6b0..00000000
--- a/daemon/.packaging/fedora/32/rundeps
+++ /dev/null
@@ -1,7 +0,0 @@
-libedit
-knot-libs
-libuv
-luajit
-lua5.1-basexx
-lua5.1-psl
-lua5.1-http
diff --git a/daemon/.packaging/leap/15.2/builddeps b/daemon/.packaging/leap/15.2/builddeps
deleted file mode 100644
index e5689051..00000000
--- a/daemon/.packaging/leap/15.2/builddeps
+++ /dev/null
@@ -1,14 +0,0 @@
-gcc
-gcc-c++
-lmdb-devel
-meson
-"pkgconfig(cmocka)"
-"pkgconfig(gnutls)"
-"pkgconfig(libedit)"
-"pkgconfig(libknot)"
-"pkgconfig(libzscanner)"
-"pkgconfig(libdnssec)"
-"pkgconfig(libsystemd)"
-"pkgconfig(libcap-ng)"
-"pkgconfig(libuv)"
-"pkgconfig(luajit)"
diff --git a/daemon/.packaging/leap/15.2/pre-build.sh b/daemon/.packaging/leap/15.2/pre-build.sh
deleted file mode 100755
index 274931a5..00000000
--- a/daemon/.packaging/leap/15.2/pre-build.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-zypper addrepo https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-build/openSUSE_Leap_15.2/home:CZ-NIC:knot-resolver-build.repo
-zypper --no-gpg-checks refresh
-zypper install -y knot
-
diff --git a/daemon/.packaging/leap/15.2/pre-run.sh b/daemon/.packaging/leap/15.2/pre-run.sh
deleted file mode 100755
index 9b0b5da0..00000000
--- a/daemon/.packaging/leap/15.2/pre-run.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-zypper addrepo https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-latest/openSUSE_Leap_15.2/home:CZ-NIC:knot-resolver-latest.repo
-zypper --no-gpg-checks refresh
diff --git a/daemon/.packaging/leap/15.2/rundeps b/daemon/.packaging/leap/15.2/rundeps
deleted file mode 100644
index 3f601a0e..00000000
--- a/daemon/.packaging/leap/15.2/rundeps
+++ /dev/null
@@ -1,4 +0,0 @@
-libedit0
-knot-libs
-libuv1
-libluajit-5_1-2
diff --git a/daemon/.packaging/leap/docker-image-name b/daemon/.packaging/leap/docker-image-name
deleted file mode 100644
index 388ed863..00000000
--- a/daemon/.packaging/leap/docker-image-name
+++ /dev/null
@@ -1 +0,0 @@
-opensuse/leap
diff --git a/daemon/.packaging/test.config b/daemon/.packaging/test.config
deleted file mode 100644
index 72ec48db..00000000
--- a/daemon/.packaging/test.config
+++ /dev/null
@@ -1,2 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-quit()
diff --git a/daemon/.packaging/ubuntu/16.04/builddeps b/daemon/.packaging/ubuntu/16.04/builddeps
deleted file mode 100644
index 7b1d9431..00000000
--- a/daemon/.packaging/ubuntu/16.04/builddeps
+++ /dev/null
@@ -1,16 +0,0 @@
-debhelper
-libcmocka-dev
-libedit-dev
-libgnutls28-dev
-libknot-dev
-liblmdb-dev
-libluajit-5.1-dev
-libsystemd-dev
-libuv1-dev
-luajit
-pkg-config
-meson
-doxygen
-python3-breathe
-python3-sphinx
-python3-sphinx-rtd-theme
diff --git a/daemon/.packaging/ubuntu/16.04/pre-build.sh b/daemon/.packaging/ubuntu/16.04/pre-build.sh
deleted file mode 100755
index 5af89ab5..00000000
--- a/daemon/.packaging/ubuntu/16.04/pre-build.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# add build repository
-apt-get update
-apt-get install -y wget gnupg apt-utils
-
-echo 'deb http://download.opensuse.org/repositories/home:/CZ-NIC:/knot-resolver-build/xUbuntu_16.04/ /' > /etc/apt/sources.list.d/home:CZ-NIC:knot-resolver-build.list
-wget -nv https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-build/xUbuntu_16.04/Release.key -O Release.key
-apt-key add - < Release.key
-
-apt-get update
-apt-get upgrade -y
diff --git a/daemon/.packaging/ubuntu/16.04/pre-run.sh b/daemon/.packaging/ubuntu/16.04/pre-run.sh
deleted file mode 100755
index bb81453a..00000000
--- a/daemon/.packaging/ubuntu/16.04/pre-run.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# add build repository
-apt-get update
-apt-get install -y wget gnupg apt-utils
-
-echo 'deb http://download.opensuse.org/repositories/home:/CZ-NIC:/knot-resolver-latest/xUbuntu_16.04/ /' > /etc/apt/sources.list.d/home:CZ-NIC:knot-resolver-latest.list
-wget -nv https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-latest/xUbuntu_16.04/Release.key -O Release.key
-apt-key add - < Release.key
-
-apt-get update
-apt-get upgrade -y
diff --git a/daemon/.packaging/ubuntu/16.04/rundeps b/daemon/.packaging/ubuntu/16.04/rundeps
deleted file mode 100644
index a0f40c1b..00000000
--- a/daemon/.packaging/ubuntu/16.04/rundeps
+++ /dev/null
@@ -1,15 +0,0 @@
-adduser
-dns-root-data
-systemd
-libc6
-libdnssec7
-libedit2
-libgcc1
-libgnutls30
-libknot10
-liblmdb0
-libluajit-5.1-2
-libstdc++6
-libsystemd0
-libuv1
-libzscanner3
diff --git a/daemon/.packaging/ubuntu/18.04/builddeps b/daemon/.packaging/ubuntu/18.04/builddeps
deleted file mode 100644
index 7b1d9431..00000000
--- a/daemon/.packaging/ubuntu/18.04/builddeps
+++ /dev/null
@@ -1,16 +0,0 @@
-debhelper
-libcmocka-dev
-libedit-dev
-libgnutls28-dev
-libknot-dev
-liblmdb-dev
-libluajit-5.1-dev
-libsystemd-dev
-libuv1-dev
-luajit
-pkg-config
-meson
-doxygen
-python3-breathe
-python3-sphinx
-python3-sphinx-rtd-theme
diff --git a/daemon/.packaging/ubuntu/18.04/pre-build.sh b/daemon/.packaging/ubuntu/18.04/pre-build.sh
deleted file mode 100755
index 77551b80..00000000
--- a/daemon/.packaging/ubuntu/18.04/pre-build.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# add build repository
-apt-get update
-apt-get install -y wget gnupg apt-utils
-
-echo 'deb http://download.opensuse.org/repositories/home:/CZ-NIC:/knot-resolver-build/xUbuntu_18.04/ /' > /etc/apt/sources.list.d/home:CZ-NIC:knot-resolver-build.list
-wget -nv https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-build/xUbuntu_18.04/Release.key -O Release.key
-apt-key add - < Release.key
-
-apt-get update
-apt-get upgrade -y
diff --git a/daemon/.packaging/ubuntu/18.04/pre-run.sh b/daemon/.packaging/ubuntu/18.04/pre-run.sh
deleted file mode 100755
index 71d2a324..00000000
--- a/daemon/.packaging/ubuntu/18.04/pre-run.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# add build repository
-apt-get update
-apt-get install -y wget gnupg apt-utils
-
-echo 'deb http://download.opensuse.org/repositories/home:/CZ-NIC:/knot-resolver-latest/xUbuntu_18.04/ /' > /etc/apt/sources.list.d/home:CZ-NIC:knot-resolver-latest.list
-wget -nv https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-latest/xUbuntu_18.04/Release.key -O Release.key
-apt-key add - < Release.key
-
-apt-get update
-apt-get upgrade -y
diff --git a/daemon/.packaging/ubuntu/18.04/rundeps b/daemon/.packaging/ubuntu/18.04/rundeps
deleted file mode 100644
index a0f40c1b..00000000
--- a/daemon/.packaging/ubuntu/18.04/rundeps
+++ /dev/null
@@ -1,15 +0,0 @@
-adduser
-dns-root-data
-systemd
-libc6
-libdnssec7
-libedit2
-libgcc1
-libgnutls30
-libknot10
-liblmdb0
-libluajit-5.1-2
-libstdc++6
-libsystemd0
-libuv1
-libzscanner3
diff --git a/daemon/.packaging/ubuntu/20.04/builddeps b/daemon/.packaging/ubuntu/20.04/builddeps
deleted file mode 100644
index 7b1d9431..00000000
--- a/daemon/.packaging/ubuntu/20.04/builddeps
+++ /dev/null
@@ -1,16 +0,0 @@
-debhelper
-libcmocka-dev
-libedit-dev
-libgnutls28-dev
-libknot-dev
-liblmdb-dev
-libluajit-5.1-dev
-libsystemd-dev
-libuv1-dev
-luajit
-pkg-config
-meson
-doxygen
-python3-breathe
-python3-sphinx
-python3-sphinx-rtd-theme
diff --git a/daemon/.packaging/ubuntu/20.04/pre-build.sh b/daemon/.packaging/ubuntu/20.04/pre-build.sh
deleted file mode 100755
index e55fba6a..00000000
--- a/daemon/.packaging/ubuntu/20.04/pre-build.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# add build repository
-apt-get update
-apt-get install -y wget gnupg apt-utils
-
-echo 'deb http://download.opensuse.org/repositories/home:/CZ-NIC:/knot-resolver-build/xUbuntu_20.04/ /' > /etc/apt/sources.list.d/home:CZ-NIC:knot-resolver-build.list
-wget -nv https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-build/xUbuntu_20.04/Release.key -O Release.key
-apt-key add - < Release.key
-
-apt-get update
-apt-get upgrade -y
diff --git a/daemon/.packaging/ubuntu/20.04/pre-run.sh b/daemon/.packaging/ubuntu/20.04/pre-run.sh
deleted file mode 100755
index 75c32f8c..00000000
--- a/daemon/.packaging/ubuntu/20.04/pre-run.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# add build repository
-apt-get update
-apt-get install -y wget gnupg apt-utils
-
-echo 'deb http://download.opensuse.org/repositories/home:/CZ-NIC:/knot-resolver-latest/xUbuntu_20.04/ /' > /etc/apt/sources.list.d/home:CZ-NIC:knot-resolver-latest.list
-wget -nv https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-latest/xUbuntu_20.04/Release.key -O Release.key
-apt-key add - < Release.key
-
-apt-get update
-apt-get upgrade -y
diff --git a/daemon/.packaging/ubuntu/20.04/rundeps b/daemon/.packaging/ubuntu/20.04/rundeps
deleted file mode 100644
index a0f40c1b..00000000
--- a/daemon/.packaging/ubuntu/20.04/rundeps
+++ /dev/null
@@ -1,15 +0,0 @@
-adduser
-dns-root-data
-systemd
-libc6
-libdnssec7
-libedit2
-libgcc1
-libgnutls30
-libknot10
-liblmdb0
-libluajit-5.1-2
-libstdc++6
-libsystemd0
-libuv1
-libzscanner3
diff --git a/daemon/bindings/cache.c b/daemon/bindings/cache.c
index 21ef2ad2..602974b4 100644
--- a/daemon/bindings/cache.c
+++ b/daemon/bindings/cache.c
@@ -186,8 +186,9 @@ static int cache_open(lua_State *L)
/* Reopen cache */
struct kr_cdb_opts opts = {
- (conf && strlen(conf)) ? conf : ".",
- cache_size
+ .is_cache = true,
+ .path = (conf && strlen(conf)) ? conf : ".",
+ .maxsize = cache_size,
};
int ret = kr_cache_open(&the_resolver->cache, api, &opts, &the_engine->pool);
if (ret != 0) {
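
The cache_open() binding above is what backs cache.open() in the Lua configuration; a minimal sketch of driving it, assuming the documented size/URI calling convention (the size and path are illustrative, not taken from this patch):

    -- minimal sketch, not part of this patch: open a 100 MB LMDB cache
    cache.open(100 * MB, 'lmdb:///var/cache/knot-resolver')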
diff --git a/daemon/bindings/net.c b/daemon/bindings/net.c
index 9a6374cd..d278ed17 100644
--- a/daemon/bindings/net.c
+++ b/daemon/bindings/net.c
@@ -717,7 +717,7 @@ static int net_tls_client(lua_State *L)
/* check that only allowed keys are present */
{
const char *bad_key = lua_table_checkindices(L, (const char *[])
- { "1", "hostname", "ca_file", "pin_sha256", "insecure", NULL });
+ { "1", "hostname", "ca_file", "pin_sha256", "insecure", "tls", NULL });
if (bad_key)
lua_error_p(L, "found unexpected key '%s'", bad_key);
}
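
The hunk above only whitelists 'tls' as an accepted key in the table argument of net.tls_client(); its value semantics are not visible here. For reference, a sketch of the table form that this key check validates (address and hostname are illustrative):

    -- sketch of the table form checked above; 'tls' is now accepted by the
    -- key check, but this hunk does not show what values it takes
    net.tls_client({'192.0.2.6', hostname='dns.example.net'})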
diff --git a/daemon/lua/kres-gen-30.lua b/daemon/lua/kres-gen-30.lua
index 77063727..97d32233 100644
--- a/daemon/lua/kres-gen-30.lua
+++ b/daemon/lua/kres-gen-30.lua
@@ -30,6 +30,10 @@ typedef struct {
uint32_t size;
knot_rdata_t *rdata;
} knot_rdataset_t;
+typedef struct knot_db_val {
+ void *data;
+ size_t len;
+} knot_db_val_t;
typedef struct knot_mm {
void *ctx, *alloc, *free;
@@ -134,6 +138,7 @@ struct kr_qflags {
_Bool NO_NS_FOUND : 1;
_Bool PKT_IS_SANE : 1;
_Bool DNS64_DISABLE : 1;
+ _Bool PASSTHRU_LEGACY : 1;
};
typedef struct ranked_rr_array_entry {
uint32_t qry_uid;
@@ -193,6 +198,23 @@ struct kr_request_qsource_flags {
_Bool http : 1;
_Bool xdp : 1;
};
+typedef unsigned long kr_rule_tags_t;
+struct kr_rule_zonefile_config {
+ const char *filename;
+ const char *input_str;
+ size_t input_len;
+ _Bool is_rpz;
+ _Bool nodata;
+ kr_rule_tags_t tags;
+ const char *origin;
+ uint32_t ttl;
+};
+struct kr_rule_fwd_flags {
+ _Bool is_auth : 1;
+ _Bool is_tcp : 1;
+ _Bool is_nods : 1;
+};
+typedef struct kr_rule_fwd_flags kr_rule_fwd_flags_t;
struct kr_extended_error {
int32_t info_code;
const char *extra_text;
@@ -239,6 +261,7 @@ struct kr_request {
unsigned int count_no_nsaddr;
unsigned int count_fail_row;
alloc_wire_f alloc_wire_cb;
+ kr_rule_tags_t rule_tags;
struct kr_extended_error extended_error;
};
enum kr_rank {KR_RANK_INITIAL, KR_RANK_OMIT, KR_RANK_TRY, KR_RANK_INDET = 4, KR_RANK_BOGUS, KR_RANK_MISMATCH, KR_RANK_MISSING, KR_RANK_INSECURE, KR_RANK_AUTH = 16, KR_RANK_SECURE = 32};
@@ -259,6 +282,7 @@ struct kr_cdb_stats {
uint64_t match_miss;
uint64_t read_leq;
uint64_t read_leq_miss;
+ uint64_t read_less;
double usage_percent;
};
typedef struct uv_timer_s uv_timer_t;
@@ -314,7 +338,14 @@ struct kr_server_selection {
struct local_state *local_state;
};
typedef int kr_log_level_t;
-enum kr_log_group {LOG_GRP_UNKNOWN = -1, LOG_GRP_SYSTEM = 1, LOG_GRP_CACHE, LOG_GRP_IO, LOG_GRP_NETWORK, LOG_GRP_TA, LOG_GRP_TLS, LOG_GRP_GNUTLS, LOG_GRP_TLSCLIENT, LOG_GRP_XDP, LOG_GRP_DOH, LOG_GRP_DNSSEC, LOG_GRP_HINT, LOG_GRP_PLAN, LOG_GRP_ITERATOR, LOG_GRP_VALIDATOR, LOG_GRP_RESOLVER, LOG_GRP_SELECTION, LOG_GRP_ZCUT, LOG_GRP_COOKIES, LOG_GRP_STATISTICS, LOG_GRP_REBIND, LOG_GRP_WORKER, LOG_GRP_POLICY, LOG_GRP_TASENTINEL, LOG_GRP_TASIGNALING, LOG_GRP_TAUPDATE, LOG_GRP_DAF, LOG_GRP_DETECTTIMEJUMP, LOG_GRP_DETECTTIMESKEW, LOG_GRP_GRAPHITE, LOG_GRP_PREFILL, LOG_GRP_PRIMING, LOG_GRP_SRVSTALE, LOG_GRP_WATCHDOG, LOG_GRP_NSID, LOG_GRP_DNSTAP, LOG_GRP_TESTS, LOG_GRP_DOTAUTH, LOG_GRP_HTTP, LOG_GRP_CONTROL, LOG_GRP_MODULE, LOG_GRP_DEVEL, LOG_GRP_RENUMBER, LOG_GRP_EDE, LOG_GRP_PROTOLAYER, LOG_GRP_REQDBG};
+enum kr_log_group {LOG_GRP_UNKNOWN = -1, LOG_GRP_SYSTEM = 1, LOG_GRP_CACHE, LOG_GRP_IO, LOG_GRP_NETWORK, LOG_GRP_TA, LOG_GRP_TLS, LOG_GRP_GNUTLS, LOG_GRP_TLSCLIENT, LOG_GRP_XDP, LOG_GRP_DOH, LOG_GRP_DNSSEC, LOG_GRP_HINT, LOG_GRP_PLAN, LOG_GRP_ITERATOR, LOG_GRP_VALIDATOR, LOG_GRP_RESOLVER, LOG_GRP_SELECTION, LOG_GRP_ZCUT, LOG_GRP_COOKIES, LOG_GRP_STATISTICS, LOG_GRP_REBIND, LOG_GRP_WORKER, LOG_GRP_POLICY, LOG_GRP_TASENTINEL, LOG_GRP_TASIGNALING, LOG_GRP_TAUPDATE, LOG_GRP_DAF, LOG_GRP_DETECTTIMEJUMP, LOG_GRP_DETECTTIMESKEW, LOG_GRP_GRAPHITE, LOG_GRP_PREFILL, LOG_GRP_PRIMING, LOG_GRP_SRVSTALE, LOG_GRP_WATCHDOG, LOG_GRP_NSID, LOG_GRP_DNSTAP, LOG_GRP_TESTS, LOG_GRP_DOTAUTH, LOG_GRP_HTTP, LOG_GRP_CONTROL, LOG_GRP_MODULE, LOG_GRP_DEVEL, LOG_GRP_RENUMBER, LOG_GRP_EDE, LOG_GRP_RULES, LOG_GRP_PROTOLAYER, LOG_GRP_REQDBG};
+struct kr_query_data_src {
+ _Bool initialized;
+ _Bool all_set;
+ uint8_t rule_depth;
+ kr_rule_fwd_flags_t flags;
+ knot_db_val_t targets_ptr;
+};
kr_layer_t kr_layer_t_static;
_Bool kr_dbg_assertion_abort;
@@ -341,6 +372,7 @@ struct kr_query {
struct timeval timestamp;
struct kr_zonecut zone_cut;
struct kr_layer_pickle *deferred;
+ struct kr_query_data_src data_src;
int8_t cname_depth;
struct kr_query *cname_parent;
struct kr_request *request;
@@ -457,6 +489,14 @@ int kr_cache_remove(struct kr_cache *, const knot_dname_t *, uint16_t);
int kr_cache_remove_subtree(struct kr_cache *, const knot_dname_t *, _Bool, int);
int kr_cache_commit(struct kr_cache *);
uint32_t packet_ttl(const knot_pkt_t *);
+int kr_rules_init(const char *, size_t);
+int kr_view_insert_action(const char *, const char *);
+int kr_view_select_action(const struct kr_request *, knot_db_val_t *);
+int kr_rule_tag_add(const char *, kr_rule_tags_t *);
+int kr_rule_local_data_emptyzone(const knot_dname_t *, kr_rule_tags_t);
+int kr_rule_local_data_nxdomain(const knot_dname_t *, kr_rule_tags_t);
+int kr_rule_zonefile(const struct kr_rule_zonefile_config *);
+int kr_rule_forward(const knot_dname_t *, kr_rule_fwd_flags_t, const struct sockaddr **);
typedef struct {
int sock_type;
_Bool tls;
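
Once kresd loads these declarations through ffi.cdef(), the new rule API becomes reachable from Lua as ffi.C.*. A hedged sketch against the signature declared above, int kr_rule_tag_add(const char *, kr_rule_tags_t *); the tag name is illustrative and runtime preconditions (e.g. prior rule-DB initialization) are not shown in this diff:

    local ffi = require('ffi')
    local tags = ffi.new('kr_rule_tags_t[1]')  -- out-parameter for the tag bits
    assert(ffi.C.kr_rule_tag_add('example-tag', tags) == 0)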
diff --git a/daemon/lua/kres-gen-31.lua b/daemon/lua/kres-gen-31.lua
index 2a90f839..91c7071a 100644
--- a/daemon/lua/kres-gen-31.lua
+++ b/daemon/lua/kres-gen-31.lua
@@ -30,6 +30,10 @@ typedef struct {
uint32_t size;
knot_rdata_t *rdata;
} knot_rdataset_t;
+typedef struct knot_db_val {
+ void *data;
+ size_t len;
+} knot_db_val_t;
typedef struct knot_mm {
void *ctx, *alloc, *free;
@@ -134,6 +138,7 @@ struct kr_qflags {
_Bool NO_NS_FOUND : 1;
_Bool PKT_IS_SANE : 1;
_Bool DNS64_DISABLE : 1;
+ _Bool PASSTHRU_LEGACY : 1;
};
typedef struct ranked_rr_array_entry {
uint32_t qry_uid;
@@ -193,6 +198,23 @@ struct kr_request_qsource_flags {
_Bool http : 1;
_Bool xdp : 1;
};
+typedef unsigned long kr_rule_tags_t;
+struct kr_rule_zonefile_config {
+ const char *filename;
+ const char *input_str;
+ size_t input_len;
+ _Bool is_rpz;
+ _Bool nodata;
+ kr_rule_tags_t tags;
+ const char *origin;
+ uint32_t ttl;
+};
+struct kr_rule_fwd_flags {
+ _Bool is_auth : 1;
+ _Bool is_tcp : 1;
+ _Bool is_nods : 1;
+};
+typedef struct kr_rule_fwd_flags kr_rule_fwd_flags_t;
struct kr_extended_error {
int32_t info_code;
const char *extra_text;
@@ -239,6 +261,7 @@ struct kr_request {
unsigned int count_no_nsaddr;
unsigned int count_fail_row;
alloc_wire_f alloc_wire_cb;
+ kr_rule_tags_t rule_tags;
struct kr_extended_error extended_error;
};
enum kr_rank {KR_RANK_INITIAL, KR_RANK_OMIT, KR_RANK_TRY, KR_RANK_INDET = 4, KR_RANK_BOGUS, KR_RANK_MISMATCH, KR_RANK_MISSING, KR_RANK_INSECURE, KR_RANK_AUTH = 16, KR_RANK_SECURE = 32};
@@ -259,6 +282,7 @@ struct kr_cdb_stats {
uint64_t match_miss;
uint64_t read_leq;
uint64_t read_leq_miss;
+ uint64_t read_less;
double usage_percent;
};
typedef struct uv_timer_s uv_timer_t;
@@ -314,7 +338,14 @@ struct kr_server_selection {
struct local_state *local_state;
};
typedef int kr_log_level_t;
-enum kr_log_group {LOG_GRP_UNKNOWN = -1, LOG_GRP_SYSTEM = 1, LOG_GRP_CACHE, LOG_GRP_IO, LOG_GRP_NETWORK, LOG_GRP_TA, LOG_GRP_TLS, LOG_GRP_GNUTLS, LOG_GRP_TLSCLIENT, LOG_GRP_XDP, LOG_GRP_DOH, LOG_GRP_DNSSEC, LOG_GRP_HINT, LOG_GRP_PLAN, LOG_GRP_ITERATOR, LOG_GRP_VALIDATOR, LOG_GRP_RESOLVER, LOG_GRP_SELECTION, LOG_GRP_ZCUT, LOG_GRP_COOKIES, LOG_GRP_STATISTICS, LOG_GRP_REBIND, LOG_GRP_WORKER, LOG_GRP_POLICY, LOG_GRP_TASENTINEL, LOG_GRP_TASIGNALING, LOG_GRP_TAUPDATE, LOG_GRP_DAF, LOG_GRP_DETECTTIMEJUMP, LOG_GRP_DETECTTIMESKEW, LOG_GRP_GRAPHITE, LOG_GRP_PREFILL, LOG_GRP_PRIMING, LOG_GRP_SRVSTALE, LOG_GRP_WATCHDOG, LOG_GRP_NSID, LOG_GRP_DNSTAP, LOG_GRP_TESTS, LOG_GRP_DOTAUTH, LOG_GRP_HTTP, LOG_GRP_CONTROL, LOG_GRP_MODULE, LOG_GRP_DEVEL, LOG_GRP_RENUMBER, LOG_GRP_EDE, LOG_GRP_PROTOLAYER, LOG_GRP_REQDBG};
+enum kr_log_group {LOG_GRP_UNKNOWN = -1, LOG_GRP_SYSTEM = 1, LOG_GRP_CACHE, LOG_GRP_IO, LOG_GRP_NETWORK, LOG_GRP_TA, LOG_GRP_TLS, LOG_GRP_GNUTLS, LOG_GRP_TLSCLIENT, LOG_GRP_XDP, LOG_GRP_DOH, LOG_GRP_DNSSEC, LOG_GRP_HINT, LOG_GRP_PLAN, LOG_GRP_ITERATOR, LOG_GRP_VALIDATOR, LOG_GRP_RESOLVER, LOG_GRP_SELECTION, LOG_GRP_ZCUT, LOG_GRP_COOKIES, LOG_GRP_STATISTICS, LOG_GRP_REBIND, LOG_GRP_WORKER, LOG_GRP_POLICY, LOG_GRP_TASENTINEL, LOG_GRP_TASIGNALING, LOG_GRP_TAUPDATE, LOG_GRP_DAF, LOG_GRP_DETECTTIMEJUMP, LOG_GRP_DETECTTIMESKEW, LOG_GRP_GRAPHITE, LOG_GRP_PREFILL, LOG_GRP_PRIMING, LOG_GRP_SRVSTALE, LOG_GRP_WATCHDOG, LOG_GRP_NSID, LOG_GRP_DNSTAP, LOG_GRP_TESTS, LOG_GRP_DOTAUTH, LOG_GRP_HTTP, LOG_GRP_CONTROL, LOG_GRP_MODULE, LOG_GRP_DEVEL, LOG_GRP_RENUMBER, LOG_GRP_EDE, LOG_GRP_RULES, LOG_GRP_PROTOLAYER, LOG_GRP_REQDBG};
+struct kr_query_data_src {
+ _Bool initialized;
+ _Bool all_set;
+ uint8_t rule_depth;
+ kr_rule_fwd_flags_t flags;
+ knot_db_val_t targets_ptr;
+};
kr_layer_t kr_layer_t_static;
_Bool kr_dbg_assertion_abort;
@@ -341,6 +372,7 @@ struct kr_query {
struct timeval timestamp;
struct kr_zonecut zone_cut;
struct kr_layer_pickle *deferred;
+ struct kr_query_data_src data_src;
int8_t cname_depth;
struct kr_query *cname_parent;
struct kr_request *request;
@@ -457,6 +489,14 @@ int kr_cache_remove(struct kr_cache *, const knot_dname_t *, uint16_t);
int kr_cache_remove_subtree(struct kr_cache *, const knot_dname_t *, _Bool, int);
int kr_cache_commit(struct kr_cache *);
uint32_t packet_ttl(const knot_pkt_t *);
+int kr_rules_init(const char *, size_t);
+int kr_view_insert_action(const char *, const char *);
+int kr_view_select_action(const struct kr_request *, knot_db_val_t *);
+int kr_rule_tag_add(const char *, kr_rule_tags_t *);
+int kr_rule_local_data_emptyzone(const knot_dname_t *, kr_rule_tags_t);
+int kr_rule_local_data_nxdomain(const knot_dname_t *, kr_rule_tags_t);
+int kr_rule_zonefile(const struct kr_rule_zonefile_config *);
+int kr_rule_forward(const knot_dname_t *, kr_rule_fwd_flags_t, const struct sockaddr **);
typedef struct {
int sock_type;
_Bool tls;
diff --git a/daemon/lua/kres-gen-32.lua b/daemon/lua/kres-gen-32.lua
index 5c7ab7df..c87fc5a6 100644
--- a/daemon/lua/kres-gen-32.lua
+++ b/daemon/lua/kres-gen-32.lua
@@ -30,6 +30,10 @@ typedef struct {
uint32_t size;
knot_rdata_t *rdata;
} knot_rdataset_t;
+typedef struct knot_db_val {
+ void *data;
+ size_t len;
+} knot_db_val_t;
typedef struct knot_mm {
void *ctx, *alloc, *free;
@@ -135,6 +139,7 @@ struct kr_qflags {
_Bool NO_NS_FOUND : 1;
_Bool PKT_IS_SANE : 1;
_Bool DNS64_DISABLE : 1;
+ _Bool PASSTHRU_LEGACY : 1;
};
typedef struct ranked_rr_array_entry {
uint32_t qry_uid;
@@ -194,6 +199,23 @@ struct kr_request_qsource_flags {
_Bool http : 1;
_Bool xdp : 1;
};
+typedef unsigned long kr_rule_tags_t;
+struct kr_rule_zonefile_config {
+ const char *filename;
+ const char *input_str;
+ size_t input_len;
+ _Bool is_rpz;
+ _Bool nodata;
+ kr_rule_tags_t tags;
+ const char *origin;
+ uint32_t ttl;
+};
+struct kr_rule_fwd_flags {
+ _Bool is_auth : 1;
+ _Bool is_tcp : 1;
+ _Bool is_nods : 1;
+};
+typedef struct kr_rule_fwd_flags kr_rule_fwd_flags_t;
struct kr_extended_error {
int32_t info_code;
const char *extra_text;
@@ -240,6 +262,7 @@ struct kr_request {
unsigned int count_no_nsaddr;
unsigned int count_fail_row;
alloc_wire_f alloc_wire_cb;
+ kr_rule_tags_t rule_tags;
struct kr_extended_error extended_error;
};
enum kr_rank {KR_RANK_INITIAL, KR_RANK_OMIT, KR_RANK_TRY, KR_RANK_INDET = 4, KR_RANK_BOGUS, KR_RANK_MISMATCH, KR_RANK_MISSING, KR_RANK_INSECURE, KR_RANK_AUTH = 16, KR_RANK_SECURE = 32};
@@ -260,6 +283,7 @@ struct kr_cdb_stats {
uint64_t match_miss;
uint64_t read_leq;
uint64_t read_leq_miss;
+ uint64_t read_less;
double usage_percent;
};
typedef struct uv_timer_s uv_timer_t;
@@ -315,7 +339,14 @@ struct kr_server_selection {
struct local_state *local_state;
};
typedef int kr_log_level_t;
-enum kr_log_group {LOG_GRP_UNKNOWN = -1, LOG_GRP_SYSTEM = 1, LOG_GRP_CACHE, LOG_GRP_IO, LOG_GRP_NETWORK, LOG_GRP_TA, LOG_GRP_TLS, LOG_GRP_GNUTLS, LOG_GRP_TLSCLIENT, LOG_GRP_XDP, LOG_GRP_DOH, LOG_GRP_DNSSEC, LOG_GRP_HINT, LOG_GRP_PLAN, LOG_GRP_ITERATOR, LOG_GRP_VALIDATOR, LOG_GRP_RESOLVER, LOG_GRP_SELECTION, LOG_GRP_ZCUT, LOG_GRP_COOKIES, LOG_GRP_STATISTICS, LOG_GRP_REBIND, LOG_GRP_WORKER, LOG_GRP_POLICY, LOG_GRP_TASENTINEL, LOG_GRP_TASIGNALING, LOG_GRP_TAUPDATE, LOG_GRP_DAF, LOG_GRP_DETECTTIMEJUMP, LOG_GRP_DETECTTIMESKEW, LOG_GRP_GRAPHITE, LOG_GRP_PREFILL, LOG_GRP_PRIMING, LOG_GRP_SRVSTALE, LOG_GRP_WATCHDOG, LOG_GRP_NSID, LOG_GRP_DNSTAP, LOG_GRP_TESTS, LOG_GRP_DOTAUTH, LOG_GRP_HTTP, LOG_GRP_CONTROL, LOG_GRP_MODULE, LOG_GRP_DEVEL, LOG_GRP_RENUMBER, LOG_GRP_EDE, LOG_GRP_PROTOLAYER, LOG_GRP_REQDBG};
+enum kr_log_group {LOG_GRP_UNKNOWN = -1, LOG_GRP_SYSTEM = 1, LOG_GRP_CACHE, LOG_GRP_IO, LOG_GRP_NETWORK, LOG_GRP_TA, LOG_GRP_TLS, LOG_GRP_GNUTLS, LOG_GRP_TLSCLIENT, LOG_GRP_XDP, LOG_GRP_DOH, LOG_GRP_DNSSEC, LOG_GRP_HINT, LOG_GRP_PLAN, LOG_GRP_ITERATOR, LOG_GRP_VALIDATOR, LOG_GRP_RESOLVER, LOG_GRP_SELECTION, LOG_GRP_ZCUT, LOG_GRP_COOKIES, LOG_GRP_STATISTICS, LOG_GRP_REBIND, LOG_GRP_WORKER, LOG_GRP_POLICY, LOG_GRP_TASENTINEL, LOG_GRP_TASIGNALING, LOG_GRP_TAUPDATE, LOG_GRP_DAF, LOG_GRP_DETECTTIMEJUMP, LOG_GRP_DETECTTIMESKEW, LOG_GRP_GRAPHITE, LOG_GRP_PREFILL, LOG_GRP_PRIMING, LOG_GRP_SRVSTALE, LOG_GRP_WATCHDOG, LOG_GRP_NSID, LOG_GRP_DNSTAP, LOG_GRP_TESTS, LOG_GRP_DOTAUTH, LOG_GRP_HTTP, LOG_GRP_CONTROL, LOG_GRP_MODULE, LOG_GRP_DEVEL, LOG_GRP_RENUMBER, LOG_GRP_EDE, LOG_GRP_RULES, LOG_GRP_PROTOLAYER, LOG_GRP_REQDBG};
+struct kr_query_data_src {
+ _Bool initialized;
+ _Bool all_set;
+ uint8_t rule_depth;
+ kr_rule_fwd_flags_t flags;
+ knot_db_val_t targets_ptr;
+};
kr_layer_t kr_layer_t_static;
_Bool kr_dbg_assertion_abort;
@@ -342,6 +373,7 @@ struct kr_query {
struct timeval timestamp;
struct kr_zonecut zone_cut;
struct kr_layer_pickle *deferred;
+ struct kr_query_data_src data_src;
int8_t cname_depth;
struct kr_query *cname_parent;
struct kr_request *request;
@@ -458,6 +490,14 @@ int kr_cache_remove(struct kr_cache *, const knot_dname_t *, uint16_t);
int kr_cache_remove_subtree(struct kr_cache *, const knot_dname_t *, _Bool, int);
int kr_cache_commit(struct kr_cache *);
uint32_t packet_ttl(const knot_pkt_t *);
+int kr_rules_init(const char *, size_t);
+int kr_view_insert_action(const char *, const char *);
+int kr_view_select_action(const struct kr_request *, knot_db_val_t *);
+int kr_rule_tag_add(const char *, kr_rule_tags_t *);
+int kr_rule_local_data_emptyzone(const knot_dname_t *, kr_rule_tags_t);
+int kr_rule_local_data_nxdomain(const knot_dname_t *, kr_rule_tags_t);
+int kr_rule_zonefile(const struct kr_rule_zonefile_config *);
+int kr_rule_forward(const knot_dname_t *, kr_rule_fwd_flags_t, const struct sockaddr **);
typedef struct {
int sock_type;
_Bool tls;
diff --git a/daemon/lua/kres-gen.sh b/daemon/lua/kres-gen.sh
index 94cb448d..00dae4ff 100755
--- a/daemon/lua/kres-gen.sh
+++ b/daemon/lua/kres-gen.sh
@@ -69,12 +69,13 @@ struct kr_cdb_api {};
struct lru {};
"
-${CDEFS} ${LIBKRES} types <<-EOF
+${CDEFS} libknot types <<-EOF
knot_section_t
knot_rrinfo_t
knot_dname_t
knot_rdata_t
knot_rdataset_t
+ knot_db_val_t
EOF
# The generator doesn't work well with typedefs of functions.
@@ -125,6 +126,10 @@ ${CDEFS} ${LIBKRES} types <<-EOF
kr_qarray_t
struct kr_rplan
struct kr_request_qsource_flags
+ kr_rule_tags_t
+ struct kr_rule_zonefile_config
+ struct kr_rule_fwd_flags
+ typedef kr_rule_fwd_flags_t
struct kr_extended_error
struct kr_request
enum kr_rank
@@ -141,6 +146,7 @@ ${CDEFS} ${LIBKRES} types <<-EOF
struct kr_server_selection
kr_log_level_t
enum kr_log_group
+ struct kr_query_data_src
EOF
# static variables; these lines might not be simple to generate
@@ -282,6 +288,15 @@ ${CDEFS} ${LIBKRES} functions <<-EOF
kr_cache_commit
# FIXME: perhaps rename this exported symbol
packet_ttl
+# New policy
+ kr_rules_init
+ kr_view_insert_action
+ kr_view_select_action
+ kr_rule_tag_add
+ kr_rule_local_data_emptyzone
+ kr_rule_local_data_nxdomain
+ kr_rule_zonefile
+ kr_rule_forward
EOF
diff --git a/daemon/main.c b/daemon/main.c
index af7c920e..efbe8e55 100644
--- a/daemon/main.c
+++ b/daemon/main.c
@@ -18,6 +18,8 @@
#include "lib/defines.h"
#include "lib/dnssec.h"
#include "lib/log.h"
+#include "lib/resolve.h"
+#include "lib/rules/api.h"
#include <arpa/inet.h>
#include <getopt.h>
@@ -69,6 +71,12 @@ KR_EXPORT const char *malloc_conf = "narenas:1";
#define TCP_BACKLOG_DEFAULT 128
#endif
+/** I don't know why the linker is dropping this _zonefile function otherwise. TODO: revisit. */

+KR_EXPORT void kr_misc_unused(void)
+{
+ kr_rule_zonefile(NULL);
+}
+
struct args the_args_value; /** Static allocation for the_args singleton. */
static void signal_handler(uv_signal_t *handle, int signum)
@@ -609,6 +617,14 @@ int main(int argc, char **argv)
lua_settop(the_engine->L, 0);
}
+ ret = kr_rules_init_ensure();
+ if (ret) {
+ kr_log_error(RULES, "failed to initialize policy rule engine: %s\n",
+ kr_strerror(ret));
+ ret = EXIT_FAILURE;
+ goto cleanup;
+ }
+
drop_capabilities();
if (engine_start() != 0) {
@@ -621,6 +637,9 @@ int main(int argc, char **argv)
goto cleanup;
}
+ /* Starting everything succeeded, so commit rule DB changes. */
+ kr_rules_commit(true);
+
/* Run the event loop */
ret = run_worker(loop, fork_id == 0, the_args);
@@ -631,6 +650,8 @@ cleanup:/* Cleanup. */
worker_deinit();
engine_deinit();
network_deinit();
+ kr_rules_commit(false);
+ kr_rules_deinit();
if (loop != NULL) {
uv_loop_close(loop);
}
diff --git a/daemon/scripting.rst b/daemon/scripting.rst
index 1950a2be..b2eab0fc 100644
--- a/daemon/scripting.rst
+++ b/daemon/scripting.rst
@@ -30,7 +30,7 @@ another program, e.g. a maintenance script.
:ref:`systemd-multiple-instances`.
When Knot Resolver is started using Systemd (see section
-:ref:`quickstart-startup`) it creates a control socket in path
+:ref:`gettingstarted-startup`) it creates a control socket in path
``/run/knot-resolver/control/$ID``. Connection to the socket can
be made from command line using e.g. ``socat``:
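
Input sent over the control socket is evaluated as Lua by the resolver, so after connecting one can type ordinary configuration calls; for example (illustrative, not part of this patch):

    print(worker.id)  -- identify which instance answered
    cache.stats()     -- inspect cache counters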
diff --git a/distro/pkg/deb/clean b/distro/pkg/deb/clean
index 3c2f3ba3..574b40f8 100644
--- a/distro/pkg/deb/clean
+++ b/distro/pkg/deb/clean
@@ -1,3 +1 @@
build_deb/
-doc/doxyxml/
-doc/html/
diff --git a/distro/pkg/deb/control b/distro/pkg/deb/control
index 1f9964d2..c4d1edb2 100644
--- a/distro/pkg/deb/control
+++ b/distro/pkg/deb/control
@@ -4,6 +4,7 @@ Priority: optional
Maintainer: Knot Resolver <knot-resolver@labs.nic.cz>
Build-Depends:
debhelper (>= 9~),
+ dh-python,
libcmocka-dev (>= 1.0.0),
libedit-dev,
libfstrm-dev,
@@ -20,12 +21,10 @@ Build-Depends:
luajit,
pkg-config,
meson (>= 0.49),
- doxygen,
protobuf-c-compiler,
- python3-breathe,
- python3-sphinx,
- python3-sphinx-rtd-theme,
- texinfo,
+ python3-all,
+ python3-dev,
+ python3-setuptools,
libssl-dev,
Homepage: https://www.knot-resolver.cz/
@@ -122,19 +121,16 @@ Description: HTTP module for Knot Resolver
This package contains HTTP/2 module for local visualization of the
resolver cache and queries.
-Package: knot-resolver-doc
+Package: python3-knot-resolver-manager
Architecture: all
-Section: doc
+Provides: knot-resolver-manager
Depends:
- libjs-jquery,
- libjs-underscore,
+ knot-resolver (= ${binary:Version}),
${misc:Depends},
-Description: Documentation for Knot Resolver
- The Knot Resolver is a caching full resolver implementation
- written in C and LuaJIT, including both a resolver library and a
- daemon. Modular architecture of the library keeps the core tiny and
- efficient, and provides a state-machine like API for
- extensions. There are three built-in modules - iterator, cache,
- validator, and many external.
- .
- This package contains Knot Resolver Documentation.
+ ${python3:Depends},
+Section: python
+Description: Configuration tool for Knot Resolver
+ Knot Resolver Manager is a configuration tool for Knot Resolver. The Manager
+ hides the complexity of running several independent resolver processes while
+ ensuring zero-downtime reconfiguration with YAML/JSON declarative
+ configuration and an optional HTTP API for dynamic changes.
diff --git a/distro/pkg/deb/knot-resolver-doc.doc-base b/distro/pkg/deb/knot-resolver-doc.doc-base
deleted file mode 100644
index 9cd0fdf0..00000000
--- a/distro/pkg/deb/knot-resolver-doc.doc-base
+++ /dev/null
@@ -1,11 +0,0 @@
-Document: knot-resolver
-Title: Knot Resolver documentation
-Author: CZ.NIC labs
-Abstract: Documentation for the Knot Resolver,
- including building from source, using the library,
- and configuration and operation of the daemon.
-Section: Network/Communication
-
-Format: HTML
-Index: /usr/share/doc/knot-resolver/html/index.html
-Files: /usr/share/doc/knot-resolver/html/*.html
diff --git a/distro/pkg/deb/knot-resolver-doc.docs b/distro/pkg/deb/knot-resolver-doc.docs
deleted file mode 100644
index baa81f7c..00000000
--- a/distro/pkg/deb/knot-resolver-doc.docs
+++ /dev/null
@@ -1 +0,0 @@
-debian/tmp/usr/share/doc/knot-resolver/html/*
diff --git a/distro/pkg/deb/knot-resolver-doc.info b/distro/pkg/deb/knot-resolver-doc.info
deleted file mode 100644
index 2283d88e..00000000
--- a/distro/pkg/deb/knot-resolver-doc.info
+++ /dev/null
@@ -1,2 +0,0 @@
-debian/tmp/usr/share/info/knot-resolver.info
-debian/tmp/usr/share/info/knot-resolver-figures/*
diff --git a/distro/pkg/deb/knot-resolver-doc.links b/distro/pkg/deb/knot-resolver-doc.links
deleted file mode 100644
index 25e95848..00000000
--- a/distro/pkg/deb/knot-resolver-doc.links
+++ /dev/null
@@ -1,2 +0,0 @@
-usr/share/javascript/jquery/jquery.min.js usr/share/doc/knot-resolver/html/_static/jquery.js
-usr/share/javascript/underscore/underscore.min.js usr/share/doc/knot-resolver/html/_static/underscore.js
diff --git a/distro/pkg/deb/not-installed b/distro/pkg/deb/not-installed
index f527e79f..ceb8f20d 100644
--- a/distro/pkg/deb/not-installed
+++ b/distro/pkg/deb/not-installed
@@ -1,6 +1,5 @@
usr/lib/knot-resolver/kres_modules/http/LICENSE
usr/lib/knot-resolver/kres_modules/etcd.lua
-debian/tmp/usr/share/doc/knot-resolver/html/.buildinfo
usr/include/libkres/*.h
usr/lib/*.so
usr/lib/pkgconfig/libkres.pc
diff --git a/distro/pkg/deb/python3-knot-resolver-manager.install b/distro/pkg/deb/python3-knot-resolver-manager.install
new file mode 100644
index 00000000..3ec23ee1
--- /dev/null
+++ b/distro/pkg/deb/python3-knot-resolver-manager.install
@@ -0,0 +1,4 @@
+etc/knot-resolver/config.yml
+usr/lib/systemd/system/knot-resolver.service
+usr/share/bash-completion/completions/kresctl
+usr/share/fish/completions/kresctl.fish
diff --git a/distro/pkg/deb/python3-knot-resolver-manager.manpages b/distro/pkg/deb/python3-knot-resolver-manager.manpages
new file mode 100644
index 00000000..a453f7e9
--- /dev/null
+++ b/distro/pkg/deb/python3-knot-resolver-manager.manpages
@@ -0,0 +1 @@
+debian/tmp/usr/share/man/man8/kresctl.8*
diff --git a/distro/pkg/deb/rules b/distro/pkg/deb/rules
index c5774760..787dad99 100755
--- a/distro/pkg/deb/rules
+++ b/distro/pkg/deb/rules
@@ -2,7 +2,7 @@
# SPDX-License-Identifier: GPL-3.0-or-later
# see FEATURE AREAS in dpkg-buildflags(1)
-export DEB_BUILD_MAINT_OPTIONS = hardening=+all,-pie
+export DEB_BUILD_MAINT_OPTIONS = hardening=+all
# see ENVIRONMENT in dpkg-buildflags(1)
# package maintainers to append CFLAGS
@@ -10,21 +10,23 @@ export DEB_CFLAGS_MAINT_APPEND = -Wall -pedantic -fno-omit-frame-pointer
# package maintainers to append LDFLAGS
export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed
+export PYBUILD_NAME=knot_resolver_manager
+
# see EXAMPLES in dpkg-buildflags(1) and read /usr/share/dpkg/*
DPKG_EXPORT_BUILDFLAGS = 1
include /usr/share/dpkg/default.mk
export ARCH=$(DEB_HOST_GNU_CPU)
+
%:
- dh $@
+ dh $@ --with python3
override_dh_auto_build:
meson build_deb \
--buildtype=plain \
--prefix=/usr \
--libdir=lib \
- -Ddoc=enabled \
-Dsystemd_files=enabled \
-Dclient=enabled \
-Ddnstap=enabled \
@@ -36,10 +38,14 @@ override_dh_auto_build:
-Dc_args="$${CFLAGS}" \
-Dc_link_args="$${LDFLAGS}"
ninja -v -C build_deb
- ninja -v -C build_deb doc
+ dh_auto_build --buildsystem=pybuild --sourcedirectory manager
override_dh_auto_install:
DESTDIR="${PWD}/debian/tmp" ninja -v -C build_deb install
+ dh_auto_install --buildsystem=pybuild --sourcedirectory manager
+ install -m 644 -D $(CURDIR)/manager/etc/knot-resolver/config.yml $(CURDIR)/debian/tmp/etc/knot-resolver/config.yml
+ install -m 644 -D $(CURDIR)/manager/shell-completion/client.bash $(CURDIR)/debian/tmp/usr/share/bash-completion/completions/kresctl
+ install -m 644 -D $(CURDIR)/manager/shell-completion/client.fish $(CURDIR)/debian/tmp/usr/share/fish/completions/kresctl.fish
override_dh_auto_test:
meson test -C build_deb
diff --git a/distro/pkg/rpm/knot-resolver.spec b/distro/pkg/rpm/knot-resolver.spec
index 0a7e2838..0b60b6b7 100644
--- a/distro/pkg/rpm/knot-resolver.spec
+++ b/distro/pkg/rpm/knot-resolver.spec
@@ -49,6 +49,7 @@ BuildRequires: pkgconfig(libcap-ng)
BuildRequires: pkgconfig(libuv)
BuildRequires: pkgconfig(luajit) >= 2.0
BuildRequires: jemalloc-devel
+BuildRequires: python3-devel
Requires: systemd
Requires(post): systemd
@@ -71,7 +72,6 @@ Requires(pre): shadow-utils
%endif
%if 0%{?fedora} || 0%{?rhel} > 7
BuildRequires: pkgconfig(lmdb)
-BuildRequires: python3-sphinx
Requires: lua5.1-basexx
Requires: lua5.1-cqueues
Requires: lua5.1-http
@@ -87,21 +87,9 @@ BuildRequires: openssl-devel
%if 0%{?suse_version}
%define NINJA ninja
BuildRequires: lmdb-devel
-BuildRequires: python3-Sphinx
Requires(pre): shadow
%endif
-%if "x%{?rhel}" == "x"
-# dependencies for doc package
-# NOTE: doc isn't possible to build on CentOS 7, 8
-# python2-sphinx is too old and python36-breathe is broken on CentOS 7
-# python3-breathe isn't available for CentOS 8 (yet? rhbz#1808766)
-BuildRequires: doxygen
-BuildRequires: python3-breathe
-BuildRequires: python3-sphinx_rtd_theme
-BuildRequires: texinfo
-%endif
-
%description
The Knot Resolver is a DNSSEC-enabled caching full resolver implementation
written in C and LuaJIT, including both a resolver library and a daemon.
@@ -119,16 +107,6 @@ Requires: %{name}%{?_isa} = %{version}-%{release}
%description devel
The package contains development headers for Knot Resolver.
-%if "x%{?rhel}" == "x"
-%package doc
-Summary: Documentation for Knot Resolver
-BuildArch: noarch
-Requires: %{name} = %{version}-%{release}
-
-%description doc
-Documentation for Knot Resolver
-%endif
-
%if "x%{?suse_version}" == "x"
%package module-dnstap
Summary: dnstap module for Knot Resolver
@@ -159,6 +137,32 @@ queries. It can also serve DNS-over-HTTPS, but it is deprecated in favor of
native C implementation, which doesn't require this package.
%endif
+%package -n python3-knot-resolver-manager
+Summary: Configuration tool for Knot Resolver
+Requires: %{name} = %{version}-%{release}
+%if 0%{?rhel} == 8
+Requires: python3
+Requires: python3-pyyaml
+Requires: python3-aiohttp
+Requires: python3-typing-extensions
+Requires: python3-prometheus_client
+Requires: supervisor
+%endif
+%if 0%{?suse_version}
+Requires: python3
+Requires: python3-PyYAML
+Requires: python3-aiohttp
+Requires: python3-typing_extensions
+Requires: python3-prometheus_client
+Requires: supervisor
+%endif
+
+%description -n python3-knot-resolver-manager
+Knot Resolver Manager is a configuration tool for Knot Resolver. The Manager
+hides the complexity of running several independent resolver processes while
+ensuring zero-downtime reconfiguration with YAML/JSON declarative
+configuration and an optional HTTP API for dynamic changes.
+
%prep
%if 0%{GPG_CHECK}
export GNUPGHOME=./gpg-keyring
@@ -170,9 +174,6 @@ gpg2 --verify %{SOURCE1} %{SOURCE0}
%build
CFLAGS="%{optflags}" LDFLAGS="%{?__global_ldflags}" meson build_rpm \
-%if "x%{?rhel}" == "x"
- -Ddoc=enabled \
-%endif
-Dsystemd_files=enabled \
-Dclient=enabled \
%if "x%{?suse_version}" == "x"
@@ -192,9 +193,10 @@ CFLAGS="%{optflags}" LDFLAGS="%{?__global_ldflags}" meson build_rpm \
--sysconfdir="%{_sysconfdir}" \
%{NINJA} -v -C build_rpm
-%if "x%{?rhel}" == "x"
-%{NINJA} -v -C build_rpm doc
-%endif
+
+pushd manager
+%py3_build
+popd
%check
meson test -C build_rpm
@@ -225,6 +227,15 @@ install -m 755 -d %{buildroot}/%{_pkgdocdir}
mv %{buildroot}/%{_datadir}/doc/%{name}/* %{buildroot}/%{_pkgdocdir}/
%endif
+# install knot-resolver-manager
+pushd manager
+%py3_install
+install -m 644 -D etc/knot-resolver/config.yml %{buildroot}%{_sysconfdir}/knot-resolver/config.yml
+install -m 644 -D shell-completion/client.bash %{buildroot}%{_datarootdir}/bash-completion/completions/kresctl
+install -m 644 -D shell-completion/client.fish %{buildroot}%{_datarootdir}/fish/completions/kresctl.fish
+
+popd
+
%pre
getent group knot-resolver >/dev/null || groupadd -r knot-resolver
getent passwd knot-resolver >/dev/null || useradd -r -g knot-resolver -d %{_sysconfdir}/knot-resolver -s /sbin/nologin -c "Knot Resolver" knot-resolver
@@ -350,21 +361,13 @@ fi
%{_libdir}/knot-resolver/kres_modules/watchdog.lua
%{_libdir}/knot-resolver/kres_modules/workarounds.lua
%{_mandir}/man8/kresd.8.gz
+%{_mandir}/man8/kresctl.8.gz
%files devel
%{_includedir}/libkres
%{_libdir}/pkgconfig/libkres.pc
%{_libdir}/libkres.so
-%if "x%{?rhel}" == "x"
-%files doc
-%dir %{_pkgdocdir}
-%doc %{_pkgdocdir}/html
-%doc %{_datadir}/info/knot-resolver.info*
-%dir %{_datadir}/info/knot-resolver-figures
-%doc %{_datadir}/info/knot-resolver-figures/*
-%endif
-
%if "x%{?suse_version}" == "x"
%files module-dnstap
%{_libdir}/knot-resolver/kres_modules/dnstap.so
@@ -378,6 +381,15 @@ fi
%{_libdir}/knot-resolver/kres_modules/prometheus.lua
%endif
+%files -n python3-knot-resolver-manager
+%{python3_sitearch}/knot_resolver_manager*
+%{_sysconfdir}/knot-resolver/config.yml
+%{_unitdir}/knot-resolver.service
+%{_bindir}/kresctl
+%{_bindir}/knot-resolver
+%{_datarootdir}/bash-completion/completions/kresctl
+%{_datarootdir}/fish/completions/kresctl.fish
+
%changelog
* {{ now }} Jakub Ružička <jakub.ruzicka@nic.cz> - {{ version }}-{{ release }}
- upstream package
diff --git a/distro/tests/.ansible.cfg b/distro/tests/.ansible.cfg
deleted file mode 100644
index eef20150..00000000
--- a/distro/tests/.ansible.cfg
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-[defaults]
-
-# additional paths to search for roles in, colon separated
-roles_path = ../ansible-roles
-interpreter_python = auto
-stdout_callback=debug
diff --git a/distro/tests/README.md b/distro/tests/README.md
deleted file mode 100644
index a1a5e96a..00000000
--- a/distro/tests/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-Requirements
-------------
-
-- ansible
-- vagrant
-- libvirt (+vagrant-libvirt) / virtualbox
-
-Usage
------
-
-`vagrant up` command is configured to trigger ansible provisioning
-which configures OBS repository, installs the knot-resolver package,
-starts the kresd@1 service and finally attempts to use it to resolve
-a domain name. It also tests that DNSSEC validation is turned on.
-
-By default, the *knot-resolver-devel* repo (for knot-resolver) along
-with *knot-resolver-latest* (for knot) is used. To test only the
-*knot-resolver-latest* repo, set it in `repos.yaml` (or use the
-test-distro.sh script which overwrites this file). If you're running
-tests in parallel, they all HAVE TO use the same repo(s).
-
-Run the following command for every distro (aka directory with
-Vagrantfile):
-
-```
-./test-distro.sh knot-resolver-devel debian9
-```
-
-or
-
-```
-./test-distro.sh knot-resolver-testing debian9
-```
-
-or
-
-```
-./test-distro.sh knot-resolver-latest debian9
-```
-
-At the end of the test, the package version that was tested is
-printed out. Make sure you're testing what you intended to.
diff --git a/distro/tests/ansible-roles/knot_resolver/defaults/main.yaml b/distro/tests/ansible-roles/knot_resolver/defaults/main.yaml
deleted file mode 100644
index 0860c26b..00000000
--- a/distro/tests/ansible-roles/knot_resolver/defaults/main.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-repos:
- - knot-resolver-latest
-distro: "{{ ansible_distribution | replace(' ', '_') }}"
-update_packages: false
diff --git a/distro/tests/ansible-roles/knot_resolver/tasks/configure_dnstap.yaml b/distro/tests/ansible-roles/knot_resolver/tasks/configure_dnstap.yaml
deleted file mode 100644
index 817b1179..00000000
--- a/distro/tests/ansible-roles/knot_resolver/tasks/configure_dnstap.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: dnstap_config set up kresd.conf
- blockinfile:
- marker: -- {mark} ANSIBLE MANAGED BLOCK
- block: |
- modules.load('dnstap')
- assert(dnstap)
- path: /etc/knot-resolver/kresd.conf
- insertbefore: BOF
diff --git a/distro/tests/ansible-roles/knot_resolver/tasks/configure_doh.yaml b/distro/tests/ansible-roles/knot_resolver/tasks/configure_doh.yaml
deleted file mode 100644
index cd4e7492..00000000
--- a/distro/tests/ansible-roles/knot_resolver/tasks/configure_doh.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: doh_config set up kresd.conf
- blockinfile:
- marker: -- {mark} ANSIBLE MANAGED BLOCK
- block: |
- net.listen('127.0.0.1', 44353, { kind = 'doh_legacy' })
- modules.load('http')
- path: /etc/knot-resolver/kresd.conf
- insertbefore: BOF
diff --git a/distro/tests/ansible-roles/knot_resolver/tasks/configure_doh2.yaml b/distro/tests/ansible-roles/knot_resolver/tasks/configure_doh2.yaml
deleted file mode 100644
index eebca204..00000000
--- a/distro/tests/ansible-roles/knot_resolver/tasks/configure_doh2.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: doh2_config set up kresd.conf
- blockinfile:
- marker: -- {mark} ANSIBLE MANAGED BLOCK
- block: |
- net.listen('127.0.0.1', 44354, { kind = 'doh2' })
- path: /etc/knot-resolver/kresd.conf
diff --git a/distro/tests/ansible-roles/knot_resolver/tasks/main.yaml b/distro/tests/ansible-roles/knot_resolver/tasks/main.yaml
deleted file mode 100644
index 8d683c80..00000000
--- a/distro/tests/ansible-roles/knot_resolver/tasks/main.yaml
+++ /dev/null
@@ -1,71 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: Include distribution specific vars
- include_vars: "{{ distro }}.yaml"
-
-- name: Update all packages
- package:
- name: '*'
- state: latest
- when: update_packages|bool
-
-- name: Install packages
- package:
- name: "{{ packages }}"
- state: latest
- # knot-utils may be missing on opensuse (depending on upstream vs downstream pkg)
- failed_when: false
-
-- name: Always print package version at the end
- block:
-
- - include: restart_kresd.yaml
-
- - include: test_udp.yaml
- - include: test_tcp.yaml
- - include: test_tls.yaml
- - include: test_dnssec.yaml
-
- - include: test_kres_cache_gc.yaml
-
- - name: Test DoH (new implementation)
- block:
- - include: configure_doh2.yaml
- - include: restart_kresd.yaml
- - include: test_doh2.yaml
-
- - name: Test DoH (legacy)
- block:
- - name: Install knot-resolver-module-http
- package:
- name: knot-resolver-module-http
- state: latest
-
- - include: configure_doh.yaml
- when: ansible_distribution in ["CentOS", "Rocky", "Fedora", "Debian", "Ubuntu"]
-
- - include: restart_kresd.yaml
- - include: test_doh.yaml
- when: distro in ["Fedora", "Debian", "CentOS", "Rocky"] or (distro == "Ubuntu" and ansible_distribution_major_version|int >= 18)
-
- - name: Test dnstap module
- block:
- - name: Install knot-resolver-module-dnstap
- package:
- name: knot-resolver-module-dnstap
- state: latest
- - include: configure_dnstap.yaml
- - include: restart_kresd.yaml
- when: distro in ["Fedora", "Debian", "CentOS", "Rocky", "Ubuntu"]
-
- always:
-
- - name: Get installed package version
- shell: "{{ show_package_version }}"
- args:
- warn: false
- register: package_version
-
- - name: Show installed version
- debug:
- var: package_version.stdout
diff --git a/distro/tests/ansible-roles/knot_resolver/tasks/restart_kresd.yaml b/distro/tests/ansible-roles/knot_resolver/tasks/restart_kresd.yaml
deleted file mode 100644
index 00dbf5db..00000000
--- a/distro/tests/ansible-roles/knot_resolver/tasks/restart_kresd.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- block:
- - name: Restart kresd@1.service
- service:
- name: kresd@1.service
- state: restarted
- rescue:
- - name: Get kresd@1.service journal
- shell: journalctl -u kresd@1 --since -20s
- register: journal
- - name: Print journal
- debug:
- var: journal
- - name: Restart kresd@*.service failed, see log above
- shell: /bin/false
diff --git a/distro/tests/ansible-roles/knot_resolver/tasks/test_dnssec.yaml b/distro/tests/ansible-roles/knot_resolver/tasks/test_dnssec.yaml
deleted file mode 100644
index 1cc6ea39..00000000
--- a/distro/tests/ansible-roles/knot_resolver/tasks/test_dnssec.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: dnssec_test rhybar.cz. +cd returns NOERROR
- tags:
- - test
- shell: kdig +cd @127.0.0.1 rhybar.cz.
- register: res
- failed_when: '"status: NOERROR" not in res.stdout'
-
-- name: dnssec_test rhybar.cz. returns SERVFAIL
- tags:
- - test
- shell: kdig +timeout=16 @127.0.0.1 rhybar.cz.
- register: res
- failed_when: '"status: SERVFAIL" not in res.stdout'
diff --git a/distro/tests/ansible-roles/knot_resolver/tasks/test_doh.yaml b/distro/tests/ansible-roles/knot_resolver/tasks/test_doh.yaml
deleted file mode 100644
index 2c200e17..00000000
--- a/distro/tests/ansible-roles/knot_resolver/tasks/test_doh.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: doh_test query localhost. A
- get_url:
- url: https://127.0.0.1:44353/doh?dns=1Y0BAAABAAAAAAAACWxvY2FsaG9zdAAAAQAB
- sha256sum: e5c2710e6ecb78c089ab608ad5861b87be0d1c623c4d58b4eee3b21c06aa2008
- dest: /tmp/doh_test
- mode: 0644
- validate_certs: false
diff --git a/distro/tests/ansible-roles/knot_resolver/tasks/test_doh2.yaml b/distro/tests/ansible-roles/knot_resolver/tasks/test_doh2.yaml
deleted file mode 100644
index 32cf2950..00000000
--- a/distro/tests/ansible-roles/knot_resolver/tasks/test_doh2.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: doh2_test check kdig https support
- shell: kdig --help | grep -q '+\S*https'
- register: kdig_https
- ignore_errors: true
-
-- name: doh2_test query localhost. A
- # use curl instead of ansible builtins (get_url/uri)
- # because they currently use unsupported HTTP/1.1
- shell: |
- curl -k -o /tmp/doh_test https://127.0.0.1:44354/doh?dns=1Y0BAAABAAAAAAAACWxvY2FsaG9zdAAAAQAB
- echo "e5c2710e6ecb78c089ab608ad5861b87be0d1c623c4d58b4eee3b21c06aa2008 /tmp/doh_test" > /tmp/doh_test.sha256
- sha256sum --check /tmp/doh_test.sha256
- args:
- # disable warning about using curl - we know what we're doing
- warn: false
- when: kdig_https is failed
-
-- name: doh2_test kdig localhost. A
- shell: |
- kdig @127.0.0.1 -p 44354 +https nic.cz || exit 1
- kdig @127.0.0.1 -p 44354 +https-get nic.cz || exit 2
- when: kdig_https is succeeded
diff --git a/distro/tests/ansible-roles/knot_resolver/tasks/test_kres_cache_gc.yaml b/distro/tests/ansible-roles/knot_resolver/tasks/test_kres_cache_gc.yaml
deleted file mode 100644
index 3a7c9c90..00000000
--- a/distro/tests/ansible-roles/knot_resolver/tasks/test_kres_cache_gc.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: check kres-cache-gc.service is active
- shell: systemctl is-active -q kres-cache-gc.service
diff --git a/distro/tests/ansible-roles/knot_resolver/tasks/test_tcp.yaml b/distro/tests/ansible-roles/knot_resolver/tasks/test_tcp.yaml
deleted file mode 100644
index 1af18fd1..00000000
--- a/distro/tests/ansible-roles/knot_resolver/tasks/test_tcp.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: tcp_test resolve nic.cz
- tags:
- - test
- shell: kdig +tcp @127.0.0.1 nic.cz
- register: res
- failed_when: '"status: NOERROR" not in res.stdout'
diff --git a/distro/tests/ansible-roles/knot_resolver/tasks/test_tls.yaml b/distro/tests/ansible-roles/knot_resolver/tasks/test_tls.yaml
deleted file mode 100644
index c780657b..00000000
--- a/distro/tests/ansible-roles/knot_resolver/tasks/test_tls.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: tls_test resolve nic.cz
- tags:
- - test
- shell: kdig +tls @127.0.0.1 nic.cz
- register: res
- failed_when: '"status: NOERROR" not in res.stdout'
diff --git a/distro/tests/ansible-roles/knot_resolver/tasks/test_udp.yaml b/distro/tests/ansible-roles/knot_resolver/tasks/test_udp.yaml
deleted file mode 100644
index 64023ffe..00000000
--- a/distro/tests/ansible-roles/knot_resolver/tasks/test_udp.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: udp_test resolve nic.cz
- tags:
- - test
- shell: kdig @127.0.0.1 nic.cz
- register: res
- failed_when: '"status: NOERROR" not in res.stdout'
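
The dnssec/tcp/tls/udp tests deleted above differ only in the kdig flags; they all share one idiom: register the command output, then derive failure from the presence of the expected rcode rather than the exit status. A minimal sketch of that shared shape (the transport flag is illustrative):

```yaml
---
# Shared shape of the deleted kdig tests: capture stdout and fail
# only when the expected status string is missing from the answer.
- name: resolve nic.cz over a chosen transport
  shell: kdig +tcp @127.0.0.1 nic.cz    # +tcp, +tls, or no flag for UDP
  register: res
  failed_when: '"status: NOERROR" not in res.stdout'
```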
diff --git a/distro/tests/ansible-roles/knot_resolver/vars/CentOS.yaml b/distro/tests/ansible-roles/knot_resolver/vars/CentOS.yaml
deleted file mode 100644
index d69cb13d..00000000
--- a/distro/tests/ansible-roles/knot_resolver/vars/CentOS.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-show_package_version: rpm -qi knot-resolver | grep '^Version'
-packages:
- - knot-resolver
- - knot-utils
diff --git a/distro/tests/ansible-roles/knot_resolver/vars/Debian.yaml b/distro/tests/ansible-roles/knot_resolver/vars/Debian.yaml
deleted file mode 100644
index bcdc37ae..00000000
--- a/distro/tests/ansible-roles/knot_resolver/vars/Debian.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-show_package_version: dpkg -s knot-resolver | grep '^Version'
-packages:
- - knot-resolver
- - knot-dnsutils
diff --git a/distro/tests/ansible-roles/knot_resolver/vars/Fedora.yaml b/distro/tests/ansible-roles/knot_resolver/vars/Fedora.yaml
deleted file mode 100644
index d69cb13d..00000000
--- a/distro/tests/ansible-roles/knot_resolver/vars/Fedora.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-show_package_version: rpm -qi knot-resolver | grep '^Version'
-packages:
- - knot-resolver
- - knot-utils
diff --git a/distro/tests/ansible-roles/knot_resolver/vars/Rocky.yaml b/distro/tests/ansible-roles/knot_resolver/vars/Rocky.yaml
deleted file mode 100644
index d69cb13d..00000000
--- a/distro/tests/ansible-roles/knot_resolver/vars/Rocky.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-show_package_version: rpm -qi knot-resolver | grep '^Version'
-packages:
- - knot-resolver
- - knot-utils
diff --git a/distro/tests/ansible-roles/knot_resolver/vars/Ubuntu.yaml b/distro/tests/ansible-roles/knot_resolver/vars/Ubuntu.yaml
deleted file mode 100644
index bcdc37ae..00000000
--- a/distro/tests/ansible-roles/knot_resolver/vars/Ubuntu.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-show_package_version: dpkg -s knot-resolver | grep '^Version'
-packages:
- - knot-resolver
- - knot-dnsutils
diff --git a/distro/tests/ansible-roles/knot_resolver/vars/openSUSE_Leap.yaml b/distro/tests/ansible-roles/knot_resolver/vars/openSUSE_Leap.yaml
deleted file mode 100644
index d69cb13d..00000000
--- a/distro/tests/ansible-roles/knot_resolver/vars/openSUSE_Leap.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-show_package_version: rpm -qi knot-resolver | grep '^Version'
-packages:
- - knot-resolver
- - knot-utils
diff --git a/distro/tests/ansible-roles/knot_resolver/vars/openSUSE_Tumbleweed.yaml b/distro/tests/ansible-roles/knot_resolver/vars/openSUSE_Tumbleweed.yaml
deleted file mode 100644
index 39d5ef0c..00000000
--- a/distro/tests/ansible-roles/knot_resolver/vars/openSUSE_Tumbleweed.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-show_package_version: rpm -qi knot-resolver | grep '^Version'
-update_packages: true
-packages:
- - knot-resolver
- - knot-utils
diff --git a/distro/tests/ansible-roles/obs_repos/defaults/main.yaml b/distro/tests/ansible-roles/obs_repos/defaults/main.yaml
deleted file mode 100644
index 05ffcb6c..00000000
--- a/distro/tests/ansible-roles/obs_repos/defaults/main.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-obs_distro: "{{ ansible_distribution | replace(' ', '_') }}"
-obs_repofile_url: "https://download.opensuse.org/repositories/home:CZ-NIC:{{ item }}/{{ obs_repo_version }}/home:CZ-NIC:{{ item }}.repo"
diff --git a/distro/tests/ansible-roles/obs_repos/tasks/CentOS.yaml b/distro/tests/ansible-roles/obs_repos/tasks/CentOS.yaml
deleted file mode 100644
index 2333a958..00000000
--- a/distro/tests/ansible-roles/obs_repos/tasks/CentOS.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: update CA certificates
- yum:
- name: ca-certificates
- state: latest
-
-- name: Install EPEL
- yum:
- name: epel-release
- state: present
-
-- name: Download repo file(s)
- get_url:
- url: "{{ obs_repofile_url }}"
- dest: /etc/yum.repos.d/home:CZ-NIC:{{ item }}.repo
- mode: 0644
- with_items: "{{ repos }}"
diff --git a/distro/tests/ansible-roles/obs_repos/tasks/Debian.yaml b/distro/tests/ansible-roles/obs_repos/tasks/Debian.yaml
deleted file mode 100644
index 6220f895..00000000
--- a/distro/tests/ansible-roles/obs_repos/tasks/Debian.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: Add upstream package signing key
- get_url:
- url: https://gitlab.nic.cz/knot/knot-resolver-release/raw/master/cznic-obs.gpg.asc
- dest: /etc/apt/trusted.gpg.d/cznic-obs.gpg.asc
- mode: 0644
-
-- name: Add OBS repo(s)
- apt_repository:
- repo: >
- deb http://download.opensuse.org/repositories/home:/CZ-NIC:/{{ item }}/{{ obs_repo_version }}/ /
- state: present
- update_cache: true
- with_items: "{{ repos }}"
diff --git a/distro/tests/ansible-roles/obs_repos/tasks/Fedora.yaml b/distro/tests/ansible-roles/obs_repos/tasks/Fedora.yaml
deleted file mode 100644
index 520e057c..00000000
--- a/distro/tests/ansible-roles/obs_repos/tasks/Fedora.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: Download repo file(s)
- get_url:
- url: "{{ obs_repofile_url }}"
- dest: "/etc/yum.repos.d/home:CZ-NIC:{{ item }}.repo"
- mode: 0644
- with_items: "{{ repos }}"
diff --git a/distro/tests/ansible-roles/obs_repos/tasks/Rocky.yaml b/distro/tests/ansible-roles/obs_repos/tasks/Rocky.yaml
deleted file mode 100644
index fecfbeaa..00000000
--- a/distro/tests/ansible-roles/obs_repos/tasks/Rocky.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: Install EPEL
- yum:
- name: epel-release
- state: present
-
-- name: Download repo file(s)
- get_url:
- url: "{{ obs_repofile_url }}"
- dest: /etc/yum.repos.d/home:CZ-NIC:{{ item }}.repo
- mode: 0644
- with_items: "{{ repos }}"
diff --git a/distro/tests/ansible-roles/obs_repos/tasks/Ubuntu.yaml b/distro/tests/ansible-roles/obs_repos/tasks/Ubuntu.yaml
deleted file mode 100644
index ba424c47..00000000
--- a/distro/tests/ansible-roles/obs_repos/tasks/Ubuntu.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: Add upstream package signing key
- apt_key:
- url: https://gitlab.nic.cz/knot/knot-resolver-release/raw/master/cznic-obs.gpg.asc
- state: present
-
-- name: Add OBS repo(s)
- apt_repository:
- repo: >
- deb http://download.opensuse.org/repositories/home:/CZ-NIC:/{{ item }}/{{ obs_repo_version }}/ /
- state: present
- update_cache: true
- with_items: "{{ repos }}"
diff --git a/distro/tests/ansible-roles/obs_repos/tasks/main.yaml b/distro/tests/ansible-roles/obs_repos/tasks/main.yaml
deleted file mode 100644
index 6bae0018..00000000
--- a/distro/tests/ansible-roles/obs_repos/tasks/main.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: Include Debian specific vars
- include_vars: "{{ obs_distro }}_{{ ansible_distribution_major_version }}.yaml"
- when: obs_distro == "Debian"
-
-- name: Include distribution specific vars
- include_vars: "{{ obs_distro }}.yaml"
- when: obs_distro != "Debian"
-
-- name: Configure upstream repositories
- include: "{{ obs_distro }}.yaml"
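
Taken together, the deleted obs_repos role dispatches purely on names: defaults/main.yaml derives `obs_distro` from `ansible_distribution`, main.yaml includes `{{ obs_distro }}.yaml` for both vars and tasks, and each vars file only sets `obs_repo_version`. A hand-expanded example for one concrete case (Fedora 36 with the knot-resolver-latest repo; values substituted from the deleted defaults and vars files, not a file from the repo):

```yaml
# Hand-expanded values for ansible_distribution=Fedora, major version 36,
# item=knot-resolver-latest (illustration only):
obs_distro: "Fedora"
obs_repo_version: "Fedora_36"
obs_repofile_url: "https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-latest/Fedora_36/home:CZ-NIC:knot-resolver-latest.repo"
```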
diff --git a/distro/tests/ansible-roles/obs_repos/tasks/openSUSE_Leap.yaml b/distro/tests/ansible-roles/obs_repos/tasks/openSUSE_Leap.yaml
deleted file mode 100644
index 84ab5a97..00000000
--- a/distro/tests/ansible-roles/obs_repos/tasks/openSUSE_Leap.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: Install python-xml dependency for zypper_repository
- shell: zypper install -y python-xml
- args:
- warn: false
-
-- name: Add upstream repo(s)
- zypper_repository:
- repo: "{{ obs_repofile_url }}"
- state: present
- disable_gpg_check: true # auto_import_keys is broken
- with_items: "{{ repos }}"
-
-- name: Refresh all repositories
- zypper_repository:
- repo: '*'
- runrefresh: true
- failed_when: false
diff --git a/distro/tests/ansible-roles/obs_repos/tasks/openSUSE_Tumbleweed.yaml b/distro/tests/ansible-roles/obs_repos/tasks/openSUSE_Tumbleweed.yaml
deleted file mode 100644
index c0630143..00000000
--- a/distro/tests/ansible-roles/obs_repos/tasks/openSUSE_Tumbleweed.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- name: Add upstream repo(s)
- zypper_repository:
- repo: "{{ obs_repofile_url }}"
- state: present
- disable_gpg_check: true # auto_import_keys is broken
- with_items: "{{ repos }}"
-
-- name: Refresh all repositories
- zypper_repository:
- repo: '*'
- runrefresh: true
diff --git a/distro/tests/ansible-roles/obs_repos/vars/CentOS.yaml b/distro/tests/ansible-roles/obs_repos/vars/CentOS.yaml
deleted file mode 100644
index 22b4795b..00000000
--- a/distro/tests/ansible-roles/obs_repos/vars/CentOS.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-obs_repo_version: "{{ obs_distro }}_{{ ansible_distribution_major_version }}_EPEL"
diff --git a/distro/tests/ansible-roles/obs_repos/vars/Debian_10.yaml b/distro/tests/ansible-roles/obs_repos/vars/Debian_10.yaml
deleted file mode 100644
index 5db857e6..00000000
--- a/distro/tests/ansible-roles/obs_repos/vars/Debian_10.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-obs_repo_version: "{{ obs_distro }}_{{ ansible_distribution_major_version }}"
diff --git a/distro/tests/ansible-roles/obs_repos/vars/Debian_11.yaml b/distro/tests/ansible-roles/obs_repos/vars/Debian_11.yaml
deleted file mode 120000
index 4babdf4e..00000000
--- a/distro/tests/ansible-roles/obs_repos/vars/Debian_11.yaml
+++ /dev/null
@@ -1 +0,0 @@
-Debian_10.yaml \ No newline at end of file
diff --git a/distro/tests/ansible-roles/obs_repos/vars/Debian_9.yaml b/distro/tests/ansible-roles/obs_repos/vars/Debian_9.yaml
deleted file mode 100644
index 21cce250..00000000
--- a/distro/tests/ansible-roles/obs_repos/vars/Debian_9.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-obs_repo_version: "{{ obs_distro }}_{{ ansible_distribution_major_version }}.0"
diff --git a/distro/tests/ansible-roles/obs_repos/vars/Fedora.yaml b/distro/tests/ansible-roles/obs_repos/vars/Fedora.yaml
deleted file mode 100644
index 5db857e6..00000000
--- a/distro/tests/ansible-roles/obs_repos/vars/Fedora.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-obs_repo_version: "{{ obs_distro }}_{{ ansible_distribution_major_version }}"
diff --git a/distro/tests/ansible-roles/obs_repos/vars/Rocky.yaml b/distro/tests/ansible-roles/obs_repos/vars/Rocky.yaml
deleted file mode 100644
index b8b52744..00000000
--- a/distro/tests/ansible-roles/obs_repos/vars/Rocky.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-obs_repo_version: "CentOS_{{ ansible_distribution_major_version }}_EPEL"
diff --git a/distro/tests/ansible-roles/obs_repos/vars/Ubuntu.yaml b/distro/tests/ansible-roles/obs_repos/vars/Ubuntu.yaml
deleted file mode 100644
index 4e5cd2c9..00000000
--- a/distro/tests/ansible-roles/obs_repos/vars/Ubuntu.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-obs_repo_version: "x{{ obs_distro }}_{{ ansible_distribution_version }}"
diff --git a/distro/tests/ansible-roles/obs_repos/vars/openSUSE_Leap.yaml b/distro/tests/ansible-roles/obs_repos/vars/openSUSE_Leap.yaml
deleted file mode 100644
index 7dbd7d8a..00000000
--- a/distro/tests/ansible-roles/obs_repos/vars/openSUSE_Leap.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-obs_repo_version: "{{ obs_distro }}_{{ ansible_distribution_version }}"
diff --git a/distro/tests/ansible-roles/obs_repos/vars/openSUSE_Tumbleweed.yaml b/distro/tests/ansible-roles/obs_repos/vars/openSUSE_Tumbleweed.yaml
deleted file mode 100644
index d875db72..00000000
--- a/distro/tests/ansible-roles/obs_repos/vars/openSUSE_Tumbleweed.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-obs_repo_version: "{{ obs_distro }}"
diff --git a/distro/tests/centos7/Vagrantfile b/distro/tests/centos7/Vagrantfile
deleted file mode 100644
index 2358be32..00000000
--- a/distro/tests/centos7/Vagrantfile
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-#
-
-Vagrant.configure(2) do |config|
-
- config.vm.box = "centos/7"
- config.vm.synced_folder ".", "/vagrant", disabled: true
-
- config.vm.define "centos7_knot-resolver" do |machine|
- machine.vm.provision "ansible" do |ansible|
- ansible.playbook = "../knot-resolver-pkgtest.yaml"
- ansible.extra_vars = {
- ansible_python_interpreter: "/usr/bin/python2"
- }
- end
- end
-
- config.vm.provider :libvirt do |libvirt|
- libvirt.cpus = 1
- libvirt.memory = 1024
- end
-
- config.vm.provider :virtualbox do |vbox|
- vbox.cpus = 1
- vbox.memory = 1024
- end
-
-end
diff --git a/distro/tests/centos7/ansible.cfg b/distro/tests/centos7/ansible.cfg
deleted file mode 120000
index f80698e8..00000000
--- a/distro/tests/centos7/ansible.cfg
+++ /dev/null
@@ -1 +0,0 @@
-../.ansible.cfg \ No newline at end of file
diff --git a/distro/tests/debian10/Vagrantfile b/distro/tests/debian10/Vagrantfile
deleted file mode 100644
index 7f51f1a7..00000000
--- a/distro/tests/debian10/Vagrantfile
+++ /dev/null
@@ -1,28 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-#
-
-Vagrant.configure(2) do |config|
-
- # debian/buster64 requires manual intervention for apt update as of 2019-07-18
- config.vm.box = "generic/debian10"
- config.vm.synced_folder ".", "/vagrant", disabled: true
-
- config.vm.define "debian10_knot-resolver" do |machine|
- machine.vm.provision "ansible" do |ansible|
- ansible.playbook = "../knot-resolver-pkgtest.yaml"
- end
- end
-
- config.vm.provider :libvirt do |libvirt|
- libvirt.cpus = 1
- libvirt.memory = 1024
- end
-
- config.vm.provider :virtualbox do |vbox|
- vbox.cpus = 1
- vbox.memory = 1024
- end
-
-end
diff --git a/distro/tests/debian10/ansible.cfg b/distro/tests/debian10/ansible.cfg
deleted file mode 120000
index f80698e8..00000000
--- a/distro/tests/debian10/ansible.cfg
+++ /dev/null
@@ -1 +0,0 @@
-../.ansible.cfg \ No newline at end of file
diff --git a/distro/tests/debian11/Vagrantfile b/distro/tests/debian11/Vagrantfile
deleted file mode 100644
index 1ca31a63..00000000
--- a/distro/tests/debian11/Vagrantfile
+++ /dev/null
@@ -1,27 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-#
-
-Vagrant.configure(2) do |config|
-
- config.vm.box = "generic/debian11"
- config.vm.synced_folder ".", "/vagrant", disabled: true
-
- config.vm.define "debian11_knot-resolver" do |machine|
- machine.vm.provision "ansible" do |ansible|
- ansible.playbook = "../knot-resolver-pkgtest.yaml"
- end
- end
-
- config.vm.provider :libvirt do |libvirt|
- libvirt.cpus = 1
- libvirt.memory = 1024
- end
-
- config.vm.provider :virtualbox do |vbox|
- vbox.cpus = 1
- vbox.memory = 1024
- end
-
-end
diff --git a/distro/tests/debian11/ansible.cfg b/distro/tests/debian11/ansible.cfg
deleted file mode 120000
index f80698e8..00000000
--- a/distro/tests/debian11/ansible.cfg
+++ /dev/null
@@ -1 +0,0 @@
-../.ansible.cfg \ No newline at end of file
diff --git a/distro/tests/debian9/Vagrantfile b/distro/tests/debian9/Vagrantfile
deleted file mode 100644
index c4b6a243..00000000
--- a/distro/tests/debian9/Vagrantfile
+++ /dev/null
@@ -1,27 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-#
-
-Vagrant.configure(2) do |config|
-
- config.vm.box = "debian/stretch64"
- config.vm.synced_folder ".", "/vagrant", disabled: true
-
- config.vm.define "debian9_knot-resolver" do |machine|
- machine.vm.provision "ansible" do |ansible|
- ansible.playbook = "../knot-resolver-pkgtest.yaml"
- end
- end
-
- config.vm.provider :libvirt do |libvirt|
- libvirt.cpus = 1
- libvirt.memory = 1024
- end
-
- config.vm.provider :virtualbox do |vbox|
- vbox.cpus = 1
- vbox.memory = 1024
- end
-
-end
diff --git a/distro/tests/debian9/ansible.cfg b/distro/tests/debian9/ansible.cfg
deleted file mode 120000
index f80698e8..00000000
--- a/distro/tests/debian9/ansible.cfg
+++ /dev/null
@@ -1 +0,0 @@
-../.ansible.cfg \ No newline at end of file
diff --git a/distro/tests/extra/all/control b/distro/tests/extra/all/control
new file mode 100644
index 00000000..b13cc27d
--- /dev/null
+++ b/distro/tests/extra/all/control
@@ -0,0 +1,2 @@
+{# This adds all tests for manager's packaging #}
+{% include 'manager/tests/packaging/control' %}
diff --git a/distro/tests/fedora35/Vagrantfile b/distro/tests/fedora35/Vagrantfile
deleted file mode 100644
index 1fe18ecc..00000000
--- a/distro/tests/fedora35/Vagrantfile
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-#
-
-Vagrant.configure(2) do |config|
-
- config.vm.box = "fedora/35-cloud-base"
- config.vm.synced_folder ".", "/vagrant", disabled: true
-
- config.vm.define "fedora35_knot-resolver" do |machine|
- machine.vm.provision "ansible" do |ansible|
- ansible.playbook = "../knot-resolver-pkgtest.yaml"
- ansible.extra_vars = {
- ansible_python_interpreter: "/usr/bin/python3",
- }
- end
- end
-
- config.vm.provider :libvirt do |libvirt|
- libvirt.cpus = 1
- libvirt.memory = 1024
- end
-
- config.vm.provider :virtualbox do |vbox|
- vbox.cpus = 1
- vbox.memory = 1024
- end
-
-end
diff --git a/distro/tests/fedora35/ansible.cfg b/distro/tests/fedora35/ansible.cfg
deleted file mode 120000
index f80698e8..00000000
--- a/distro/tests/fedora35/ansible.cfg
+++ /dev/null
@@ -1 +0,0 @@
-../.ansible.cfg \ No newline at end of file
diff --git a/distro/tests/fedora36/Vagrantfile b/distro/tests/fedora36/Vagrantfile
deleted file mode 100644
index 56659e59..00000000
--- a/distro/tests/fedora36/Vagrantfile
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-#
-
-Vagrant.configure(2) do |config|
-
- config.vm.box = "fedora/36-cloud-base"
- config.vm.synced_folder ".", "/vagrant", disabled: true
-
- config.vm.define "fedora36_knot-resolver" do |machine|
- machine.vm.provision "ansible" do |ansible|
- ansible.playbook = "../knot-resolver-pkgtest.yaml"
- ansible.extra_vars = {
- ansible_python_interpreter: "/usr/bin/python3",
- }
- end
- end
-
- config.vm.provider :libvirt do |libvirt|
- libvirt.cpus = 1
- libvirt.memory = 1024
- end
-
- config.vm.provider :virtualbox do |vbox|
- vbox.cpus = 1
- vbox.memory = 1024
- end
-
-end
diff --git a/distro/tests/fedora36/ansible.cfg b/distro/tests/fedora36/ansible.cfg
deleted file mode 120000
index f80698e8..00000000
--- a/distro/tests/fedora36/ansible.cfg
+++ /dev/null
@@ -1 +0,0 @@
-../.ansible.cfg \ No newline at end of file
diff --git a/distro/tests/knot-resolver-pkgtest.yaml b/distro/tests/knot-resolver-pkgtest.yaml
deleted file mode 100644
index 83545bbc..00000000
--- a/distro/tests/knot-resolver-pkgtest.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# SPDX-License-Identifier: GPL-3.0-or-later
-- hosts: all
-
- remote_user: root
- become: true
-
- vars_files:
- - repos.yaml
-
- roles:
- - obs_repos
- - knot_resolver
diff --git a/distro/tests/leap15/Vagrantfile b/distro/tests/leap15/Vagrantfile
deleted file mode 100644
index a2f76468..00000000
--- a/distro/tests/leap15/Vagrantfile
+++ /dev/null
@@ -1,29 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-#
-
-Vagrant.configure(2) do |config|
-
- config.vm.box = "generic/opensuse15"
-
- config.vm.synced_folder ".", "/vagrant", disabled: true
-
- config.vm.define "leap15_knot-resolver" do |machine|
- machine.vm.provision "ansible" do |ansible|
- ansible.playbook = "../knot-resolver-pkgtest.yaml"
- end
- end
-
- config.vm.provider :libvirt do |libvirt|
- libvirt.cpus = 1
- libvirt.memory = 1024
- libvirt.disk_bus = "sata"
- end
-
- config.vm.provider :virtualbox do |vbox|
- vbox.cpus = 1
- vbox.memory = 1024
- end
-
-end
diff --git a/distro/tests/leap15/ansible.cfg b/distro/tests/leap15/ansible.cfg
deleted file mode 120000
index f80698e8..00000000
--- a/distro/tests/leap15/ansible.cfg
+++ /dev/null
@@ -1 +0,0 @@
-../.ansible.cfg \ No newline at end of file
diff --git a/distro/tests/repos.yaml b/distro/tests/repos.yaml
deleted file mode 100644
index bd4bedd5..00000000
--- a/distro/tests/repos.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-repos:
- - knot-resolver-latest
- - knot-resolver-devel
diff --git a/distro/tests/rocky8/Vagrantfile b/distro/tests/rocky8/Vagrantfile
deleted file mode 100644
index f82c194f..00000000
--- a/distro/tests/rocky8/Vagrantfile
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-#
-
-Vagrant.configure(2) do |config|
-
- config.vm.box = "generic/rocky8"
- config.vm.synced_folder ".", "/vagrant", disabled: true
-
- config.vm.define "rocky8_knot-resolver" do |machine|
- machine.vm.provision "ansible" do |ansible|
- ansible.playbook = "../knot-resolver-pkgtest.yaml"
- ansible.extra_vars = {
- ansible_python_interpreter: "/usr/libexec/platform-python"
- }
- end
- end
-
- config.vm.provider :libvirt do |libvirt|
- libvirt.cpus = 1
- libvirt.memory = 1024
- end
-
- config.vm.provider :virtualbox do |vbox|
- vbox.cpus = 1
- vbox.memory = 1024
- end
-
-end
diff --git a/distro/tests/rocky8/ansible.cfg b/distro/tests/rocky8/ansible.cfg
deleted file mode 120000
index f80698e8..00000000
--- a/distro/tests/rocky8/ansible.cfg
+++ /dev/null
@@ -1 +0,0 @@
-../.ansible.cfg \ No newline at end of file
diff --git a/distro/tests/test-distro.sh b/distro/tests/test-distro.sh
deleted file mode 100755
index 55b75d06..00000000
--- a/distro/tests/test-distro.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/bash -x
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# ./test-distro.sh {obs_repo} {distro}
-# Example usage: ./test-distro.sh knot-resolver-devel debian9
-
-pkgtestdir="$(dirname ${0})"
-repofile="$pkgtestdir/repos.yaml"
-
-distro=$2
-repo=$1
-
-# Select repos
-echo -e "repos:\n - $repo" > $repofile
-if [ "$repo" == "knot-resolver-devel" ]; then
- # get Knot DNS from knot-resolver-latest
- echo -e ' - knot-resolver-latest' >> $repofile
-fi
-
-pushd "$pkgtestdir/$distro"
-vagrant destroy -f &>/dev/null
-vagrant up
-ret=$?
-vagrant destroy -f &>/dev/null
-popd
-exit $ret
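
For reference, the deleted script writes repos.yaml before bringing the Vagrant box up; a sketch of what that file would contain after `./test-distro.sh knot-resolver-devel debian9`, derived from the echo lines above:

```yaml
# repos.yaml as generated by ./test-distro.sh knot-resolver-devel debian9
# (knot-resolver-latest is appended so the -devel repo can pull Knot DNS)
repos:
  - knot-resolver-devel
  - knot-resolver-latest
```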
diff --git a/distro/tests/ubuntu1804/Vagrantfile b/distro/tests/ubuntu1804/Vagrantfile
deleted file mode 100644
index 5c538950..00000000
--- a/distro/tests/ubuntu1804/Vagrantfile
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-#
-
-Vagrant.configure(2) do |config|
-
- config.vm.box = "generic/ubuntu1804"
- config.vm.synced_folder ".", "/vagrant", disabled: true
-
- config.vm.define "ubuntu1804_knot-resolver" do |machine|
- machine.vm.provision "ansible" do |ansible|
- ansible.playbook = "../knot-resolver-pkgtest.yaml"
- ansible.extra_vars = {
- ansible_python_interpreter: "/usr/bin/python3"
- }
- end
- end
-
- config.vm.provider :libvirt do |libvirt|
- libvirt.cpus = 1
- libvirt.memory = 1024
- end
-
- config.vm.provider :virtualbox do |vbox|
- vbox.cpus = 1
- vbox.memory = 1024
- end
-
-end
diff --git a/distro/tests/ubuntu1804/ansible.cfg b/distro/tests/ubuntu1804/ansible.cfg
deleted file mode 120000
index f80698e8..00000000
--- a/distro/tests/ubuntu1804/ansible.cfg
+++ /dev/null
@@ -1 +0,0 @@
-../.ansible.cfg \ No newline at end of file
diff --git a/distro/tests/ubuntu2004/Vagrantfile b/distro/tests/ubuntu2004/Vagrantfile
deleted file mode 100644
index 3d5c40a5..00000000
--- a/distro/tests/ubuntu2004/Vagrantfile
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-#
-
-Vagrant.configure(2) do |config|
-
- config.vm.box = "generic/ubuntu2004"
- config.vm.synced_folder ".", "/vagrant", disabled: true
-
- config.vm.define "ubuntu2004_knot-resolver" do |machine|
- machine.vm.provision "ansible" do |ansible|
- ansible.playbook = "../knot-resolver-pkgtest.yaml"
- ansible.extra_vars = {
- ansible_python_interpreter: "/usr/bin/python3"
- }
- end
- end
-
- config.vm.provider :libvirt do |libvirt|
- libvirt.cpus = 1
- libvirt.memory = 1024
- end
-
- config.vm.provider :virtualbox do |vbox|
- vbox.cpus = 1
- vbox.memory = 1024
- end
-
-end
diff --git a/distro/tests/ubuntu2004/ansible.cfg b/distro/tests/ubuntu2004/ansible.cfg
deleted file mode 120000
index f80698e8..00000000
--- a/distro/tests/ubuntu2004/ansible.cfg
+++ /dev/null
@@ -1 +0,0 @@
-../.ansible.cfg \ No newline at end of file
diff --git a/distro/tests/ubuntu2204/Vagrantfile b/distro/tests/ubuntu2204/Vagrantfile
deleted file mode 100644
index e2b97507..00000000
--- a/distro/tests/ubuntu2204/Vagrantfile
+++ /dev/null
@@ -1,30 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-#
-
-Vagrant.configure(2) do |config|
-
- config.vm.box = "generic/ubuntu2204"
- config.vm.synced_folder ".", "/vagrant", disabled: true
-
- config.vm.define "ubuntu2204_knot-resolver" do |machine|
- machine.vm.provision "ansible" do |ansible|
- ansible.playbook = "../knot-resolver-pkgtest.yaml"
- ansible.extra_vars = {
- ansible_python_interpreter: "/usr/bin/python3"
- }
- end
- end
-
- config.vm.provider :libvirt do |libvirt|
- libvirt.cpus = 1
- libvirt.memory = 1024
- end
-
- config.vm.provider :virtualbox do |vbox|
- vbox.cpus = 1
- vbox.memory = 1024
- end
-
-end
diff --git a/distro/tests/ubuntu2204/ansible.cfg b/distro/tests/ubuntu2204/ansible.cfg
deleted file mode 120000
index f80698e8..00000000
--- a/distro/tests/ubuntu2204/ansible.cfg
+++ /dev/null
@@ -1 +0,0 @@
-../.ansible.cfg \ No newline at end of file
diff --git a/doc/.packaging/centos/8/NOTSUPPORTED b/doc/.packaging/centos/8/NOTSUPPORTED
deleted file mode 100644
index e69de29b..00000000
--- a/doc/.packaging/centos/8/NOTSUPPORTED
+++ /dev/null
diff --git a/doc/.packaging/debian/10/build.sh b/doc/.packaging/debian/10/build.sh
deleted file mode 100755
index e6084df4..00000000
--- a/doc/.packaging/debian/10/build.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-[ -d /root/kresd/build_packaging ] && rm -rf /root/kresd/build_packaging/;
-CFLAGS="$CFLAGS -Wall -pedantic -fno-omit-frame-pointer"
-LDFLAGS="$LDFLAGS -Wl,--as-needed"
-meson build_packaging \
- --buildtype=plain \
- --prefix=/root/kresd/install_packaging \
- --libdir=lib \
- --default-library=static \
- -Ddoc=enabled \
- -Dsystemd_files=enabled \
- -Dclient=enabled \
- -Dkeyfile_default=/usr/share/dns/root.key \
- -Droot_hints=/usr/share/dns/root.hints \
- -Dinstall_kresd_conf=enabled \
- -Dunit_tests=enabled \
- -Dc_args="${CFLAGS}" \
- -Dc_link_args="${LDFLAGS}";
diff --git a/doc/.packaging/debian/10/builddeps b/doc/.packaging/debian/10/builddeps
deleted file mode 100644
index 81b7a5b9..00000000
--- a/doc/.packaging/debian/10/builddeps
+++ /dev/null
@@ -1,4 +0,0 @@
-doxygen
-python3-sphinx
-python3-breathe
-python3-sphinx-rtd-theme
diff --git a/doc/.packaging/debian/10/install.sh b/doc/.packaging/debian/10/install.sh
deleted file mode 100755
index 3422d684..00000000
--- a/doc/.packaging/debian/10/install.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-ninja -C build_packaging doc
diff --git a/doc/.packaging/debian/9/build.sh b/doc/.packaging/debian/9/build.sh
deleted file mode 100755
index e6084df4..00000000
--- a/doc/.packaging/debian/9/build.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-[ -d /root/kresd/build_packaging ] && rm -rf /root/kresd/build_packaging/;
-CFLAGS="$CFLAGS -Wall -pedantic -fno-omit-frame-pointer"
-LDFLAGS="$LDFLAGS -Wl,--as-needed"
-meson build_packaging \
- --buildtype=plain \
- --prefix=/root/kresd/install_packaging \
- --libdir=lib \
- --default-library=static \
- -Ddoc=enabled \
- -Dsystemd_files=enabled \
- -Dclient=enabled \
- -Dkeyfile_default=/usr/share/dns/root.key \
- -Droot_hints=/usr/share/dns/root.hints \
- -Dinstall_kresd_conf=enabled \
- -Dunit_tests=enabled \
- -Dc_args="${CFLAGS}" \
- -Dc_link_args="${LDFLAGS}";
diff --git a/doc/.packaging/debian/9/builddeps b/doc/.packaging/debian/9/builddeps
deleted file mode 100644
index 81b7a5b9..00000000
--- a/doc/.packaging/debian/9/builddeps
+++ /dev/null
@@ -1,4 +0,0 @@
-doxygen
-python3-sphinx
-python3-breathe
-python3-sphinx-rtd-theme
diff --git a/doc/.packaging/debian/9/install.sh b/doc/.packaging/debian/9/install.sh
deleted file mode 100755
index 3422d684..00000000
--- a/doc/.packaging/debian/9/install.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-ninja -C build_packaging doc
diff --git a/doc/.packaging/fedora/31/build.sh b/doc/.packaging/fedora/31/build.sh
deleted file mode 100755
index 68ea49e6..00000000
--- a/doc/.packaging/fedora/31/build.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-[ -d /root/kresd/build_packaging ] && rm -rf /root/kresd/build_packaging/;
-CFLAGS="$CFLAGS -Wall -pedantic -fno-omit-frame-pointer"
-LDFLAGS="$LDFLAGS -Wl,--as-needed"
-meson build_packaging \
- --buildtype=plain \
- --prefix=/root/kresd/install_packaging \
- --sbindir=sbin \
- --libdir=lib \
- --includedir=include \
- --sysconfdir=etc \
- -Ddoc=enabled \
- -Dsystemd_files=enabled \
- -Dclient=enabled \
- -Dunit_tests=enabled \
- -Dmanaged_ta=enabled \
- -Dkeyfile_default=/var/lib/knot-resolver/root.keys \
- -Dinstall_root_keys=enabled \
- -Dinstall_kresd_conf=enabled;
diff --git a/doc/.packaging/fedora/31/builddeps b/doc/.packaging/fedora/31/builddeps
deleted file mode 100644
index 0a4b8869..00000000
--- a/doc/.packaging/fedora/31/builddeps
+++ /dev/null
@@ -1,4 +0,0 @@
-doxygen
-python3-sphinx
-python3-breathe
-python3-sphinx_rtd_theme
diff --git a/doc/.packaging/fedora/31/install.sh b/doc/.packaging/fedora/31/install.sh
deleted file mode 100755
index 3422d684..00000000
--- a/doc/.packaging/fedora/31/install.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-ninja -C build_packaging doc
diff --git a/doc/.packaging/fedora/32/30/build.sh b/doc/.packaging/fedora/32/30/build.sh
deleted file mode 100755
index 68ea49e6..00000000
--- a/doc/.packaging/fedora/32/30/build.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-[ -d /root/kresd/build_packaging ] && rm -rf /root/kresd/build_packaging/;
-CFLAGS="$CFLAGS -Wall -pedantic -fno-omit-frame-pointer"
-LDFLAGS="$LDFLAGS -Wl,--as-needed"
-meson build_packaging \
- --buildtype=plain \
- --prefix=/root/kresd/install_packaging \
- --sbindir=sbin \
- --libdir=lib \
- --includedir=include \
- --sysconfdir=etc \
- -Ddoc=enabled \
- -Dsystemd_files=enabled \
- -Dclient=enabled \
- -Dunit_tests=enabled \
- -Dmanaged_ta=enabled \
- -Dkeyfile_default=/var/lib/knot-resolver/root.keys \
- -Dinstall_root_keys=enabled \
- -Dinstall_kresd_conf=enabled;
diff --git a/doc/.packaging/fedora/32/30/builddeps b/doc/.packaging/fedora/32/30/builddeps
deleted file mode 100644
index 0a4b8869..00000000
--- a/doc/.packaging/fedora/32/30/builddeps
+++ /dev/null
@@ -1,4 +0,0 @@
-doxygen
-python3-sphinx
-python3-breathe
-python3-sphinx_rtd_theme
diff --git a/doc/.packaging/fedora/32/30/install.sh b/doc/.packaging/fedora/32/30/install.sh
deleted file mode 100755
index 3422d684..00000000
--- a/doc/.packaging/fedora/32/30/install.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-ninja -C build_packaging doc
diff --git a/doc/.packaging/fedora/32/build.sh b/doc/.packaging/fedora/32/build.sh
deleted file mode 100755
index 68ea49e6..00000000
--- a/doc/.packaging/fedora/32/build.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-[ -d /root/kresd/build_packaging ] && rm -rf /root/kresd/build_packaging/;
-CFLAGS="$CFLAGS -Wall -pedantic -fno-omit-frame-pointer"
-LDFLAGS="$LDFLAGS -Wl,--as-needed"
-meson build_packaging \
- --buildtype=plain \
- --prefix=/root/kresd/install_packaging \
- --sbindir=sbin \
- --libdir=lib \
- --includedir=include \
- --sysconfdir=etc \
- -Ddoc=enabled \
- -Dsystemd_files=enabled \
- -Dclient=enabled \
- -Dunit_tests=enabled \
- -Dmanaged_ta=enabled \
- -Dkeyfile_default=/var/lib/knot-resolver/root.keys \
- -Dinstall_root_keys=enabled \
- -Dinstall_kresd_conf=enabled;
diff --git a/doc/.packaging/fedora/32/builddeps b/doc/.packaging/fedora/32/builddeps
deleted file mode 100644
index 0a4b8869..00000000
--- a/doc/.packaging/fedora/32/builddeps
+++ /dev/null
@@ -1,4 +0,0 @@
-doxygen
-python3-sphinx
-python3-breathe
-python3-sphinx_rtd_theme
diff --git a/doc/.packaging/fedora/32/install.sh b/doc/.packaging/fedora/32/install.sh
deleted file mode 100755
index 3422d684..00000000
--- a/doc/.packaging/fedora/32/install.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-ninja -C build_packaging doc
diff --git a/doc/.packaging/leap/15.2/build.sh b/doc/.packaging/leap/15.2/build.sh
deleted file mode 100755
index 68ea49e6..00000000
--- a/doc/.packaging/leap/15.2/build.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-[ -d /root/kresd/build_packaging ] && rm -rf /root/kresd/build_packaging/;
-CFLAGS="$CFLAGS -Wall -pedantic -fno-omit-frame-pointer"
-LDFLAGS="$LDFLAGS -Wl,--as-needed"
-meson build_packaging \
- --buildtype=plain \
- --prefix=/root/kresd/install_packaging \
- --sbindir=sbin \
- --libdir=lib \
- --includedir=include \
- --sysconfdir=etc \
- -Ddoc=enabled \
- -Dsystemd_files=enabled \
- -Dclient=enabled \
- -Dunit_tests=enabled \
- -Dmanaged_ta=enabled \
- -Dkeyfile_default=/var/lib/knot-resolver/root.keys \
- -Dinstall_root_keys=enabled \
- -Dinstall_kresd_conf=enabled;
diff --git a/doc/.packaging/leap/15.2/builddeps b/doc/.packaging/leap/15.2/builddeps
deleted file mode 100644
index 60daf9cb..00000000
--- a/doc/.packaging/leap/15.2/builddeps
+++ /dev/null
@@ -1,4 +0,0 @@
-doxygen
-python3-Sphinx
-python3-breathe
-python3-sphinx_rtd_theme
diff --git a/doc/.packaging/leap/15.2/install.sh b/doc/.packaging/leap/15.2/install.sh
deleted file mode 100755
index 3422d684..00000000
--- a/doc/.packaging/leap/15.2/install.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-ninja -C build_packaging doc
diff --git a/doc/.packaging/test.sh b/doc/.packaging/test.sh
deleted file mode 100755
index 33bf175d..00000000
--- a/doc/.packaging/test.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-test -e ../doc/html/index.html
diff --git a/doc/.packaging/ubuntu/16.04/build.sh b/doc/.packaging/ubuntu/16.04/build.sh
deleted file mode 100755
index e6084df4..00000000
--- a/doc/.packaging/ubuntu/16.04/build.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-[ -d /root/kresd/build_packaging ] && rm -rf /root/kresd/build_packaging/;
-CFLAGS="$CFLAGS -Wall -pedantic -fno-omit-frame-pointer"
-LDFLAGS="$LDFLAGS -Wl,--as-needed"
-meson build_packaging \
- --buildtype=plain \
- --prefix=/root/kresd/install_packaging \
- --libdir=lib \
- --default-library=static \
- -Ddoc=enabled \
- -Dsystemd_files=enabled \
- -Dclient=enabled \
- -Dkeyfile_default=/usr/share/dns/root.key \
- -Droot_hints=/usr/share/dns/root.hints \
- -Dinstall_kresd_conf=enabled \
- -Dunit_tests=enabled \
- -Dc_args="${CFLAGS}" \
- -Dc_link_args="${LDFLAGS}";
diff --git a/doc/.packaging/ubuntu/16.04/builddeps b/doc/.packaging/ubuntu/16.04/builddeps
deleted file mode 100644
index 81b7a5b9..00000000
--- a/doc/.packaging/ubuntu/16.04/builddeps
+++ /dev/null
@@ -1,4 +0,0 @@
-doxygen
-python3-sphinx
-python3-breathe
-python3-sphinx-rtd-theme
diff --git a/doc/.packaging/ubuntu/16.04/install.sh b/doc/.packaging/ubuntu/16.04/install.sh
deleted file mode 100755
index 3422d684..00000000
--- a/doc/.packaging/ubuntu/16.04/install.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-ninja -C build_packaging doc
diff --git a/doc/.packaging/ubuntu/18.04/build.sh b/doc/.packaging/ubuntu/18.04/build.sh
deleted file mode 100755
index e6084df4..00000000
--- a/doc/.packaging/ubuntu/18.04/build.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-[ -d /root/kresd/build_packaging ] && rm -rf /root/kresd/build_packaging/;
-CFLAGS="$CFLAGS -Wall -pedantic -fno-omit-frame-pointer"
-LDFLAGS="$LDFLAGS -Wl,--as-needed"
-meson build_packaging \
- --buildtype=plain \
- --prefix=/root/kresd/install_packaging \
- --libdir=lib \
- --default-library=static \
- -Ddoc=enabled \
- -Dsystemd_files=enabled \
- -Dclient=enabled \
- -Dkeyfile_default=/usr/share/dns/root.key \
- -Droot_hints=/usr/share/dns/root.hints \
- -Dinstall_kresd_conf=enabled \
- -Dunit_tests=enabled \
- -Dc_args="${CFLAGS}" \
- -Dc_link_args="${LDFLAGS}";
diff --git a/doc/.packaging/ubuntu/18.04/builddeps b/doc/.packaging/ubuntu/18.04/builddeps
deleted file mode 100644
index 81b7a5b9..00000000
--- a/doc/.packaging/ubuntu/18.04/builddeps
+++ /dev/null
@@ -1,4 +0,0 @@
-doxygen
-python3-sphinx
-python3-breathe
-python3-sphinx-rtd-theme
diff --git a/doc/.packaging/ubuntu/18.04/install.sh b/doc/.packaging/ubuntu/18.04/install.sh
deleted file mode 100755
index 3422d684..00000000
--- a/doc/.packaging/ubuntu/18.04/install.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-ninja -C build_packaging doc
diff --git a/doc/.packaging/ubuntu/20.04/build.sh b/doc/.packaging/ubuntu/20.04/build.sh
deleted file mode 100755
index e6084df4..00000000
--- a/doc/.packaging/ubuntu/20.04/build.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-[ -d /root/kresd/build_packaging ] && rm -rf /root/kresd/build_packaging/;
-CFLAGS="$CFLAGS -Wall -pedantic -fno-omit-frame-pointer"
-LDFLAGS="$LDFLAGS -Wl,--as-needed"
-meson build_packaging \
- --buildtype=plain \
- --prefix=/root/kresd/install_packaging \
- --libdir=lib \
- --default-library=static \
- -Ddoc=enabled \
- -Dsystemd_files=enabled \
- -Dclient=enabled \
- -Dkeyfile_default=/usr/share/dns/root.key \
- -Droot_hints=/usr/share/dns/root.hints \
- -Dinstall_kresd_conf=enabled \
- -Dunit_tests=enabled \
- -Dc_args="${CFLAGS}" \
- -Dc_link_args="${LDFLAGS}";
diff --git a/doc/.packaging/ubuntu/20.04/builddeps b/doc/.packaging/ubuntu/20.04/builddeps
deleted file mode 100644
index 81b7a5b9..00000000
--- a/doc/.packaging/ubuntu/20.04/builddeps
+++ /dev/null
@@ -1,4 +0,0 @@
-doxygen
-python3-sphinx
-python3-breathe
-python3-sphinx-rtd-theme
diff --git a/doc/.packaging/ubuntu/20.04/install.sh b/doc/.packaging/ubuntu/20.04/install.sh
deleted file mode 100755
index 3422d684..00000000
--- a/doc/.packaging/ubuntu/20.04/install.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-3.0-or-later
-ninja -C build_packaging doc
diff --git a/doc/README.md b/doc/README.md
index 6860672a..eedc60f3 100644
--- a/doc/README.md
+++ b/doc/README.md
@@ -5,23 +5,35 @@ It does not however contain API documentation, which is built separately in this
### Requirements
+To generate the documentation, you need to install [meson][meson] and [ninja][ninja].
+
The code is documented with [Doxygen][doxygen] JavaDoc style, a prettified documentation
-also requires [breathe][breathe] and [Sphinx][sphinx] for building sane documentation pages.
-It is not however required.
+also requires [breathe][breathe], [Sphinx][sphinx], [Sphinx tabs][sphinx-tabs], and the [Sphinx Read the Docs theme][sphinx_rtd_theme] for building sane documentation pages.
+[meson]: https://mesonbuild.com/
+[ninja]: https://ninja-build.org/
[doxygen]:https://www.stack.nl/~dimitri/doxygen/manual/index.html
[breathe]: https://github.com/michaeljones/breathe
[sphinx]: http://sphinx-doc.org/
+[sphinx-tabs]: https://sphinx-tabs.readthedocs.io/
+[sphinx_rtd_theme]: https://sphinx-rtd-theme.readthedocs.io/en/stable/
-You can get the extra dependencies with pip:
+You can install dependencies with pip:
```sh
-pip install -U Sphinx breathe
+pip install -U Sphinx sphinx-tabs sphinx_rtd_theme breathe
# Alternatively
-pip -r doc/requirements.txt
+pip install -r doc/requirements.txt
```
### Building documentation
-If you satisfy the requirements, it's as easy as `make doc`, which builds the documentation in this folder.
+If you satisfy the requirements, the documentation will be generated into the `doc/html` directory.
+Run the commands below from the root directory of the project.
+
+You may first need to initialize git submodules: `git submodule update --init --recursive`.
+```sh
+$ meson setup build_dir -Ddoc=enabled
+$ ninja -C build_dir doc
+```
diff --git a/doc/_static/.gitignore b/doc/_static/.gitignore
deleted file mode 100644
index e69de29b..00000000
--- a/doc/_static/.gitignore
+++ /dev/null
diff --git a/doc/_static/package-lock.json b/doc/_static/package-lock.json
new file mode 100644
index 00000000..eb5f9fb7
--- /dev/null
+++ b/doc/_static/package-lock.json
@@ -0,0 +1,2484 @@
+{
+ "name": "_static",
+ "lockfileVersion": 2,
+ "requires": true,
+ "packages": {
+ "": {
+ "dependencies": {
+ "@adobe/jsonschema2md": "^7.1.5"
+ }
+ },
+ "node_modules/@adobe/helix-log": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/@adobe/helix-log/-/helix-log-6.0.0.tgz",
+ "integrity": "sha512-+9gpf49sFDmZLV3gtjY+RmEUistqYJdVWpiqlRYpxE59x5bHFzYf93dZ7fljSTBtZdVq8lm97HxrTUloh5HvRg==",
+ "dependencies": {
+ "big.js": "^6.1.1",
+ "colorette": "^2.0.2",
+ "ferrum": "^1.9.3",
+ "phin": "^3.6.0",
+ "polka": "^0.5.2"
+ }
+ },
+ "node_modules/@adobe/jsonschema2md": {
+ "version": "7.1.5",
+ "resolved": "https://registry.npmjs.org/@adobe/jsonschema2md/-/jsonschema2md-7.1.5.tgz",
+ "integrity": "sha512-uybF3Ryn0xz5lzGz6sb6Th5nkX9H60zOnKVYCUXunUtWENGb7Ut+8CYPzPA9sjY8+gLK8pQq3rbmsKprcjkN0A==",
+ "dependencies": {
+ "@adobe/helix-log": "6.0.0",
+ "@types/json-schema": "^7.0.8",
+ "@types/mdast": "^3.0.4",
+ "es2015-i18n-tag": "1.6.1",
+ "ferrum": "1.9.4",
+ "fs-extra": "11.1.0",
+ "github-slugger": "2.0.0",
+ "js-yaml": "4.1.0",
+ "json-schema": "^0.4.0",
+ "mdast-builder": "1.1.1",
+ "mdast-util-to-string": "3.1.0",
+ "readdirp": "3.6.0",
+ "remark-gfm": "^3.0.0",
+ "remark-parse": "10.0.1",
+ "remark-stringify": "10.0.2",
+ "unified": "10.1.2",
+ "unist-util-inspect": "7.0.1",
+ "yargs": "17.6.2"
+ },
+ "bin": {
+ "jsonschema2md": "cli.js"
+ },
+ "engines": {
+ "node": ">= 14.0.0"
+ }
+ },
+ "node_modules/@arr/every": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/@arr/every/-/every-1.0.1.tgz",
+ "integrity": "sha512-UQFQ6SgyJ6LX42W8rHCs8KVc0JS0tzVL9ct4XYedJukskYVWTo49tNiMEK9C2HTyarbNiT/RVIRSY82vH+6sTg==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@polka/url": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/@polka/url/-/url-0.5.0.tgz",
+ "integrity": "sha512-oZLYFEAzUKyi3SKnXvj32ZCEGH6RDnao7COuCVhDydMS9NrCSVXhM79VaKyP5+Zc33m0QXEd2DN3UkU7OsHcfw=="
+ },
+ "node_modules/@types/debug": {
+ "version": "4.1.7",
+ "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.7.tgz",
+ "integrity": "sha512-9AonUzyTjXXhEOa0DnqpzZi6VHlqKMswga9EXjpXnnqxwLtdvPPtlO8evrI5D9S6asFRCQ6v+wpiUKbw+vKqyg==",
+ "dependencies": {
+ "@types/ms": "*"
+ }
+ },
+ "node_modules/@types/json-schema": {
+ "version": "7.0.11",
+ "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz",
+ "integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ=="
+ },
+ "node_modules/@types/mdast": {
+ "version": "3.0.10",
+ "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.10.tgz",
+ "integrity": "sha512-W864tg/Osz1+9f4lrGTZpCSO5/z4608eUp19tbozkq2HJK6i3z1kT0H9tlADXuYIb1YYOBByU4Jsqkk75q48qA==",
+ "dependencies": {
+ "@types/unist": "*"
+ }
+ },
+ "node_modules/@types/ms": {
+ "version": "0.7.31",
+ "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.31.tgz",
+ "integrity": "sha512-iiUgKzV9AuaEkZqkOLDIvlQiL6ltuZd9tGcW3gwpnX8JbuiuhFlEGmmFXEXkN50Cvq7Os88IY2v0dkDqXYWVgA=="
+ },
+ "node_modules/@types/unist": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.6.tgz",
+ "integrity": "sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ=="
+ },
+ "node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="
+ },
+ "node_modules/bail": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
+ "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/big.js": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/big.js/-/big.js-6.2.1.tgz",
+ "integrity": "sha512-bCtHMwL9LeDIozFn+oNhhFoq+yQ3BNdnsLSASUxLciOb1vgvpHsIO1dsENiGMgbb4SkP5TrzWzRiLddn8ahVOQ==",
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/bigjs"
+ }
+ },
+ "node_modules/ccount": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz",
+ "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/centra": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/centra/-/centra-2.6.0.tgz",
+ "integrity": "sha512-dgh+YleemrT8u85QL11Z6tYhegAs3MMxsaWAq/oXeAmYJ7VxL3SI9TZtnfaEvNDMAPolj25FXIb3S+HCI4wQaQ=="
+ },
+ "node_modules/character-entities": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz",
+ "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/cliui": {
+ "version": "8.0.1",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
+ "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
+ "dependencies": {
+ "string-width": "^4.2.0",
+ "strip-ansi": "^6.0.1",
+ "wrap-ansi": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
+ },
+ "node_modules/colorette": {
+ "version": "2.0.19",
+ "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.19.tgz",
+ "integrity": "sha512-3tlv/dIP7FWvj3BsbHrGLJ6l/oKh1O3TcgBqMn+yyCagOxc23fyzDS6HypQbgxWbkpDnf52p1LuR4eWDQ/K9WQ=="
+ },
+ "node_modules/cuint": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/cuint/-/cuint-0.2.2.tgz",
+ "integrity": "sha512-d4ZVpCW31eWwCMe1YT3ur7mUDnTXbgwyzaL320DrcRT45rfjYxkt5QWLrmOJ+/UEAI2+fQgKe/fCjR8l4TpRgw=="
+ },
+ "node_modules/debug": {
+ "version": "4.3.4",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
+ "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
+ "dependencies": {
+ "ms": "2.1.2"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/decode-named-character-reference": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz",
+ "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==",
+ "dependencies": {
+ "character-entities": "^2.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/dequal": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz",
+ "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/diff": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/diff/-/diff-5.1.0.tgz",
+ "integrity": "sha512-D+mk+qE8VC/PAUrlAU34N+VfXev0ghe5ywmpqrawphmVZc1bEfn56uo9qpyGp1p4xpzOHkSW4ztBd6L7Xx4ACw==",
+ "engines": {
+ "node": ">=0.3.1"
+ }
+ },
+ "node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ },
+ "node_modules/es2015-i18n-tag": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/es2015-i18n-tag/-/es2015-i18n-tag-1.6.1.tgz",
+ "integrity": "sha512-MYoh9p+JTkgnzBh0MEBON6xUyzdmwT6wzsmmFJvZujGSXiI2kM+3XvFl6+AcIO2eeL6VWgtX9szSiDTMwDxyYA==",
+ "engines": {
+ "node": ">= 4.0.0"
+ }
+ },
+ "node_modules/escalade": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
+ "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/escape-string-regexp": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz",
+ "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="
+ },
+ "node_modules/fastestsmallesttextencoderdecoder": {
+ "version": "1.0.22",
+ "resolved": "https://registry.npmjs.org/fastestsmallesttextencoderdecoder/-/fastestsmallesttextencoderdecoder-1.0.22.tgz",
+ "integrity": "sha512-Pb8d48e+oIuY4MaM64Cd7OW1gt4nxCHs7/ddPPZ/Ic3sg8yVGM7O9wDvZ7us6ScaUupzM+pfBolwtYhN1IxBIw=="
+ },
+ "node_modules/ferrum": {
+ "version": "1.9.4",
+ "resolved": "https://registry.npmjs.org/ferrum/-/ferrum-1.9.4.tgz",
+ "integrity": "sha512-ooNerLoIht/dK4CQJux93z/hnt9JysrXniJCI3r6YRgmHeXC57EJ8XaTCT1Gm8LfhIAeWxyJA0O7d/W3pqDYRg==",
+ "dependencies": {
+ "fastestsmallesttextencoderdecoder": "1.0.22",
+ "lodash.isplainobject": "4.0.6",
+ "xxhashjs": "0.2.2"
+ }
+ },
+ "node_modules/fs-extra": {
+ "version": "11.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.1.0.tgz",
+ "integrity": "sha512-0rcTq621PD5jM/e0a3EJoGC/1TC5ZBCERW82LQuwfGnCa1V8w7dpYH1yNu+SLb6E5dkeCBzKEyLGlFrnr+dUyw==",
+ "dependencies": {
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=14.14"
+ }
+ },
+ "node_modules/get-caller-file": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
+ "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
+ "engines": {
+ "node": "6.* || 8.* || >= 10.*"
+ }
+ },
+ "node_modules/github-slugger": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-2.0.0.tgz",
+ "integrity": "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw=="
+ },
+ "node_modules/graceful-fs": {
+ "version": "4.2.10",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz",
+ "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA=="
+ },
+ "node_modules/is-buffer": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz",
+ "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-plain-obj": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
+ "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/js-yaml": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
+ "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "dependencies": {
+ "argparse": "^2.0.1"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/json-schema": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz",
+ "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA=="
+ },
+ "node_modules/jsonfile": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
+ "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
+ "dependencies": {
+ "universalify": "^2.0.0"
+ },
+ "optionalDependencies": {
+ "graceful-fs": "^4.1.6"
+ }
+ },
+ "node_modules/kleur": {
+ "version": "4.1.5",
+ "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz",
+ "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/lodash.isplainobject": {
+ "version": "4.0.6",
+ "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz",
+ "integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA=="
+ },
+ "node_modules/longest-streak": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz",
+ "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/markdown-table": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.3.tgz",
+ "integrity": "sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/matchit": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/matchit/-/matchit-1.1.0.tgz",
+ "integrity": "sha512-+nGYoOlfHmxe5BW5tE0EMJppXEwdSf8uBA1GTZC7Q77kbT35+VKLYJMzVNWCHSsga1ps1tPYFtFyvxvKzWVmMA==",
+ "dependencies": {
+ "@arr/every": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/mdast-builder": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/mdast-builder/-/mdast-builder-1.1.1.tgz",
+ "integrity": "sha512-a3KBk/LmYD6wKsWi8WJrGU/rXR4yuF4Men0JO0z6dSZCm5FrXXWTRDjqK0vGSqa+1M6p9edeuypZAZAzSehTUw==",
+ "dependencies": {
+ "@types/unist": "^2.0.3"
+ }
+ },
+ "node_modules/mdast-util-find-and-replace": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-2.2.1.tgz",
+ "integrity": "sha512-SobxkQXFAdd4b5WmEakmkVoh18icjQRxGy5OWTCzgsLRm1Fu/KCtwD1HIQSsmq5ZRjVH0Ehwg6/Fn3xIUk+nKw==",
+ "dependencies": {
+ "escape-string-regexp": "^5.0.0",
+ "unist-util-is": "^5.0.0",
+ "unist-util-visit-parents": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-from-markdown": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-1.2.0.tgz",
+ "integrity": "sha512-iZJyyvKD1+K7QX1b5jXdE7Sc5dtoTry1vzV28UZZe8Z1xVnB/czKntJ7ZAkG0tANqRnBF6p3p7GpU1y19DTf2Q==",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "@types/unist": "^2.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "mdast-util-to-string": "^3.1.0",
+ "micromark": "^3.0.0",
+ "micromark-util-decode-numeric-character-reference": "^1.0.0",
+ "micromark-util-decode-string": "^1.0.0",
+ "micromark-util-normalize-identifier": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "unist-util-stringify-position": "^3.0.0",
+ "uvu": "^0.5.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-2.0.1.tgz",
+ "integrity": "sha512-42yHBbfWIFisaAfV1eixlabbsa6q7vHeSPY+cg+BBjX51M8xhgMacqH9g6TftB/9+YkcI0ooV4ncfrJslzm/RQ==",
+ "dependencies": {
+ "mdast-util-from-markdown": "^1.0.0",
+ "mdast-util-gfm-autolink-literal": "^1.0.0",
+ "mdast-util-gfm-footnote": "^1.0.0",
+ "mdast-util-gfm-strikethrough": "^1.0.0",
+ "mdast-util-gfm-table": "^1.0.0",
+ "mdast-util-gfm-task-list-item": "^1.0.0",
+ "mdast-util-to-markdown": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-autolink-literal": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-1.0.2.tgz",
+ "integrity": "sha512-FzopkOd4xTTBeGXhXSBU0OCDDh5lUj2rd+HQqG92Ld+jL4lpUfgX2AT2OHAVP9aEeDKp7G92fuooSZcYJA3cRg==",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "ccount": "^2.0.0",
+ "mdast-util-find-and-replace": "^2.0.0",
+ "micromark-util-character": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-footnote": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-1.0.1.tgz",
+ "integrity": "sha512-p+PrYlkw9DeCRkTVw1duWqPRHX6Ywh2BNKJQcZbCwAuP/59B0Lk9kakuAd7KbQprVO4GzdW8eS5++A9PUSqIyw==",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-to-markdown": "^1.3.0",
+ "micromark-util-normalize-identifier": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-strikethrough": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-1.0.2.tgz",
+ "integrity": "sha512-T/4DVHXcujH6jx1yqpcAYYwd+z5lAYMw4Ls6yhTfbMMtCt0PHY4gEfhW9+lKsLBtyhUGKRIzcUA2FATVqnvPDA==",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-to-markdown": "^1.3.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-table": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-1.0.6.tgz",
+ "integrity": "sha512-uHR+fqFq3IvB3Rd4+kzXW8dmpxUhvgCQZep6KdjsLK4O6meK5dYZEayLtIxNus1XO3gfjfcIFe8a7L0HZRGgag==",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "markdown-table": "^3.0.0",
+ "mdast-util-from-markdown": "^1.0.0",
+ "mdast-util-to-markdown": "^1.3.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-gfm-task-list-item": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-1.0.1.tgz",
+ "integrity": "sha512-KZ4KLmPdABXOsfnM6JHUIjxEvcx2ulk656Z/4Balw071/5qgnhz+H1uGtf2zIGnrnvDC8xR4Fj9uKbjAFGNIeA==",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-to-markdown": "^1.3.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-markdown": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-1.4.0.tgz",
+ "integrity": "sha512-IjXARf/O8VGx/pc5SZ7syfydq1DYL9vd92orsG5U0b4GNCmAvXzu+n7sbzfIKrXwB0AVrYk3NV2kXl0AIi9LCA==",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "@types/unist": "^2.0.0",
+ "longest-streak": "^3.0.0",
+ "mdast-util-to-string": "^3.0.0",
+ "micromark-util-decode-string": "^1.0.0",
+ "unist-util-visit": "^4.0.0",
+ "zwitch": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-string": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-3.1.0.tgz",
+ "integrity": "sha512-n4Vypz/DZgwo0iMHLQL49dJzlp7YtAJP+N07MZHpjPf/5XJuHUWstviF4Mn2jEiR/GNmtnRRqnwsXExk3igfFA==",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/micromark/-/micromark-3.1.0.tgz",
+ "integrity": "sha512-6Mj0yHLdUZjHnOPgr5xfWIMqMWS12zDN6iws9SLuSz76W8jTtAv24MN4/CL7gJrl5vtxGInkkqDv/JIoRsQOvA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "@types/debug": "^4.0.0",
+ "debug": "^4.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "micromark-core-commonmark": "^1.0.1",
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-chunked": "^1.0.0",
+ "micromark-util-combine-extensions": "^1.0.0",
+ "micromark-util-decode-numeric-character-reference": "^1.0.0",
+ "micromark-util-encode": "^1.0.0",
+ "micromark-util-normalize-identifier": "^1.0.0",
+ "micromark-util-resolve-all": "^1.0.0",
+ "micromark-util-sanitize-uri": "^1.0.0",
+ "micromark-util-subtokenize": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.1",
+ "uvu": "^0.5.0"
+ }
+ },
+ "node_modules/micromark-core-commonmark": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-1.0.6.tgz",
+ "integrity": "sha512-K+PkJTxqjFfSNkfAhp4GB+cZPfQd6dxtTXnf+RjZOV7T4EEXnvgzOcnp+eSTmpGk9d1S9sL6/lqrgSNn/s0HZA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "decode-named-character-reference": "^1.0.0",
+ "micromark-factory-destination": "^1.0.0",
+ "micromark-factory-label": "^1.0.0",
+ "micromark-factory-space": "^1.0.0",
+ "micromark-factory-title": "^1.0.0",
+ "micromark-factory-whitespace": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-chunked": "^1.0.0",
+ "micromark-util-classify-character": "^1.0.0",
+ "micromark-util-html-tag-name": "^1.0.0",
+ "micromark-util-normalize-identifier": "^1.0.0",
+ "micromark-util-resolve-all": "^1.0.0",
+ "micromark-util-subtokenize": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.1",
+ "uvu": "^0.5.0"
+ }
+ },
+ "node_modules/micromark-extension-gfm": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-2.0.1.tgz",
+ "integrity": "sha512-p2sGjajLa0iYiGQdT0oelahRYtMWvLjy8J9LOCxzIQsllMCGLbsLW+Nc+N4vi02jcRJvedVJ68cjelKIO6bpDA==",
+ "dependencies": {
+ "micromark-extension-gfm-autolink-literal": "^1.0.0",
+ "micromark-extension-gfm-footnote": "^1.0.0",
+ "micromark-extension-gfm-strikethrough": "^1.0.0",
+ "micromark-extension-gfm-table": "^1.0.0",
+ "micromark-extension-gfm-tagfilter": "^1.0.0",
+ "micromark-extension-gfm-task-list-item": "^1.0.0",
+ "micromark-util-combine-extensions": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-autolink-literal": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-1.0.3.tgz",
+ "integrity": "sha512-i3dmvU0htawfWED8aHMMAzAVp/F0Z+0bPh3YrbTPPL1v4YAlCZpy5rBO5p0LPYiZo0zFVkoYh7vDU7yQSiCMjg==",
+ "dependencies": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-sanitize-uri": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-footnote": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-1.0.4.tgz",
+ "integrity": "sha512-E/fmPmDqLiMUP8mLJ8NbJWJ4bTw6tS+FEQS8CcuDtZpILuOb2kjLqPEeAePF1djXROHXChM/wPJw0iS4kHCcIg==",
+ "dependencies": {
+ "micromark-core-commonmark": "^1.0.0",
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-normalize-identifier": "^1.0.0",
+ "micromark-util-sanitize-uri": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-strikethrough": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-1.0.4.tgz",
+ "integrity": "sha512-/vjHU/lalmjZCT5xt7CcHVJGq8sYRm80z24qAKXzaHzem/xsDYb2yLL+NNVbYvmpLx3O7SYPuGL5pzusL9CLIQ==",
+ "dependencies": {
+ "micromark-util-chunked": "^1.0.0",
+ "micromark-util-classify-character": "^1.0.0",
+ "micromark-util-resolve-all": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-table": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-1.0.5.tgz",
+ "integrity": "sha512-xAZ8J1X9W9K3JTJTUL7G6wSKhp2ZYHrFk5qJgY/4B33scJzE2kpfRL6oiw/veJTbt7jiM/1rngLlOKPWr1G+vg==",
+ "dependencies": {
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-tagfilter": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-1.0.1.tgz",
+ "integrity": "sha512-Ty6psLAcAjboRa/UKUbbUcwjVAv5plxmpUTy2XC/3nJFL37eHej8jrHrRzkqcpipJliuBH30DTs7+3wqNcQUVA==",
+ "dependencies": {
+ "micromark-util-types": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-extension-gfm-task-list-item": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-1.0.3.tgz",
+ "integrity": "sha512-PpysK2S1Q/5VXi72IIapbi/jliaiOFzv7THH4amwXeYXLq3l1uo8/2Be0Ac1rEwK20MQEsGH2ltAZLNY2KI/0Q==",
+ "dependencies": {
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/micromark-factory-destination": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-1.0.0.tgz",
+ "integrity": "sha512-eUBA7Rs1/xtTVun9TmV3gjfPz2wEwgK5R5xcbIM5ZYAtvGF6JkyaDsj0agx8urXnO31tEO6Ug83iVH3tdedLnw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-factory-label": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-1.0.2.tgz",
+ "integrity": "sha512-CTIwxlOnU7dEshXDQ+dsr2n+yxpP0+fn271pu0bwDIS8uqfFcumXpj5mLn3hSC8iw2MUr6Gx8EcKng1dD7i6hg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ }
+ },
+ "node_modules/micromark-factory-space": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.0.0.tgz",
+ "integrity": "sha512-qUmqs4kj9a5yBnk3JMLyjtWYN6Mzfcx8uJfi5XAveBniDevmZasdGBba5b4QsvRcAkmvGo5ACmSUmyGiKTLZew==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-factory-title": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-1.0.2.tgz",
+ "integrity": "sha512-zily+Nr4yFqgMGRKLpTVsNl5L4PMu485fGFDOQJQBl2NFpjGte1e86zC0da93wf97jrc4+2G2GQudFMHn3IX+A==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ }
+ },
+ "node_modules/micromark-factory-whitespace": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-1.0.0.tgz",
+ "integrity": "sha512-Qx7uEyahU1lt1RnsECBiuEbfr9INjQTGa6Err+gF3g0Tx4YEviPbqqGKNv/NrBaE7dVHdn1bVZKM/n5I/Bak7A==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-character": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.1.0.tgz",
+ "integrity": "sha512-agJ5B3unGNJ9rJvADMJ5ZiYjBRyDpzKAOk01Kpi1TKhlT1APx3XZk6eN7RtSz1erbWHC2L8T3xLZ81wdtGRZzg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-chunked": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-1.0.0.tgz",
+ "integrity": "sha512-5e8xTis5tEZKgesfbQMKRCyzvffRRUX+lK/y+DvsMFdabAicPkkZV6gO+FEWi9RfuKKoxxPwNL+dFF0SMImc1g==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-symbol": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-classify-character": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-1.0.0.tgz",
+ "integrity": "sha512-F8oW2KKrQRb3vS5ud5HIqBVkCqQi224Nm55o5wYLzY/9PwHGXC01tr3d7+TqHHz6zrKQ72Okwtvm/xQm6OVNZA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-combine-extensions": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-1.0.0.tgz",
+ "integrity": "sha512-J8H058vFBdo/6+AsjHp2NF7AJ02SZtWaVUjsayNFeAiydTxUwViQPxN0Hf8dp4FmCQi0UUFovFsEyRSUmFH3MA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-chunked": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-decode-numeric-character-reference": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-1.0.0.tgz",
+ "integrity": "sha512-OzO9AI5VUtrTD7KSdagf4MWgHMtET17Ua1fIpXTpuhclCqD8egFWo85GxSGvxgkGS74bEahvtM0WP0HjvV0e4w==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-symbol": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-decode-string": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-1.0.2.tgz",
+ "integrity": "sha512-DLT5Ho02qr6QWVNYbRZ3RYOSSWWFuH3tJexd3dgN1odEuPNxCngTCXJum7+ViRAd9BbdxCvMToPOD/IvVhzG6Q==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "decode-named-character-reference": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-decode-numeric-character-reference": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-encode": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-1.0.1.tgz",
+ "integrity": "sha512-U2s5YdnAYexjKDel31SVMPbfi+eF8y1U4pfiRW/Y8EFVCy/vgxk/2wWTxzcqE71LHtCuCzlBDRU2a5CQ5j+mQA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ]
+ },
+ "node_modules/micromark-util-html-tag-name": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-1.1.0.tgz",
+ "integrity": "sha512-BKlClMmYROy9UiV03SwNmckkjn8QHVaWkqoAqzivabvdGcwNGMMMH/5szAnywmsTBUzDsU57/mFi0sp4BQO6dA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ]
+ },
+ "node_modules/micromark-util-normalize-identifier": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-1.0.0.tgz",
+ "integrity": "sha512-yg+zrL14bBTFrQ7n35CmByWUTFsgst5JhA4gJYoty4Dqzj4Z4Fr/DHekSS5aLfH9bdlfnSvKAWsAgJhIbogyBg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-symbol": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-resolve-all": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-1.0.0.tgz",
+ "integrity": "sha512-CB/AGk98u50k42kvgaMM94wzBqozSzDDaonKU7P7jwQIuH2RU0TeBqGYJz2WY1UdihhjweivStrJ2JdkdEmcfw==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-sanitize-uri": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-1.1.0.tgz",
+ "integrity": "sha512-RoxtuSCX6sUNtxhbmsEFQfWzs8VN7cTctmBPvYivo98xb/kDEoTCtJQX5wyzIYEmk/lvNFTat4hL8oW0KndFpg==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-encode": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0"
+ }
+ },
+ "node_modules/micromark-util-subtokenize": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-1.0.2.tgz",
+ "integrity": "sha512-d90uqCnXp/cy4G881Ub4psE57Sf8YD0pim9QdjCRNjfas2M1u6Lbt+XZK9gnHL2XFhnozZiEdCa9CNfXSfQ6xA==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ],
+ "dependencies": {
+ "micromark-util-chunked": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ }
+ },
+ "node_modules/micromark-util-symbol": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.0.1.tgz",
+ "integrity": "sha512-oKDEMK2u5qqAptasDAwWDXq0tG9AssVwAx3E9bBF3t/shRIGsWIRG+cGafs2p/SnDSOecnt6hZPCE2o6lHfFmQ==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ]
+ },
+ "node_modules/micromark-util-types": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.0.2.tgz",
+ "integrity": "sha512-DCfg/T8fcrhrRKTPjRrw/5LLvdGV7BHySf/1LOZx7TzWZdYRjogNtyNq885z3nNallwr3QUKARjqvHqX1/7t+w==",
+ "funding": [
+ {
+ "type": "GitHub Sponsors",
+ "url": "https://github.com/sponsors/unifiedjs"
+ },
+ {
+ "type": "OpenCollective",
+ "url": "https://opencollective.com/unified"
+ }
+ ]
+ },
+ "node_modules/mri": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz",
+ "integrity": "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "node_modules/phin": {
+ "version": "3.7.0",
+ "resolved": "https://registry.npmjs.org/phin/-/phin-3.7.0.tgz",
+ "integrity": "sha512-DqnVNrpYhKGBZppNKprD+UJylMeEKOZxHgPB+ZP6mGzf3uA2uox4Ep9tUm+rUc8WLIdHT3HcAE4X8fhwQA9JKg==",
+ "dependencies": {
+ "centra": "^2.6.0"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+ "engines": {
+ "node": ">=8.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/polka": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/polka/-/polka-0.5.2.tgz",
+ "integrity": "sha512-FVg3vDmCqP80tOrs+OeNlgXYmFppTXdjD5E7I4ET1NjvtNmQrb1/mJibybKkb/d4NA7YWAr1ojxuhpL3FHqdlw==",
+ "dependencies": {
+ "@polka/url": "^0.5.0",
+ "trouter": "^2.0.1"
+ }
+ },
+ "node_modules/readdirp": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
+ "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
+ "dependencies": {
+ "picomatch": "^2.2.1"
+ },
+ "engines": {
+ "node": ">=8.10.0"
+ }
+ },
+ "node_modules/remark-gfm": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-3.0.1.tgz",
+ "integrity": "sha512-lEFDoi2PICJyNrACFOfDD3JlLkuSbOa5Wd8EPt06HUdptv8Gn0bxYTdbU/XXQ3swAPkEaGxxPN9cbnMHvVu1Ig==",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-gfm": "^2.0.0",
+ "micromark-extension-gfm": "^2.0.0",
+ "unified": "^10.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-parse": {
+ "version": "10.0.1",
+ "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-10.0.1.tgz",
+ "integrity": "sha512-1fUyHr2jLsVOkhbvPRBJ5zTKZZyD6yZzYaWCS6BPBdQ8vEMBCH+9zNCDA6tET/zHCi/jLqjCWtlJZUPk+DbnFw==",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-from-markdown": "^1.0.0",
+ "unified": "^10.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-stringify": {
+ "version": "10.0.2",
+ "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-10.0.2.tgz",
+ "integrity": "sha512-6wV3pvbPvHkbNnWB0wdDvVFHOe1hBRAx1Q/5g/EpH4RppAII6J8Gnwe7VbHuXaoKIF6LAg6ExTel/+kNqSQ7lw==",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-to-markdown": "^1.0.0",
+ "unified": "^10.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/require-directory": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
+ "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/sade": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/sade/-/sade-1.8.1.tgz",
+ "integrity": "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==",
+ "dependencies": {
+ "mri": "^1.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/trough": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/trough/-/trough-2.1.0.tgz",
+ "integrity": "sha512-AqTiAOLcj85xS7vQ8QkAV41hPDIJ71XJB4RCUrzo/1GM2CQwhkJGaf9Hgr7BOugMRpgGUrqRg/DrBDl4H40+8g==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/trouter": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/trouter/-/trouter-2.0.1.tgz",
+ "integrity": "sha512-kr8SKKw94OI+xTGOkfsvwZQ8mWoikZDd2n8XZHjJVZUARZT+4/VV6cacRS6CLsH9bNm+HFIPU1Zx4CnNnb4qlQ==",
+ "dependencies": {
+ "matchit": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/unified": {
+ "version": "10.1.2",
+ "resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
+ "integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "bail": "^2.0.0",
+ "extend": "^3.0.0",
+ "is-buffer": "^2.0.0",
+ "is-plain-obj": "^4.0.0",
+ "trough": "^2.0.0",
+ "vfile": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-inspect": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/unist-util-inspect/-/unist-util-inspect-7.0.1.tgz",
+ "integrity": "sha512-gEPeSrsYXus8012VJ00p9uZC8D0iogtLLiHlBgvS61hU22KNKduQhMKezJm83viHlLf3TYS2y9SDEFglWPDMKw==",
+ "dependencies": {
+ "@types/unist": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-is": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.1.1.tgz",
+ "integrity": "sha512-F5CZ68eYzuSvJjGhCLPL3cYx45IxkqXSetCcRgUXtbcm50X2L9oOWQlfUfDdAf+6Pd27YDblBfdtmsThXmwpbQ==",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-stringify-position": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-3.0.2.tgz",
+ "integrity": "sha512-7A6eiDCs9UtjcwZOcCpM4aPII3bAAGv13E96IkawkOAW0OhH+yRxtY0lzo8KiHpzEMfH7Q+FizUmwp8Iqy5EWg==",
+ "dependencies": {
+ "@types/unist": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.1.tgz",
+ "integrity": "sha512-n9KN3WV9k4h1DxYR1LoajgN93wpEi/7ZplVe02IoB4gH5ctI1AaF2670BLHQYbwj+pY83gFtyeySFiyMHJklrg==",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0",
+ "unist-util-visit-parents": "^5.1.1"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit-parents": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.1.tgz",
+ "integrity": "sha512-gks4baapT/kNRaWxuGkl5BIhoanZo7sC/cUT/JToSRNL1dYoXRFl75d++NkjYk4TAu2uv2Px+l8guMajogeuiw==",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/universalify": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz",
+ "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==",
+ "engines": {
+ "node": ">= 10.0.0"
+ }
+ },
+ "node_modules/uvu": {
+ "version": "0.5.6",
+ "resolved": "https://registry.npmjs.org/uvu/-/uvu-0.5.6.tgz",
+ "integrity": "sha512-+g8ENReyr8YsOc6fv/NVJs2vFdHBnBNdfE49rshrTzDWOlUx4Gq7KOS2GD8eqhy2j+Ejq29+SbKH8yjkAqXqoA==",
+ "dependencies": {
+ "dequal": "^2.0.0",
+ "diff": "^5.0.0",
+ "kleur": "^4.0.3",
+ "sade": "^1.7.3"
+ },
+ "bin": {
+ "uvu": "bin.js"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/vfile": {
+ "version": "5.3.6",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.6.tgz",
+ "integrity": "sha512-ADBsmerdGBs2WYckrLBEmuETSPyTD4TuLxTrw0DvjirxW1ra4ZwkbzG8ndsv3Q57smvHxo677MHaQrY9yxH8cA==",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "is-buffer": "^2.0.0",
+ "unist-util-stringify-position": "^3.0.0",
+ "vfile-message": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vfile-message": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.3.tgz",
+ "integrity": "sha512-0yaU+rj2gKAyEk12ffdSbBfjnnj+b1zqTBv3OQCTn8yEB02bsPizwdBPrLJjHnK+cU9EMMcUnNv938XcZIkmdA==",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-stringify-position": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/wrap-ansi": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "dependencies": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/xxhashjs": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/xxhashjs/-/xxhashjs-0.2.2.tgz",
+ "integrity": "sha512-AkTuIuVTET12tpsVIQo+ZU6f/qDmKuRUcjaqR+OIvm+aCBsZ95i7UVY5WJ9TMsSaZ0DA2WxoZ4acu0sPH+OKAw==",
+ "dependencies": {
+ "cuint": "^0.2.2"
+ }
+ },
+ "node_modules/y18n": {
+ "version": "5.0.8",
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
+ "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/yargs": {
+ "version": "17.6.2",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.6.2.tgz",
+ "integrity": "sha512-1/9UrdHjDZc0eOU0HxOHoS78C69UD3JRMvzlJ7S79S2nTaWRA/whGCTV8o9e/N/1Va9YIV7Q4sOxD8VV4pCWOw==",
+ "dependencies": {
+ "cliui": "^8.0.1",
+ "escalade": "^3.1.1",
+ "get-caller-file": "^2.0.5",
+ "require-directory": "^2.1.1",
+ "string-width": "^4.2.3",
+ "y18n": "^5.0.5",
+ "yargs-parser": "^21.1.1"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/yargs-parser": {
+ "version": "21.1.1",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
+ "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==",
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/zwitch": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz",
+ "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ }
+ },
+ "dependencies": {
+ "@adobe/helix-log": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/@adobe/helix-log/-/helix-log-6.0.0.tgz",
+ "integrity": "sha512-+9gpf49sFDmZLV3gtjY+RmEUistqYJdVWpiqlRYpxE59x5bHFzYf93dZ7fljSTBtZdVq8lm97HxrTUloh5HvRg==",
+ "requires": {
+ "big.js": "^6.1.1",
+ "colorette": "^2.0.2",
+ "ferrum": "^1.9.3",
+ "phin": "^3.6.0",
+ "polka": "^0.5.2"
+ }
+ },
+ "@adobe/jsonschema2md": {
+ "version": "7.1.5",
+ "resolved": "https://registry.npmjs.org/@adobe/jsonschema2md/-/jsonschema2md-7.1.5.tgz",
+ "integrity": "sha512-uybF3Ryn0xz5lzGz6sb6Th5nkX9H60zOnKVYCUXunUtWENGb7Ut+8CYPzPA9sjY8+gLK8pQq3rbmsKprcjkN0A==",
+ "requires": {
+ "@adobe/helix-log": "6.0.0",
+ "@types/json-schema": "^7.0.8",
+ "@types/mdast": "^3.0.4",
+ "es2015-i18n-tag": "1.6.1",
+ "ferrum": "1.9.4",
+ "fs-extra": "11.1.0",
+ "github-slugger": "2.0.0",
+ "js-yaml": "4.1.0",
+ "json-schema": "^0.4.0",
+ "mdast-builder": "1.1.1",
+ "mdast-util-to-string": "3.1.0",
+ "readdirp": "3.6.0",
+ "remark-gfm": "^3.0.0",
+ "remark-parse": "10.0.1",
+ "remark-stringify": "10.0.2",
+ "unified": "10.1.2",
+ "unist-util-inspect": "7.0.1",
+ "yargs": "17.6.2"
+ }
+ },
+ "@arr/every": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/@arr/every/-/every-1.0.1.tgz",
+ "integrity": "sha512-UQFQ6SgyJ6LX42W8rHCs8KVc0JS0tzVL9ct4XYedJukskYVWTo49tNiMEK9C2HTyarbNiT/RVIRSY82vH+6sTg=="
+ },
+ "@polka/url": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/@polka/url/-/url-0.5.0.tgz",
+ "integrity": "sha512-oZLYFEAzUKyi3SKnXvj32ZCEGH6RDnao7COuCVhDydMS9NrCSVXhM79VaKyP5+Zc33m0QXEd2DN3UkU7OsHcfw=="
+ },
+ "@types/debug": {
+ "version": "4.1.7",
+ "resolved": "https://registry.npmjs.org/@types/debug/-/debug-4.1.7.tgz",
+ "integrity": "sha512-9AonUzyTjXXhEOa0DnqpzZi6VHlqKMswga9EXjpXnnqxwLtdvPPtlO8evrI5D9S6asFRCQ6v+wpiUKbw+vKqyg==",
+ "requires": {
+ "@types/ms": "*"
+ }
+ },
+ "@types/json-schema": {
+ "version": "7.0.11",
+ "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.11.tgz",
+ "integrity": "sha512-wOuvG1SN4Us4rez+tylwwwCV1psiNVOkJeM3AUWUNWg/jDQY2+HE/444y5gc+jBmRqASOm2Oeh5c1axHobwRKQ=="
+ },
+ "@types/mdast": {
+ "version": "3.0.10",
+ "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.10.tgz",
+ "integrity": "sha512-W864tg/Osz1+9f4lrGTZpCSO5/z4608eUp19tbozkq2HJK6i3z1kT0H9tlADXuYIb1YYOBByU4Jsqkk75q48qA==",
+ "requires": {
+ "@types/unist": "*"
+ }
+ },
+ "@types/ms": {
+ "version": "0.7.31",
+ "resolved": "https://registry.npmjs.org/@types/ms/-/ms-0.7.31.tgz",
+ "integrity": "sha512-iiUgKzV9AuaEkZqkOLDIvlQiL6ltuZd9tGcW3gwpnX8JbuiuhFlEGmmFXEXkN50Cvq7Os88IY2v0dkDqXYWVgA=="
+ },
+ "@types/unist": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.6.tgz",
+ "integrity": "sha512-PBjIUxZHOuj0R15/xuwJYjFi+KZdNFrehocChv4g5hu6aFroHue8m0lBP0POdK2nKzbw0cgV1mws8+V/JAcEkQ=="
+ },
+ "ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="
+ },
+ "ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "requires": {
+ "color-convert": "^2.0.1"
+ }
+ },
+ "argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="
+ },
+ "bail": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/bail/-/bail-2.0.2.tgz",
+ "integrity": "sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw=="
+ },
+ "big.js": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/big.js/-/big.js-6.2.1.tgz",
+ "integrity": "sha512-bCtHMwL9LeDIozFn+oNhhFoq+yQ3BNdnsLSASUxLciOb1vgvpHsIO1dsENiGMgbb4SkP5TrzWzRiLddn8ahVOQ=="
+ },
+ "ccount": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/ccount/-/ccount-2.0.1.tgz",
+ "integrity": "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg=="
+ },
+ "centra": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/centra/-/centra-2.6.0.tgz",
+ "integrity": "sha512-dgh+YleemrT8u85QL11Z6tYhegAs3MMxsaWAq/oXeAmYJ7VxL3SI9TZtnfaEvNDMAPolj25FXIb3S+HCI4wQaQ=="
+ },
+ "character-entities": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-2.0.2.tgz",
+ "integrity": "sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ=="
+ },
+ "cliui": {
+ "version": "8.0.1",
+ "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
+ "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==",
+ "requires": {
+ "string-width": "^4.2.0",
+ "strip-ansi": "^6.0.1",
+ "wrap-ansi": "^7.0.0"
+ }
+ },
+ "color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "requires": {
+ "color-name": "~1.1.4"
+ }
+ },
+ "color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
+ },
+ "colorette": {
+ "version": "2.0.19",
+ "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.19.tgz",
+ "integrity": "sha512-3tlv/dIP7FWvj3BsbHrGLJ6l/oKh1O3TcgBqMn+yyCagOxc23fyzDS6HypQbgxWbkpDnf52p1LuR4eWDQ/K9WQ=="
+ },
+ "cuint": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/cuint/-/cuint-0.2.2.tgz",
+ "integrity": "sha512-d4ZVpCW31eWwCMe1YT3ur7mUDnTXbgwyzaL320DrcRT45rfjYxkt5QWLrmOJ+/UEAI2+fQgKe/fCjR8l4TpRgw=="
+ },
+ "debug": {
+ "version": "4.3.4",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
+ "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "decode-named-character-reference": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/decode-named-character-reference/-/decode-named-character-reference-1.0.2.tgz",
+ "integrity": "sha512-O8x12RzrUF8xyVcY0KJowWsmaJxQbmy0/EtnNtHRpsOcT7dFk5W598coHqBVpmWo1oQQfsCqfCmkZN5DJrZVdg==",
+ "requires": {
+ "character-entities": "^2.0.0"
+ }
+ },
+ "dequal": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz",
+ "integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA=="
+ },
+ "diff": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/diff/-/diff-5.1.0.tgz",
+ "integrity": "sha512-D+mk+qE8VC/PAUrlAU34N+VfXev0ghe5ywmpqrawphmVZc1bEfn56uo9qpyGp1p4xpzOHkSW4ztBd6L7Xx4ACw=="
+ },
+ "emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ },
+ "es2015-i18n-tag": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/es2015-i18n-tag/-/es2015-i18n-tag-1.6.1.tgz",
+ "integrity": "sha512-MYoh9p+JTkgnzBh0MEBON6xUyzdmwT6wzsmmFJvZujGSXiI2kM+3XvFl6+AcIO2eeL6VWgtX9szSiDTMwDxyYA=="
+ },
+ "escalade": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
+ "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw=="
+ },
+ "escape-string-regexp": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-5.0.0.tgz",
+ "integrity": "sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw=="
+ },
+ "extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="
+ },
+ "fastestsmallesttextencoderdecoder": {
+ "version": "1.0.22",
+ "resolved": "https://registry.npmjs.org/fastestsmallesttextencoderdecoder/-/fastestsmallesttextencoderdecoder-1.0.22.tgz",
+ "integrity": "sha512-Pb8d48e+oIuY4MaM64Cd7OW1gt4nxCHs7/ddPPZ/Ic3sg8yVGM7O9wDvZ7us6ScaUupzM+pfBolwtYhN1IxBIw=="
+ },
+ "ferrum": {
+ "version": "1.9.4",
+ "resolved": "https://registry.npmjs.org/ferrum/-/ferrum-1.9.4.tgz",
+ "integrity": "sha512-ooNerLoIht/dK4CQJux93z/hnt9JysrXniJCI3r6YRgmHeXC57EJ8XaTCT1Gm8LfhIAeWxyJA0O7d/W3pqDYRg==",
+ "requires": {
+ "fastestsmallesttextencoderdecoder": "1.0.22",
+ "lodash.isplainobject": "4.0.6",
+ "xxhashjs": "0.2.2"
+ }
+ },
+ "fs-extra": {
+ "version": "11.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.1.0.tgz",
+ "integrity": "sha512-0rcTq621PD5jM/e0a3EJoGC/1TC5ZBCERW82LQuwfGnCa1V8w7dpYH1yNu+SLb6E5dkeCBzKEyLGlFrnr+dUyw==",
+ "requires": {
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ }
+ },
+ "get-caller-file": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
+ "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg=="
+ },
+ "github-slugger": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-2.0.0.tgz",
+ "integrity": "sha512-IaOQ9puYtjrkq7Y0Ygl9KDZnrf/aiUJYUpVf89y8kyaxbRG7Y1SrX/jaumrv81vc61+kiMempujsM3Yw7w5qcw=="
+ },
+ "graceful-fs": {
+ "version": "4.2.10",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.10.tgz",
+ "integrity": "sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA=="
+ },
+ "is-buffer": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz",
+ "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ=="
+ },
+ "is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="
+ },
+ "is-plain-obj": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-4.1.0.tgz",
+ "integrity": "sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg=="
+ },
+ "js-yaml": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
+ "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "requires": {
+ "argparse": "^2.0.1"
+ }
+ },
+ "json-schema": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz",
+ "integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA=="
+ },
+ "jsonfile": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
+ "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
+ "requires": {
+ "graceful-fs": "^4.1.6",
+ "universalify": "^2.0.0"
+ }
+ },
+ "kleur": {
+ "version": "4.1.5",
+ "resolved": "https://registry.npmjs.org/kleur/-/kleur-4.1.5.tgz",
+ "integrity": "sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ=="
+ },
+ "lodash.isplainobject": {
+ "version": "4.0.6",
+ "resolved": "https://registry.npmjs.org/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz",
+ "integrity": "sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA=="
+ },
+ "longest-streak": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-3.1.0.tgz",
+ "integrity": "sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g=="
+ },
+ "markdown-table": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-3.0.3.tgz",
+ "integrity": "sha512-Z1NL3Tb1M9wH4XESsCDEksWoKTdlUafKc4pt0GRwjUyXaCFZ+dc3g2erqB6zm3szA2IUSi7VnPI+o/9jnxh9hw=="
+ },
+ "matchit": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/matchit/-/matchit-1.1.0.tgz",
+ "integrity": "sha512-+nGYoOlfHmxe5BW5tE0EMJppXEwdSf8uBA1GTZC7Q77kbT35+VKLYJMzVNWCHSsga1ps1tPYFtFyvxvKzWVmMA==",
+ "requires": {
+ "@arr/every": "^1.0.0"
+ }
+ },
+ "mdast-builder": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/mdast-builder/-/mdast-builder-1.1.1.tgz",
+ "integrity": "sha512-a3KBk/LmYD6wKsWi8WJrGU/rXR4yuF4Men0JO0z6dSZCm5FrXXWTRDjqK0vGSqa+1M6p9edeuypZAZAzSehTUw==",
+ "requires": {
+ "@types/unist": "^2.0.3"
+ }
+ },
+ "mdast-util-find-and-replace": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-find-and-replace/-/mdast-util-find-and-replace-2.2.1.tgz",
+ "integrity": "sha512-SobxkQXFAdd4b5WmEakmkVoh18icjQRxGy5OWTCzgsLRm1Fu/KCtwD1HIQSsmq5ZRjVH0Ehwg6/Fn3xIUk+nKw==",
+ "requires": {
+ "escape-string-regexp": "^5.0.0",
+ "unist-util-is": "^5.0.0",
+ "unist-util-visit-parents": "^5.0.0"
+ }
+ },
+ "mdast-util-from-markdown": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-from-markdown/-/mdast-util-from-markdown-1.2.0.tgz",
+ "integrity": "sha512-iZJyyvKD1+K7QX1b5jXdE7Sc5dtoTry1vzV28UZZe8Z1xVnB/czKntJ7ZAkG0tANqRnBF6p3p7GpU1y19DTf2Q==",
+ "requires": {
+ "@types/mdast": "^3.0.0",
+ "@types/unist": "^2.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "mdast-util-to-string": "^3.1.0",
+ "micromark": "^3.0.0",
+ "micromark-util-decode-numeric-character-reference": "^1.0.0",
+ "micromark-util-decode-string": "^1.0.0",
+ "micromark-util-normalize-identifier": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "unist-util-stringify-position": "^3.0.0",
+ "uvu": "^0.5.0"
+ }
+ },
+ "mdast-util-gfm": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm/-/mdast-util-gfm-2.0.1.tgz",
+ "integrity": "sha512-42yHBbfWIFisaAfV1eixlabbsa6q7vHeSPY+cg+BBjX51M8xhgMacqH9g6TftB/9+YkcI0ooV4ncfrJslzm/RQ==",
+ "requires": {
+ "mdast-util-from-markdown": "^1.0.0",
+ "mdast-util-gfm-autolink-literal": "^1.0.0",
+ "mdast-util-gfm-footnote": "^1.0.0",
+ "mdast-util-gfm-strikethrough": "^1.0.0",
+ "mdast-util-gfm-table": "^1.0.0",
+ "mdast-util-gfm-task-list-item": "^1.0.0",
+ "mdast-util-to-markdown": "^1.0.0"
+ }
+ },
+ "mdast-util-gfm-autolink-literal": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-1.0.2.tgz",
+ "integrity": "sha512-FzopkOd4xTTBeGXhXSBU0OCDDh5lUj2rd+HQqG92Ld+jL4lpUfgX2AT2OHAVP9aEeDKp7G92fuooSZcYJA3cRg==",
+ "requires": {
+ "@types/mdast": "^3.0.0",
+ "ccount": "^2.0.0",
+ "mdast-util-find-and-replace": "^2.0.0",
+ "micromark-util-character": "^1.0.0"
+ }
+ },
+ "mdast-util-gfm-footnote": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-footnote/-/mdast-util-gfm-footnote-1.0.1.tgz",
+ "integrity": "sha512-p+PrYlkw9DeCRkTVw1duWqPRHX6Ywh2BNKJQcZbCwAuP/59B0Lk9kakuAd7KbQprVO4GzdW8eS5++A9PUSqIyw==",
+ "requires": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-to-markdown": "^1.3.0",
+ "micromark-util-normalize-identifier": "^1.0.0"
+ }
+ },
+ "mdast-util-gfm-strikethrough": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-1.0.2.tgz",
+ "integrity": "sha512-T/4DVHXcujH6jx1yqpcAYYwd+z5lAYMw4Ls6yhTfbMMtCt0PHY4gEfhW9+lKsLBtyhUGKRIzcUA2FATVqnvPDA==",
+ "requires": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-to-markdown": "^1.3.0"
+ }
+ },
+ "mdast-util-gfm-table": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-table/-/mdast-util-gfm-table-1.0.6.tgz",
+ "integrity": "sha512-uHR+fqFq3IvB3Rd4+kzXW8dmpxUhvgCQZep6KdjsLK4O6meK5dYZEayLtIxNus1XO3gfjfcIFe8a7L0HZRGgag==",
+ "requires": {
+ "@types/mdast": "^3.0.0",
+ "markdown-table": "^3.0.0",
+ "mdast-util-from-markdown": "^1.0.0",
+ "mdast-util-to-markdown": "^1.3.0"
+ }
+ },
+ "mdast-util-gfm-task-list-item": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-1.0.1.tgz",
+ "integrity": "sha512-KZ4KLmPdABXOsfnM6JHUIjxEvcx2ulk656Z/4Balw071/5qgnhz+H1uGtf2zIGnrnvDC8xR4Fj9uKbjAFGNIeA==",
+ "requires": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-to-markdown": "^1.3.0"
+ }
+ },
+ "mdast-util-to-markdown": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-markdown/-/mdast-util-to-markdown-1.4.0.tgz",
+ "integrity": "sha512-IjXARf/O8VGx/pc5SZ7syfydq1DYL9vd92orsG5U0b4GNCmAvXzu+n7sbzfIKrXwB0AVrYk3NV2kXl0AIi9LCA==",
+ "requires": {
+ "@types/mdast": "^3.0.0",
+ "@types/unist": "^2.0.0",
+ "longest-streak": "^3.0.0",
+ "mdast-util-to-string": "^3.0.0",
+ "micromark-util-decode-string": "^1.0.0",
+ "unist-util-visit": "^4.0.0",
+ "zwitch": "^2.0.0"
+ }
+ },
+ "mdast-util-to-string": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-3.1.0.tgz",
+ "integrity": "sha512-n4Vypz/DZgwo0iMHLQL49dJzlp7YtAJP+N07MZHpjPf/5XJuHUWstviF4Mn2jEiR/GNmtnRRqnwsXExk3igfFA=="
+ },
+ "micromark": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/micromark/-/micromark-3.1.0.tgz",
+ "integrity": "sha512-6Mj0yHLdUZjHnOPgr5xfWIMqMWS12zDN6iws9SLuSz76W8jTtAv24MN4/CL7gJrl5vtxGInkkqDv/JIoRsQOvA==",
+ "requires": {
+ "@types/debug": "^4.0.0",
+ "debug": "^4.0.0",
+ "decode-named-character-reference": "^1.0.0",
+ "micromark-core-commonmark": "^1.0.1",
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-chunked": "^1.0.0",
+ "micromark-util-combine-extensions": "^1.0.0",
+ "micromark-util-decode-numeric-character-reference": "^1.0.0",
+ "micromark-util-encode": "^1.0.0",
+ "micromark-util-normalize-identifier": "^1.0.0",
+ "micromark-util-resolve-all": "^1.0.0",
+ "micromark-util-sanitize-uri": "^1.0.0",
+ "micromark-util-subtokenize": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.1",
+ "uvu": "^0.5.0"
+ }
+ },
+ "micromark-core-commonmark": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/micromark-core-commonmark/-/micromark-core-commonmark-1.0.6.tgz",
+ "integrity": "sha512-K+PkJTxqjFfSNkfAhp4GB+cZPfQd6dxtTXnf+RjZOV7T4EEXnvgzOcnp+eSTmpGk9d1S9sL6/lqrgSNn/s0HZA==",
+ "requires": {
+ "decode-named-character-reference": "^1.0.0",
+ "micromark-factory-destination": "^1.0.0",
+ "micromark-factory-label": "^1.0.0",
+ "micromark-factory-space": "^1.0.0",
+ "micromark-factory-title": "^1.0.0",
+ "micromark-factory-whitespace": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-chunked": "^1.0.0",
+ "micromark-util-classify-character": "^1.0.0",
+ "micromark-util-html-tag-name": "^1.0.0",
+ "micromark-util-normalize-identifier": "^1.0.0",
+ "micromark-util-resolve-all": "^1.0.0",
+ "micromark-util-subtokenize": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.1",
+ "uvu": "^0.5.0"
+ }
+ },
+ "micromark-extension-gfm": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm/-/micromark-extension-gfm-2.0.1.tgz",
+ "integrity": "sha512-p2sGjajLa0iYiGQdT0oelahRYtMWvLjy8J9LOCxzIQsllMCGLbsLW+Nc+N4vi02jcRJvedVJ68cjelKIO6bpDA==",
+ "requires": {
+ "micromark-extension-gfm-autolink-literal": "^1.0.0",
+ "micromark-extension-gfm-footnote": "^1.0.0",
+ "micromark-extension-gfm-strikethrough": "^1.0.0",
+ "micromark-extension-gfm-table": "^1.0.0",
+ "micromark-extension-gfm-tagfilter": "^1.0.0",
+ "micromark-extension-gfm-task-list-item": "^1.0.0",
+ "micromark-util-combine-extensions": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "micromark-extension-gfm-autolink-literal": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-1.0.3.tgz",
+ "integrity": "sha512-i3dmvU0htawfWED8aHMMAzAVp/F0Z+0bPh3YrbTPPL1v4YAlCZpy5rBO5p0LPYiZo0zFVkoYh7vDU7yQSiCMjg==",
+ "requires": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-sanitize-uri": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ }
+ },
+ "micromark-extension-gfm-footnote": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-footnote/-/micromark-extension-gfm-footnote-1.0.4.tgz",
+ "integrity": "sha512-E/fmPmDqLiMUP8mLJ8NbJWJ4bTw6tS+FEQS8CcuDtZpILuOb2kjLqPEeAePF1djXROHXChM/wPJw0iS4kHCcIg==",
+ "requires": {
+ "micromark-core-commonmark": "^1.0.0",
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-normalize-identifier": "^1.0.0",
+ "micromark-util-sanitize-uri": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ }
+ },
+ "micromark-extension-gfm-strikethrough": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-1.0.4.tgz",
+ "integrity": "sha512-/vjHU/lalmjZCT5xt7CcHVJGq8sYRm80z24qAKXzaHzem/xsDYb2yLL+NNVbYvmpLx3O7SYPuGL5pzusL9CLIQ==",
+ "requires": {
+ "micromark-util-chunked": "^1.0.0",
+ "micromark-util-classify-character": "^1.0.0",
+ "micromark-util-resolve-all": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ }
+ },
+ "micromark-extension-gfm-table": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-table/-/micromark-extension-gfm-table-1.0.5.tgz",
+ "integrity": "sha512-xAZ8J1X9W9K3JTJTUL7G6wSKhp2ZYHrFk5qJgY/4B33scJzE2kpfRL6oiw/veJTbt7jiM/1rngLlOKPWr1G+vg==",
+ "requires": {
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ }
+ },
+ "micromark-extension-gfm-tagfilter": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-1.0.1.tgz",
+ "integrity": "sha512-Ty6psLAcAjboRa/UKUbbUcwjVAv5plxmpUTy2XC/3nJFL37eHej8jrHrRzkqcpipJliuBH30DTs7+3wqNcQUVA==",
+ "requires": {
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "micromark-extension-gfm-task-list-item": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-1.0.3.tgz",
+ "integrity": "sha512-PpysK2S1Q/5VXi72IIapbi/jliaiOFzv7THH4amwXeYXLq3l1uo8/2Be0Ac1rEwK20MQEsGH2ltAZLNY2KI/0Q==",
+ "requires": {
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ }
+ },
+ "micromark-factory-destination": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-factory-destination/-/micromark-factory-destination-1.0.0.tgz",
+ "integrity": "sha512-eUBA7Rs1/xtTVun9TmV3gjfPz2wEwgK5R5xcbIM5ZYAtvGF6JkyaDsj0agx8urXnO31tEO6Ug83iVH3tdedLnw==",
+ "requires": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "micromark-factory-label": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-factory-label/-/micromark-factory-label-1.0.2.tgz",
+ "integrity": "sha512-CTIwxlOnU7dEshXDQ+dsr2n+yxpP0+fn271pu0bwDIS8uqfFcumXpj5mLn3hSC8iw2MUr6Gx8EcKng1dD7i6hg==",
+ "requires": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ }
+ },
+ "micromark-factory-space": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-factory-space/-/micromark-factory-space-1.0.0.tgz",
+ "integrity": "sha512-qUmqs4kj9a5yBnk3JMLyjtWYN6Mzfcx8uJfi5XAveBniDevmZasdGBba5b4QsvRcAkmvGo5ACmSUmyGiKTLZew==",
+ "requires": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "micromark-factory-title": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-factory-title/-/micromark-factory-title-1.0.2.tgz",
+ "integrity": "sha512-zily+Nr4yFqgMGRKLpTVsNl5L4PMu485fGFDOQJQBl2NFpjGte1e86zC0da93wf97jrc4+2G2GQudFMHn3IX+A==",
+ "requires": {
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ }
+ },
+ "micromark-factory-whitespace": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-factory-whitespace/-/micromark-factory-whitespace-1.0.0.tgz",
+ "integrity": "sha512-Qx7uEyahU1lt1RnsECBiuEbfr9INjQTGa6Err+gF3g0Tx4YEviPbqqGKNv/NrBaE7dVHdn1bVZKM/n5I/Bak7A==",
+ "requires": {
+ "micromark-factory-space": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "micromark-util-character": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-character/-/micromark-util-character-1.1.0.tgz",
+ "integrity": "sha512-agJ5B3unGNJ9rJvADMJ5ZiYjBRyDpzKAOk01Kpi1TKhlT1APx3XZk6eN7RtSz1erbWHC2L8T3xLZ81wdtGRZzg==",
+ "requires": {
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "micromark-util-chunked": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-chunked/-/micromark-util-chunked-1.0.0.tgz",
+ "integrity": "sha512-5e8xTis5tEZKgesfbQMKRCyzvffRRUX+lK/y+DvsMFdabAicPkkZV6gO+FEWi9RfuKKoxxPwNL+dFF0SMImc1g==",
+ "requires": {
+ "micromark-util-symbol": "^1.0.0"
+ }
+ },
+ "micromark-util-classify-character": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-classify-character/-/micromark-util-classify-character-1.0.0.tgz",
+ "integrity": "sha512-F8oW2KKrQRb3vS5ud5HIqBVkCqQi224Nm55o5wYLzY/9PwHGXC01tr3d7+TqHHz6zrKQ72Okwtvm/xQm6OVNZA==",
+ "requires": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "micromark-util-combine-extensions": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-combine-extensions/-/micromark-util-combine-extensions-1.0.0.tgz",
+ "integrity": "sha512-J8H058vFBdo/6+AsjHp2NF7AJ02SZtWaVUjsayNFeAiydTxUwViQPxN0Hf8dp4FmCQi0UUFovFsEyRSUmFH3MA==",
+ "requires": {
+ "micromark-util-chunked": "^1.0.0",
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "micromark-util-decode-numeric-character-reference": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-numeric-character-reference/-/micromark-util-decode-numeric-character-reference-1.0.0.tgz",
+ "integrity": "sha512-OzO9AI5VUtrTD7KSdagf4MWgHMtET17Ua1fIpXTpuhclCqD8egFWo85GxSGvxgkGS74bEahvtM0WP0HjvV0e4w==",
+ "requires": {
+ "micromark-util-symbol": "^1.0.0"
+ }
+ },
+ "micromark-util-decode-string": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-decode-string/-/micromark-util-decode-string-1.0.2.tgz",
+ "integrity": "sha512-DLT5Ho02qr6QWVNYbRZ3RYOSSWWFuH3tJexd3dgN1odEuPNxCngTCXJum7+ViRAd9BbdxCvMToPOD/IvVhzG6Q==",
+ "requires": {
+ "decode-named-character-reference": "^1.0.0",
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-decode-numeric-character-reference": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0"
+ }
+ },
+ "micromark-util-encode": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-encode/-/micromark-util-encode-1.0.1.tgz",
+ "integrity": "sha512-U2s5YdnAYexjKDel31SVMPbfi+eF8y1U4pfiRW/Y8EFVCy/vgxk/2wWTxzcqE71LHtCuCzlBDRU2a5CQ5j+mQA=="
+ },
+ "micromark-util-html-tag-name": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-html-tag-name/-/micromark-util-html-tag-name-1.1.0.tgz",
+ "integrity": "sha512-BKlClMmYROy9UiV03SwNmckkjn8QHVaWkqoAqzivabvdGcwNGMMMH/5szAnywmsTBUzDsU57/mFi0sp4BQO6dA=="
+ },
+ "micromark-util-normalize-identifier": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-normalize-identifier/-/micromark-util-normalize-identifier-1.0.0.tgz",
+ "integrity": "sha512-yg+zrL14bBTFrQ7n35CmByWUTFsgst5JhA4gJYoty4Dqzj4Z4Fr/DHekSS5aLfH9bdlfnSvKAWsAgJhIbogyBg==",
+ "requires": {
+ "micromark-util-symbol": "^1.0.0"
+ }
+ },
+ "micromark-util-resolve-all": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-resolve-all/-/micromark-util-resolve-all-1.0.0.tgz",
+ "integrity": "sha512-CB/AGk98u50k42kvgaMM94wzBqozSzDDaonKU7P7jwQIuH2RU0TeBqGYJz2WY1UdihhjweivStrJ2JdkdEmcfw==",
+ "requires": {
+ "micromark-util-types": "^1.0.0"
+ }
+ },
+ "micromark-util-sanitize-uri": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/micromark-util-sanitize-uri/-/micromark-util-sanitize-uri-1.1.0.tgz",
+ "integrity": "sha512-RoxtuSCX6sUNtxhbmsEFQfWzs8VN7cTctmBPvYivo98xb/kDEoTCtJQX5wyzIYEmk/lvNFTat4hL8oW0KndFpg==",
+ "requires": {
+ "micromark-util-character": "^1.0.0",
+ "micromark-util-encode": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0"
+ }
+ },
+ "micromark-util-subtokenize": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-subtokenize/-/micromark-util-subtokenize-1.0.2.tgz",
+ "integrity": "sha512-d90uqCnXp/cy4G881Ub4psE57Sf8YD0pim9QdjCRNjfas2M1u6Lbt+XZK9gnHL2XFhnozZiEdCa9CNfXSfQ6xA==",
+ "requires": {
+ "micromark-util-chunked": "^1.0.0",
+ "micromark-util-symbol": "^1.0.0",
+ "micromark-util-types": "^1.0.0",
+ "uvu": "^0.5.0"
+ }
+ },
+ "micromark-util-symbol": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/micromark-util-symbol/-/micromark-util-symbol-1.0.1.tgz",
+ "integrity": "sha512-oKDEMK2u5qqAptasDAwWDXq0tG9AssVwAx3E9bBF3t/shRIGsWIRG+cGafs2p/SnDSOecnt6hZPCE2o6lHfFmQ=="
+ },
+ "micromark-util-types": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/micromark-util-types/-/micromark-util-types-1.0.2.tgz",
+ "integrity": "sha512-DCfg/T8fcrhrRKTPjRrw/5LLvdGV7BHySf/1LOZx7TzWZdYRjogNtyNq885z3nNallwr3QUKARjqvHqX1/7t+w=="
+ },
+ "mri": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/mri/-/mri-1.2.0.tgz",
+ "integrity": "sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA=="
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "phin": {
+ "version": "3.7.0",
+ "resolved": "https://registry.npmjs.org/phin/-/phin-3.7.0.tgz",
+ "integrity": "sha512-DqnVNrpYhKGBZppNKprD+UJylMeEKOZxHgPB+ZP6mGzf3uA2uox4Ep9tUm+rUc8WLIdHT3HcAE4X8fhwQA9JKg==",
+ "requires": {
+ "centra": "^2.6.0"
+ }
+ },
+ "picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="
+ },
+ "polka": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/polka/-/polka-0.5.2.tgz",
+ "integrity": "sha512-FVg3vDmCqP80tOrs+OeNlgXYmFppTXdjD5E7I4ET1NjvtNmQrb1/mJibybKkb/d4NA7YWAr1ojxuhpL3FHqdlw==",
+ "requires": {
+ "@polka/url": "^0.5.0",
+ "trouter": "^2.0.1"
+ }
+ },
+ "readdirp": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
+ "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
+ "requires": {
+ "picomatch": "^2.2.1"
+ }
+ },
+ "remark-gfm": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/remark-gfm/-/remark-gfm-3.0.1.tgz",
+ "integrity": "sha512-lEFDoi2PICJyNrACFOfDD3JlLkuSbOa5Wd8EPt06HUdptv8Gn0bxYTdbU/XXQ3swAPkEaGxxPN9cbnMHvVu1Ig==",
+ "requires": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-gfm": "^2.0.0",
+ "micromark-extension-gfm": "^2.0.0",
+ "unified": "^10.0.0"
+ }
+ },
+ "remark-parse": {
+ "version": "10.0.1",
+ "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-10.0.1.tgz",
+ "integrity": "sha512-1fUyHr2jLsVOkhbvPRBJ5zTKZZyD6yZzYaWCS6BPBdQ8vEMBCH+9zNCDA6tET/zHCi/jLqjCWtlJZUPk+DbnFw==",
+ "requires": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-from-markdown": "^1.0.0",
+ "unified": "^10.0.0"
+ }
+ },
+ "remark-stringify": {
+ "version": "10.0.2",
+ "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-10.0.2.tgz",
+ "integrity": "sha512-6wV3pvbPvHkbNnWB0wdDvVFHOe1hBRAx1Q/5g/EpH4RppAII6J8Gnwe7VbHuXaoKIF6LAg6ExTel/+kNqSQ7lw==",
+ "requires": {
+ "@types/mdast": "^3.0.0",
+ "mdast-util-to-markdown": "^1.0.0",
+ "unified": "^10.0.0"
+ }
+ },
+ "require-directory": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
+ "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q=="
+ },
+ "sade": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/sade/-/sade-1.8.1.tgz",
+ "integrity": "sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==",
+ "requires": {
+ "mri": "^1.1.0"
+ }
+ },
+ "string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "requires": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ }
+ },
+ "strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "requires": {
+ "ansi-regex": "^5.0.1"
+ }
+ },
+ "trough": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/trough/-/trough-2.1.0.tgz",
+ "integrity": "sha512-AqTiAOLcj85xS7vQ8QkAV41hPDIJ71XJB4RCUrzo/1GM2CQwhkJGaf9Hgr7BOugMRpgGUrqRg/DrBDl4H40+8g=="
+ },
+ "trouter": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/trouter/-/trouter-2.0.1.tgz",
+ "integrity": "sha512-kr8SKKw94OI+xTGOkfsvwZQ8mWoikZDd2n8XZHjJVZUARZT+4/VV6cacRS6CLsH9bNm+HFIPU1Zx4CnNnb4qlQ==",
+ "requires": {
+ "matchit": "^1.0.0"
+ }
+ },
+ "unified": {
+ "version": "10.1.2",
+ "resolved": "https://registry.npmjs.org/unified/-/unified-10.1.2.tgz",
+ "integrity": "sha512-pUSWAi/RAnVy1Pif2kAoeWNBa3JVrx0MId2LASj8G+7AiHWoKZNTomq6LG326T68U7/e263X6fTdcXIy7XnF7Q==",
+ "requires": {
+ "@types/unist": "^2.0.0",
+ "bail": "^2.0.0",
+ "extend": "^3.0.0",
+ "is-buffer": "^2.0.0",
+ "is-plain-obj": "^4.0.0",
+ "trough": "^2.0.0",
+ "vfile": "^5.0.0"
+ }
+ },
+ "unist-util-inspect": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/unist-util-inspect/-/unist-util-inspect-7.0.1.tgz",
+ "integrity": "sha512-gEPeSrsYXus8012VJ00p9uZC8D0iogtLLiHlBgvS61hU22KNKduQhMKezJm83viHlLf3TYS2y9SDEFglWPDMKw==",
+ "requires": {
+ "@types/unist": "^2.0.0"
+ }
+ },
+ "unist-util-is": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-5.1.1.tgz",
+ "integrity": "sha512-F5CZ68eYzuSvJjGhCLPL3cYx45IxkqXSetCcRgUXtbcm50X2L9oOWQlfUfDdAf+6Pd27YDblBfdtmsThXmwpbQ=="
+ },
+ "unist-util-stringify-position": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-3.0.2.tgz",
+ "integrity": "sha512-7A6eiDCs9UtjcwZOcCpM4aPII3bAAGv13E96IkawkOAW0OhH+yRxtY0lzo8KiHpzEMfH7Q+FizUmwp8Iqy5EWg==",
+ "requires": {
+ "@types/unist": "^2.0.0"
+ }
+ },
+ "unist-util-visit": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-4.1.1.tgz",
+ "integrity": "sha512-n9KN3WV9k4h1DxYR1LoajgN93wpEi/7ZplVe02IoB4gH5ctI1AaF2670BLHQYbwj+pY83gFtyeySFiyMHJklrg==",
+ "requires": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0",
+ "unist-util-visit-parents": "^5.1.1"
+ }
+ },
+ "unist-util-visit-parents": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-5.1.1.tgz",
+ "integrity": "sha512-gks4baapT/kNRaWxuGkl5BIhoanZo7sC/cUT/JToSRNL1dYoXRFl75d++NkjYk4TAu2uv2Px+l8guMajogeuiw==",
+ "requires": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^5.0.0"
+ }
+ },
+ "universalify": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz",
+ "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ=="
+ },
+ "uvu": {
+ "version": "0.5.6",
+ "resolved": "https://registry.npmjs.org/uvu/-/uvu-0.5.6.tgz",
+ "integrity": "sha512-+g8ENReyr8YsOc6fv/NVJs2vFdHBnBNdfE49rshrTzDWOlUx4Gq7KOS2GD8eqhy2j+Ejq29+SbKH8yjkAqXqoA==",
+ "requires": {
+ "dequal": "^2.0.0",
+ "diff": "^5.0.0",
+ "kleur": "^4.0.3",
+ "sade": "^1.7.3"
+ }
+ },
+ "vfile": {
+ "version": "5.3.6",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-5.3.6.tgz",
+ "integrity": "sha512-ADBsmerdGBs2WYckrLBEmuETSPyTD4TuLxTrw0DvjirxW1ra4ZwkbzG8ndsv3Q57smvHxo677MHaQrY9yxH8cA==",
+ "requires": {
+ "@types/unist": "^2.0.0",
+ "is-buffer": "^2.0.0",
+ "unist-util-stringify-position": "^3.0.0",
+ "vfile-message": "^3.0.0"
+ }
+ },
+ "vfile-message": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-3.1.3.tgz",
+ "integrity": "sha512-0yaU+rj2gKAyEk12ffdSbBfjnnj+b1zqTBv3OQCTn8yEB02bsPizwdBPrLJjHnK+cU9EMMcUnNv938XcZIkmdA==",
+ "requires": {
+ "@types/unist": "^2.0.0",
+ "unist-util-stringify-position": "^3.0.0"
+ }
+ },
+ "wrap-ansi": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "requires": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ }
+ },
+ "xxhashjs": {
+ "version": "0.2.2",
+ "resolved": "https://registry.npmjs.org/xxhashjs/-/xxhashjs-0.2.2.tgz",
+ "integrity": "sha512-AkTuIuVTET12tpsVIQo+ZU6f/qDmKuRUcjaqR+OIvm+aCBsZ95i7UVY5WJ9TMsSaZ0DA2WxoZ4acu0sPH+OKAw==",
+ "requires": {
+ "cuint": "^0.2.2"
+ }
+ },
+ "y18n": {
+ "version": "5.0.8",
+ "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz",
+ "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA=="
+ },
+ "yargs": {
+ "version": "17.6.2",
+ "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.6.2.tgz",
+ "integrity": "sha512-1/9UrdHjDZc0eOU0HxOHoS78C69UD3JRMvzlJ7S79S2nTaWRA/whGCTV8o9e/N/1Va9YIV7Q4sOxD8VV4pCWOw==",
+ "requires": {
+ "cliui": "^8.0.1",
+ "escalade": "^3.1.1",
+ "get-caller-file": "^2.0.5",
+ "require-directory": "^2.1.1",
+ "string-width": "^4.2.3",
+ "y18n": "^5.0.5",
+ "yargs-parser": "^21.1.1"
+ }
+ },
+ "yargs-parser": {
+ "version": "21.1.1",
+ "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz",
+ "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw=="
+ },
+ "zwitch": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz",
+ "integrity": "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A=="
+ }
+ }
+}
diff --git a/doc/_static/package.json b/doc/_static/package.json
new file mode 100644
index 00000000..7e3d4e4a
--- /dev/null
+++ b/doc/_static/package.json
@@ -0,0 +1,5 @@
+{
+ "dependencies": {
+ "@adobe/jsonschema2md": "^7.1.5"
+ }
+}
diff --git a/doc/architecture-gc.rst b/doc/architecture-gc.rst
new file mode 100644
index 00000000..b57c857c
--- /dev/null
+++ b/doc/architecture-gc.rst
@@ -0,0 +1,12 @@
+*****************
+``kres-cache-gc``
+*****************
+
+The garbage collector is a simple component which keeps the shared cache from overfilling.
+Every second it estimates the cache usage and, if it exceeds 80%, deletes records in order to free 10%. (Both parameters can be configured.)
+
+The freeing happens in a few passes. First, all items are classified by their estimated usefulness, in a simple way based on remaining TTL, record type, etc.
+From the resulting histogram it is computed which "level of usefulness" becomes the deletion threshold, so that roughly the planned total size gets freed.
+Then all items are scanned again to collect the set of keys to delete, and finally the deletion is performed.
+As longer transactions can cause issues in LMDB, all passes are split into short batches.
+
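+The threshold selection can be illustrated with a short Python sketch (the names and the pair-based input are illustrative assumptions; the real collector works directly over the LMDB data):
+
+.. code-block:: python
+
+ from collections import Counter
+
+ def pick_usefulness_threshold(items, bytes_to_free):
+     """Pick the "level of usefulness" at or below which items get deleted.
+
+     `items` yields (usefulness_level, size_in_bytes) pairs, lower levels
+     being less useful. Returns the highest level whose removal, together
+     with all lower levels, frees roughly `bytes_to_free`.
+     """
+     histogram = Counter()
+     for level, size in items:
+         histogram[level] += size
+
+     freed, threshold = 0, None
+     for level in sorted(histogram):  # least useful first
+         if freed >= bytes_to_free:
+             break
+         freed += histogram[level]
+         threshold = level
+     return threshold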
diff --git a/doc/architecture-kresd.rst b/doc/architecture-kresd.rst
new file mode 100644
index 00000000..783fbb8a
--- /dev/null
+++ b/doc/architecture-kresd.rst
@@ -0,0 +1,3 @@
+*********
+``kresd``
+********* \ No newline at end of file
diff --git a/doc/architecture-manager.drawio b/doc/architecture-manager.drawio
new file mode 100644
index 00000000..aa8c4e7e
--- /dev/null
+++ b/doc/architecture-manager.drawio
@@ -0,0 +1 @@
+<mxfile host="Electron" modified="2023-02-13T14:53:19.113Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) draw.io/20.8.16 Chrome/106.0.5249.199 Electron/21.4.0 Safari/537.36" etag="AMbarg0B8e5MX17HW2UZ" version="20.8.16" type="device"><diagram name="Page-1" id="veOfMoMBw9sVscwcjaa1">5ZhbU6MwFMc/TR91gADWx17cVUdnnNGZ3fUtQgrRwMEQevHT76GEUqBWdre2dfZFk39Obuf8TkjTI6No/l3SJLwFn4meZfjzHhn3LMs0+w7+y5VFoZxZdiEEkvvaqBLu+RvToqHVjPssrRkqAKF4Uhc9iGPmqZpGpYRZ3WwCoj5rQgPWEu49KtrqD+6rsFD7jlHpl4wHYTmzaeiWiJbGWkhD6sNsTSIXPTKSAKooRfMRE7nzSr8U/b6907pamGSx6tLh+eHx4bb/OibXl9cW4dGgf+Wf6FGmVGR6wxGNcU9Sr1ktSkdIyGKf5WMZPTKchVyx+4R6eesMQ49aqCKBNROLEy7ECARIrMcQo9EwRWMeBw+Q6CEEfWLiDlKuOMSoebgPnJcMp0wqjhG4aRiovOuQCh5sNB/ohidQCqJqRpROLAfreqtozubv+tBcRQaRZhAxJRdoUnY408HUNJtlcGcVG26phWtcWCUWVPMYrMauQoYFHbU/ieBZK4SYCxMe5EkR0hjxtlyByxg+YVDdIC9J9pqxVLVizHxkX1dBqhACiKm4qNRhnYLK5gaWcc1j/8yUWuhEppmCOhnoaLn4qfsvK7/yyqlTVsfz9cbxYp2WIfVeguUSGnAVO8mXvz2wuFvIpMe2+NPShwyVAVNb7MhWUE6MU8N1dWgkE1TxaX1tm0DQw90Bx1VXJjCZpEy1SFnN+vfwWC12WkzgsZXkxSwSA0/BtgRdZV4zJYvMXaMAMiV4zEarQ9vYfGjsIGWtRsaetzOWbEhY97Py9fwrZd0O84p0zCt7czQ759A/BYe08mFwd7XzT+FOqP4Ya3ufWJf3tf1yvUM+7Y58Oofk027xiVtN80vOMTLa/5hRd6+Mki/OqNORUdM6JKROC1Iscp8ubwn559il0fImv/yLSgwyQos3bXF8JJMOJJubrv2fhnK/5eM0S5ic8hSkf4wudO2GC+1Du9C0v/hpYHb9qdI/5Glguv+Lm82DXg3KZe7hKWcHx4HTPA4Ofjdof7VeJEv93vJpsXhOCVjM5NF+pJonrPWJJyxWq4fL4jGiev4lF78B</diagram></mxfile> \ No newline at end of file
diff --git a/doc/architecture-manager.rst b/doc/architecture-manager.rst
new file mode 100644
index 00000000..bf663edd
--- /dev/null
+++ b/doc/architecture-manager.rst
@@ -0,0 +1,57 @@
+****************
+``kres-manager``
+****************
+
+.. note::
+ This guide is intended for advanced users and developers. You don't need to know or understand any of this to use Knot Resolver.
+
+The manager is a component written in Python, with a bit of C for native extension modules. The main goal of the manager is to ensure the system is set up according to a given configuration and to provide a user-friendly interface. Performance is only secondary to correctness.
+
+The manager is mostly modelled around a config processing pipeline:
+
+.. image:: architecture-manager.svg
+ :width: 100%
+ :alt: Diagram showing a configuration change request processing pipeline inside the manager. The request first goes through an API server, then through parsing, validation and normalization steps, then into the actual system manager, which commands supervisord and other system components such as kresd.
+
+
+API
+===
+
+The API server is implemented using `aiohttp <https://docs.aiohttp.org/en/stable>`_. This framework provides the application skeleton and manages the application runtime. The manager is actually a normal web application, with the slight difference that we don't save the data in a database but rather modify the state of other processes.
+
+The code of the API server is located in a `single source code file <https://gitlab.nic.cz/knot/knot-resolver/-/blob/manager/manager/knot_resolver_manager/server.py>`_, which also contains a description of the manager's startup procedure.
+
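+As an illustration, a minimal aiohttp application in the same spirit might look as follows (the route and handler are hypothetical, not the manager's actual API):
+
+.. code-block:: python
+
+ from aiohttp import web
+
+ async def apply_config(request: web.Request) -> web.Response:
+     # A real handler would parse and validate the payload and then push
+     # the resulting state to other processes, instead of storing it.
+     new_config = await request.text()
+     return web.Response(text=f"received {len(new_config)} bytes of config\n")
+
+ app = web.Application()
+ app.add_routes([web.post("/v1/config", apply_config)])
+
+ if __name__ == "__main__":
+     web.run_app(app, path="/tmp/manager.sock")  # serve on a Unix socket
+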
+Config processing
+=================
+
+From the web framework, we receive data as plain strings, which we need to parse and validate. Due to packaging issues in distros, we rolled our own solution, not dissimilar to the Python library `Pydantic <https://docs.pydantic.dev/>`_.
+
+Our tool lets us model a config schema similarly to how Python's native dataclasses are constructed. As input, it takes Python dicts produced by the PyYAML or JSON parser. The dict is mapped onto predefined Python classes while typing rules are enforced. If desired, the mapping step is performed multiple times onto different classes, which allows us to process intermediary values such as ``auto``.
+
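+A toy version of such a mapping layer (purely illustrative, not the actual ``knot_resolver_manager`` code) could look like this:
+
+.. code-block:: python
+
+ import typing
+
+ class ConfigSchema:
+     """Map a plain dict onto attributes declared through type annotations."""
+
+     def __init__(self, source: dict):
+         for name, expected in typing.get_type_hints(type(self)).items():
+             if name not in source:
+                 raise ValueError(f"missing required key '{name}'")
+             if not isinstance(source[name], expected):
+                 raise TypeError(f"'{name}' must be of type {expected.__name__}")
+             setattr(self, name, source[name])
+
+ class CacheConfig(ConfigSchema):
+     storage: str
+     size_max: int
+
+ # the dict would come from PyYAML or the json module
+ cache = CacheConfig({"storage": "/var/cache/knot-resolver", "size_max": 100_000_000})
+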
+There are two relevant places in the source code: `our generic modelling tools <https://gitlab.nic.cz/knot/knot-resolver/-/tree/manager/manager/knot_resolver_manager/utils/modeling>`_ and the actual `configuration data model <https://gitlab.nic.cz/knot/knot-resolver/-/tree/manager/manager/knot_resolver_manager/datamodel>`_. Just next to the data model, in the ``templates`` directory, there are Jinja2 templates for generating Lua code from the configuration.
+
+
+Actual manager
+==============
+
+The actual core of the whole application is itself named the manager. It keeps a high-level view of the system's state and performs all operations necessary to bring it to the desired one. In other words, the manager is the component handling rolling restarts, config update logic and more.
+
+The code is contained mainly in a `single source code file <https://gitlab.nic.cz/knot/knot-resolver/-/blob/manager/manager/knot_resolver_manager/kres_manager.py>`_.
+
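+The rolling-restart logic described above can be pictured roughly like this (a conceptual sketch against a made-up backend interface, not the real code):
+
+.. code-block:: python
+
+ def rolling_restart(backend, old_instances, new_config):
+     """Replace kresd instances one by one, so the service keeps running."""
+     for old in old_instances:
+         new = backend.start_instance(new_config)
+         backend.wait_until_healthy(new)
+         # stop the old instance only once its replacement is serving
+         backend.stop_instance(old)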
+
+Interactions with supervisord
+=============================
+
+.. note::
+ Let's make a sidestep and talk about abstractions. The manager component mentioned above interacts with a general backend (or, as we sometimes call it, a subprocess manager). The idea is that the interactions with the backend do not depend on the backend's implementation, so we can choose which one we want to use. Historically, we had two different backend implementations: systemd and supervisord. However, systemd turned out to be inappropriate as it did not fit our needs, so we removed it. The `abstraction remains <https://gitlab.nic.cz/knot/knot-resolver/-/blob/manager/manager/knot_resolver_manager/kresd_controller/interface.py>`_ though, and it should be possible to implement a different subprocess manager if that turns out to be useful. Please note, though, that the abstraction might be somewhat leaky in practice as there is only one implementation.
+
+Communication with supervisord happens on pretty much all possible levels. We edit its configuration file, we use its XMLRPC API, we use Unix signals, and we even attach to it from within its Python runtime. The interface is honestly a bit messy and we had to use everything we could to make it user-friendly.
+
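+For example, talking to supervisord's XMLRPC API over its Unix socket takes only stock Python plus the ``supervisor`` package; a small sketch (the socket path and program name below are assumptions):
+
+.. code-block:: python
+
+ from xmlrpc.client import ServerProxy
+ from supervisor.xmlrpc import SupervisorTransport
+
+ # the HTTP URL is a dummy; the transport sends everything over the socket
+ proxy = ServerProxy(
+     "http://127.0.0.1",
+     transport=SupervisorTransport(None, None, "unix:///run/knot-resolver/supervisord.sock"),
+ )
+
+ print(proxy.supervisor.getState())       # e.g. {'statecode': 1, 'statename': 'RUNNING'}
+ proxy.supervisor.startProcess("kresd1")  # start an already configured program
+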
+First, we `generate supervisord's configuration file <https://gitlab.nic.cz/knot/knot-resolver/-/blob/manager/manager/knot_resolver_manager/kresd_controller/supervisord/supervisord.conf.j2>`_. The configuration file sets the stage for further communication by specifying the location of the pidfile and of the API Unix socket. It prepares the configuration for subprocesses and, most significantly, it loads our custom extensions.
+
+`The extensions <https://gitlab.nic.cz/knot/knot-resolver/-/tree/manager/manager/knot_resolver_manager/kresd_controller/supervisord/plugin>`_ don't use a lot of code. There are four of them: the simplest one provides a speedier XMLRPC API for starting processes, removing delays that are unnecessary for our use case. Another one implements systemd's ``sd_notify()`` API for supervisord, so we can track the lifecycle of ``kresd`` instances more precisely. Yet another changes the way logging works, and the last one monitors the lifecycle of the manager and forwards some signals.
+
+.. note::
+ The extensions mentioned above use monkeypatching to achieve their design goals. We settled for this approach because supervisord's codebase appears mostly stable; the code we patch has not been changed for years. The other option would be forking supervisord and vendoring it. We decided against that mainly due to the packaging complications it would cause with major Linux distributions.
+
+For executing subprocesses, we don't actually change the configuration file; we only use the XMLRPC API and tell supervisord to start already configured programs. For one specific call though, we use our extension instead of the built-in method of starting processes, as it is significantly faster. \ No newline at end of file
diff --git a/doc/architecture-manager.svg b/doc/architecture-manager.svg
new file mode 100644
index 00000000..4408bfb5
--- /dev/null
+++ b/doc/architecture-manager.svg
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="771px" height="201px" viewBox="-0.5 -0.5 771 201"><defs/><g><rect x="150" y="0" width="620" height="200" fill="none" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe flex-end; justify-content: unsafe center; width: 672px; height: 1px; padding-top: 24px; margin-left: 124px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">manager</div></div></div></foreignObject><text x="460" y="24" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">manager</text></switch></g><path d="M 30 100 L 173.63 100" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 178.88 100 L 171.88 103.5 L 173.63 100 L 171.88 96.5 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 1px; height: 1px; padding-top: 100px; margin-left: 100px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 11px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: nowrap;">config change<br />request</div></div></div></foreignObject><text x="100" y="103" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="11px" text-anchor="middle">config change...</text></switch></g><ellipse cx="15" cy="77.5" rx="7.5" ry="7.5" fill="none" stroke="rgb(0, 0, 0)" pointer-events="all"/><path d="M 15 85 L 15 110 M 15 90 L 0 90 M 15 90 L 30 90 M 15 110 L 0 130 M 15 110 L 30 130" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><path d="M 220 100 L 253.63 100" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 258.88 100 L 251.88 103.5 L 253.63 100 L 251.88 96.5 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><rect x="180" y="70" width="40" height="60" fill="none" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 38px; height: 1px; padding-top: 100px; margin-left: 181px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: 
rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">API</div></div></div></foreignObject><text x="200" y="104" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">API</text></switch></g><path d="M 320 100 L 353.63 100" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 358.88 100 L 351.88 103.5 L 353.63 100 L 351.88 96.5 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><rect x="260" y="70" width="60" height="60" fill="none" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 58px; height: 1px; padding-top: 100px; margin-left: 261px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">parsing</div></div></div></foreignObject><text x="290" y="104" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">parsing</text></switch></g><path d="M 480 100 L 513.63 100" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 518.88 100 L 511.88 103.5 L 513.63 100 L 511.88 96.5 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><rect x="360" y="70" width="120" height="60" fill="none" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 100px; margin-left: 361px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">validation &amp; normalization</div></div></div></foreignObject><text x="420" y="104" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">validation &amp; normali...</text></switch></g><rect x="620" y="20" width="120" height="60" fill="none" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 50px; margin-left: 621px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; 
pointer-events: all; white-space: normal; overflow-wrap: normal;">supervisord</div></div></div></foreignObject><text x="680" y="54" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">supervisord</text></switch></g><path d="M 580 100 L 600.03 100 L 600.03 50 L 613.63 50" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 618.88 50 L 611.88 53.5 L 613.63 50 L 611.88 46.5 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><path d="M 580 100 L 600.03 100 L 600.03 150 L 613.63 150" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 618.88 150 L 611.88 153.5 L 613.63 150 L 611.88 146.5 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><rect x="520" y="70" width="60" height="60" fill="none" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 58px; height: 1px; padding-top: 100px; margin-left: 521px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">manager</div></div></div></foreignObject><text x="550" y="104" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">manager</text></switch></g><rect x="620" y="120" width="120" height="60" fill="none" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 150px; margin-left: 621px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">kresd config generation</div></div></div></foreignObject><text x="680" y="154" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">kresd config generat...</text></switch></g></g><switch><g requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"/><a transform="translate(0,-5)" xlink:href="https://www.diagrams.net/doc/faq/svg-export-text-problems" target="_blank"><text text-anchor="middle" font-size="10px" x="50%" y="100%">Text is not SVG - cannot display</text></a></switch></svg> \ No newline at end of file
diff --git a/doc/architecture-schema.drawio b/doc/architecture-schema.drawio
new file mode 100644
index 00000000..c58a4b9b
--- /dev/null
+++ b/doc/architecture-schema.drawio
@@ -0,0 +1 @@
+<mxfile host="www.diagrameditor.com" modified="2023-02-13T13:13:34.892Z" agent="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36" etag="Tj-oP8z-b8JEGXK27IVH" version="12.1.3" type="device" pages="1"><diagram id="fziuQaxv5VFMaYUHGL-k" name="Page-1">7Vlbb5swGP01eezENSGPbZptmlZpUqXu8uaAA14MRsaQsF8/G0zAkNIkLQlaF1WKffwZ2+ccm8/NxFyEu08UxMED8SCeGJq3m5j3E8PQ9emcfwkkLxF7apeAT5Eng2rgEf2BEtQkmiIPJkogIwQzFKugS6IIukzBAKVkq4atCVZHjYEPO8CjC3AX/Y48FpSoY8xq/DNEfsDaCw5BFSxXkgTAI9sGZC4n5oISwspSuFtALMireCn7fXymdT8xCiN2TIdsY66fwiewZpuHmy9LB/7arm4cOTeWVwuGHl+/rBLKAuKTCOBljd5RkkYeFE/VeO13GsZVfEQiEVB3+0pIzHFdxEHGcqkvSBnhUMBCLFu7i5HrS0hKXdizgsoUgPqQ9cSZZZxYXmMASdUnSELIaM4DKMSAoUyVH0gX+fs42fWWUpA3AmKCIpY0nvxNADxAbgjDkm6Q22HW0uykcF4ox69qjYXUUGGDEywxfzeWsEZhCXN6kiX6w4exRHUYvwNP2KPwhOWoIs/7PdEfPpAn9HfjCWMUntA1VWSn3xP94QN5ohwxAziVJCRpDGmGEkK9jl1UM2wDxOBjDArFtjyZPFLlDFIGd726VLtEJaTKy7Z1WmfJw1QLGild1e2QkA06z2DLGMkOclOaFY8UlTXCeEEwocWMzPUaTl2X4wmjZAMbLd5svtLEJDyQBPvu5+9G48jdqI9jN1onndCGdoUT2jzDX4oXhjCb4pbTzTa8v8ZxUWj7S3/ppmD0xg/kMOsfdBg3Fs1/iBl9MOyq/lM+rqjc7+R8y1oua8Nbc5gLS/eK0fae3XoFlhOVvWqTvdrjL6Q0nXldJKex/3v8kh4f5gLWvTK1rtG6Pr+Mx1+6ytm98cN4XGrTyNtDEAEf0o71r5yz6weS9j12kaTd7FA1MaaYiS2JMl70RXFDYeLpVQMfp9F2dUrbqeghTp1LUmodTakxUkrNdvJ1bUrtDqW+e3WWLHNkLOmzZ523JuJQLu3l8jIlOKnaVrQ23t6OzQ4dpjlBTKVTfTXLPID/AYz8iCMup5kfv+adoBe5AN/KhhB5XpFiHNJLVVRM6dDLH4MVxHfA3fhFfHsSrxdab73D5gf+03JAZ2MwnZ2OzjElLkwSkZBQCM8VrJl4SegtBewXqiWvVnzeRsCWfs5g+vFq/QNomc3UPyOby78=</diagram></mxfile> \ No newline at end of file
diff --git a/doc/architecture-schema.svg b/doc/architecture-schema.svg
new file mode 100644
index 00000000..e32251ba
--- /dev/null
+++ b/doc/architecture-schema.svg
@@ -0,0 +1,3 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" width="472px" height="172px" viewBox="-0.5 -0.5 472 172"><defs/><g><path d="M 200 40 L 200 50 L 200 93.63" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 200 98.88 L 196.5 91.88 L 200 93.63 L 203.5 91.88 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><path d="M 320 40 L 320 50 L 320 93.63" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 320 98.88 L 316.5 91.88 L 320 93.63 L 323.5 91.88 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><path d="M 440 40 L 440 70 L 440 93.63" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 440 98.88 L 436.5 91.88 L 440 93.63 L 443.5 91.88 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><path d="M 60 40 L 60 60 L 60 93.63" fill="none" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="stroke"/><path d="M 60 98.88 L 56.5 91.88 L 60 93.63 L 63.5 91.88 Z" fill="rgb(0, 0, 0)" stroke="rgb(0, 0, 0)" stroke-miterlimit="10" pointer-events="all"/><rect x="0" y="0" width="470" height="40" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 468px; height: 1px; padding-top: 20px; margin-left: 1px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">supervisord</div></div></div></foreignObject><text x="235" y="24" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">supervisord</text></switch></g><path d="M 100 100 Q 100 70 130 70 Q 160 70 160 46.37" fill="none" stroke="#d79b00" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="stroke"/><path d="M 160 41.12 L 163.5 48.12 L 160 46.37 L 156.5 48.12 Z" fill="#d79b00" stroke="#d79b00" stroke-miterlimit="10" pointer-events="all"/><path d="M 100 140 Q 100 150 140 150 Q 180 150 180 146.37" fill="none" stroke="#d79b00" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="stroke"/><path d="M 180 141.12 L 183.5 148.12 L 180 146.37 L 176.5 148.12 Z" fill="#d79b00" stroke="#d79b00" stroke-miterlimit="10" pointer-events="all"/><path d="M 100 140 Q 100 160 200 160 Q 300 160 300 146.37" fill="none" stroke="#d79b00" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="stroke"/><path d="M 300 141.12 L 303.5 148.12 L 300 146.37 L 296.5 148.12 Z" fill="#d79b00" stroke="#d79b00" stroke-miterlimit="10" pointer-events="all"/><path d="M 100 140 Q 100 170 255 170 Q 410 170 410 146.37" fill="none" stroke="#d79b00" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="stroke"/><path d="M 410 141.12 L 413.5 148.12 L 410 146.37 L 406.5 148.12 Z" fill="#d79b00" stroke="#d79b00" stroke-miterlimit="10" pointer-events="all"/><rect x="0" y="100" width="120" height="40" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" 
pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 118px; height: 1px; padding-top: 120px; margin-left: 1px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">manager</div></div></div></foreignObject><text x="60" y="124" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">manager</text></switch></g><rect x="160" y="100" width="80" height="40" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 120px; margin-left: 161px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><div>kresd1</div></div></div></div></foreignObject><text x="200" y="124" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">kresd1</text></switch></g><rect x="280" y="100" width="80" height="40" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 120px; margin-left: 281px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><div>kresd2</div></div></div></div></foreignObject><text x="320" y="124" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">kresd2</text></switch></g><rect x="390" y="100" width="80" height="40" fill="rgb(255, 255, 255)" stroke="rgb(0, 0, 0)" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 78px; height: 1px; padding-top: 120px; margin-left: 391px;"><div data-drawio-colors="color: rgb(0, 0, 0); " style="box-sizing: border-box; font-size: 0px; text-align: 
center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">gc</div></div></div></foreignObject><text x="430" y="124" fill="rgb(0, 0, 0)" font-family="Helvetica" font-size="12px" text-anchor="middle">gc</text></switch></g><rect x="110" y="70" width="40" height="20" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 38px; height: 1px; padding-top: 80px; margin-left: 111px;"><div data-drawio-colors="color: #d79b00; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(215, 155, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;"><font>controls<br /></font></div></div></div></foreignObject><text x="130" y="84" fill="#d79b00" font-family="Helvetica" font-size="12px" text-anchor="middle">control...</text></switch></g><rect x="10" y="60" width="40" height="20" fill="none" stroke="none" pointer-events="all"/><g transform="translate(-0.5 -0.5)"><switch><foreignObject pointer-events="none" width="100%" height="100%" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility" style="overflow: visible; text-align: left;"><div xmlns="http://www.w3.org/1999/xhtml" style="display: flex; align-items: unsafe center; justify-content: unsafe center; width: 38px; height: 1px; padding-top: 70px; margin-left: 11px;"><div data-drawio-colors="color: #000000; " style="box-sizing: border-box; font-size: 0px; text-align: center;"><div style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.2; pointer-events: all; white-space: normal; overflow-wrap: normal;">process tree</div></div></div></foreignObject><text x="30" y="74" fill="#000000" font-family="Helvetica" font-size="12px" text-anchor="middle">process...</text></switch></g></g><switch><g requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"/><a transform="translate(0,-5)" xlink:href="https://www.diagrams.net/doc/faq/svg-export-text-problems" target="_blank"><text text-anchor="middle" font-size="10px" x="50%" y="100%">Text is not SVG - cannot display</text></a></switch></svg> \ No newline at end of file
diff --git a/doc/architecture.rst b/doc/architecture.rst
new file mode 100644
index 00000000..10fadf62
--- /dev/null
+++ b/doc/architecture.rst
@@ -0,0 +1,48 @@
+*******************
+System architecture
+*******************
+
+As mentioned in the :ref:`getting started section <gettingstarted-intro>`, Knot Resolver is split into several components, namely the manager, ``kresd`` and the garbage collector. In addition to these custom components, we also rely on `supervisord <http://supervisord.org/>`_.
+
+.. image:: architecture-schema.svg
+ :width: 100%
+ :alt: Diagram showing the process tree and the control relationships between Knot Resolver components. Supervisord is the parent of all processes, namely the manager, kresd instances and the gc. The manager, on the other hand, controls every other component and what it does.
+
+
+There are two different control structures in place. Semantically, the manager controls every other component in Knot Resolver: it processes the configuration and passes it onto every other component. As a user, you will always interact with the manager (or ``kresd``). At the same time though, the manager is not the root of the process hierarchy; supervisord sits at the top of the process tree and runs everything else.
+
+.. note::
+ The rationale for this inverted process hierarchy is mainly stability. Supervisord sits at the top because it is reliable and stable software we can depend upon. It also does not process user input and is therefore shielded from data-processing bugs. This way, any component in Knot Resolver can crash and restart without impacting the rest of the system.
+
+
+Knot Resolver startup
+=====================
+
+The inverted process hierarchy complicates the Resolver's launch procedure. You might notice it when reading the manager's logs just after startup. What happens on a cold start is:
+
+1. The manager starts, reads its configuration and generates a new supervisord configuration. Then it starts supervisord in its place using ``exec`` (see the sketch below).
+2. Supervisord loads its configuration, loads our extensions and starts a new instance of the manager.
+3. The manager starts again, this time as a child of supervisord. As this is the desired state, it loads the configuration again and commands supervisord to start new instances of ``kresd``.
+
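+A minimal sketch of the ``exec`` hand-off from step 1, in Python (the path is illustrative; the real manager generates its own supervisord configuration first):
+
+.. code-block:: python
+
+ import os
+
+ def hand_off_to_supervisord(config_path: str) -> None:
+     # exec() replaces the current (manager) process with supervisord, so
+     # supervisord takes over this PID and becomes the root of the process
+     # tree; it then starts the manager again as its own child.
+     os.execvp("supervisord", ["supervisord", "--nodaemon", "-c", config_path])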
+
+Failure handling
+================
+
+Knot Resolver is designed to handle failures automatically. Anything except supervisord will restart automatically. If a failure is irrecoverable, all processes will stop and nothing will be left behind in a half-broken state. While a total failure like this should never happen, it is possible, and you should not rely on a single instance of Knot Resolver for a highly-available system.
+
+.. note::
+ The ability to restart most of the components without downtime means that Knot Resolver is able to transparently apply updates while running.
+
+
+Individual components
+=====================
+
+You can learn more about the architecture of the individual Resolver components in the following chapters.
+
+.. toctree::
+ :titlesonly:
+ :maxdepth: 1
+
+ architecture-manager
+ architecture-kresd
+ architecture-gc \ No newline at end of file
diff --git a/doc/build.rst b/doc/build.rst
index 09b314dd..30f1d77b 100644
--- a/doc/build.rst
+++ b/doc/build.rst
@@ -18,13 +18,63 @@ Beware that some 64-bit systems with LuaJIT 2.1 may be affected by
$ git clone --recursive https://gitlab.nic.cz/knot/knot-resolver.git
+Building with apkg
+------------------
+
+Knot Resolver uses the `apkg tool <https://pkg.labs.nic.cz/pages/apkg/>`_ for upstream packaging.
+It can build packages locally for supported distributions and then install them.
+``apkg`` also takes care of dependencies itself.
+
+First, you need to install and set up ``apkg``.
+
+.. tip::
+ Install ``apkg`` with `pipx <https://pypa.github.io/pipx/>`_ to avoid version conflicts.
+
+.. code-block:: bash
+
+ $ pip3 install apkg
+ $ apkg system-setup
+
+Clone the ``knot-resolver`` git repository and change into its directory.
+
+.. code-block:: bash
+
+ $ git clone --recursive https://gitlab.nic.cz/knot/knot-resolver.git
+ $ cd knot-resolver
+
+.. tip:: The ``apkg status`` command can be used to find out some useful information, such as whether the current distribution is supported.
+
+When ``apkg`` is ready, a package can be built and installed.
+
+.. code-block:: bash
+
+ # install build dependencies
+ apkg build-dep
+
+ # build package
+ apkg build
+
+ # build (when not already built) and install the package
+ apkg install
+
+After that, Knot Resolver should be installed.
+
+Building with Meson
+-------------------
+
+Knot Resolver uses `Meson Build system <https://mesonbuild.com/>`_.
+Shell snippets below should be sufficient for basic usage,
+but users unfamiliar with Meson might want to read the introductory
+article `Using Meson <https://mesonbuild.com/Quick-guide.html>`_.
+
+
Dependencies
-------------
+~~~~~~~~~~~~
.. note:: This section lists basic requirements. Individual modules
might have additional build or runtime dependencies.
-The following dependencies are needed to build and run Knot Resolver:
+The following dependencies are needed to build and run Knot Resolver with core functions:
.. csv-table::
:header: "Requirement", "Notes"
@@ -39,6 +89,20 @@ The following dependencies are needed to build and run Knot Resolver:
"lmdb", "Memory-mapped database for cache"
"GnuTLS", "TLS"
+Additional dependencies are needed to build and run Knot Resolver with the ``manager``.
+All dependencies are also listed in `pyproject.toml <https://gitlab.nic.cz/knot/knot-resolver/-/blob/manager/manager/pyproject.toml>`_, which is our authoritative source.
+
+.. csv-table::
+ :header: "Requirement", "Notes"
+
+ "python3_ >=3.6.8", "Python language interpreter"
+ "Jinja2_", "Template engine for Python"
+ "PyYAML_", "YAML framework for Python"
+ "aiohttp_", "HTTP Client/Server for Python."
+ "prometheus-client_", "Prometheus client for Python"
+ "typing-extensions_", "Compatibility module for Python"
+
+
There are also *optional* packages that enable specific functionality in Knot
Resolver:
@@ -57,7 +121,7 @@ Resolver:
"cmocka_", "``unit tests``", "Unit testing framework."
"dnsdist_", "``proxyv2 test``", "DNS proxy server"
"Doxygen_", "``documentation``", "Generating API documentation."
- "Sphinx_ and sphinx_rtd_theme_", "``documentation``", "Building this
+ "Sphinx_, sphinx-tabs_ and sphinx_rtd_theme_", "``documentation``", "Building this
documentation."
"Texinfo_", "``documentation``", "Generating this documentation in Info
format."
@@ -79,9 +143,6 @@ Resolver:
to configure dependencies manually (i.e. ``libknot_CFLAGS`` and
``libknot_LIBS``).
-Packaged dependencies
-~~~~~~~~~~~~~~~~~~~~~
-
.. note:: Some build dependencies can be found in
`home:CZ-NIC:knot-resolver-build
<https://build.opensuse.org/project/show/home:CZ-NIC:knot-resolver-build>`_.
@@ -99,27 +160,23 @@ here's an overview for several platforms.
* **Mac OS X** - the dependencies can be obtained from `Homebrew formula <https://formulae.brew.sh/formula/knot-resolver>`_.
Compilation
------------
+~~~~~~~~~~~
-.. note::
+The following Meson command creates a new build directory named ``build_dir``, sets the installation path to ``/tmp/kr`` and enables a static build (to allow installation to a non-standard path).
+You can also set some :ref:`build-options`; in this case we enable the ``manager``, which is disabled by default.
- Knot Resolver uses `Meson Build system <https://mesonbuild.com/>`_.
- Shell snippets below should be sufficient for basic usage
- but users unfamiliar with Meson Build might want to read introductory
- article `Using Meson <https://mesonbuild.com/Quick-guide.html>`_.
+.. code-block:: bash
-Following example script will:
+ $ meson build_dir --prefix=/tmp/kr --default-library=static -Dmanager=enabled
- - create new build directory named ``build_dir``
- - configure installation path ``/tmp/kr``
- - enable static build (to allow installation to non-standard path)
- - build Knot Resolver
- - install it into the previously configured path
+After that, it is possible to build and install Knot Resolver.
.. code-block:: bash
$ meson setup build_dir --prefix=/tmp/kr --default-library=static
$ ninja -C build_dir
+
+ # install Knot Resolver into the previously configured '/tmp/kr' path
$ ninja install -C build_dir
At this point you can execute the newly installed binary using path ``/tmp/kr/sbin/kresd``.
@@ -128,6 +185,8 @@ At this point you can execute the newly installed binary using path ``/tmp/kr/sb
possible when using luajit package from Homebrew due to `#37169
<https://github.com/Homebrew/homebrew-core/issues/37169>`_.
+.. _build-options:
+
Build options
~~~~~~~~~~~~~
@@ -215,6 +274,7 @@ Recommended build options for packagers:
* ``-Dsystemd_files=enabled`` for systemd unit files
* ``-Ddoc=enabled`` for offline documentation (see :ref:`build-html-doc`)
* ``-Dinstall_kresd_conf=enabled`` to install default config file
+* ``-Dmanager=enabled`` to force build of the manager and its features
* ``-Dclient=enabled`` to force build of kresc
* ``-Dunit_tests=enabled`` to force build of unit tests
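+
+For instance, a packager might combine the options above into a single configure step. This is only a sketch; adjust the prefix and the option set to your distribution's policy:
+
+.. code-block:: bash
+
+ $ meson setup build_dir --prefix=/usr \
+     -Dsystemd_files=enabled -Ddoc=enabled -Dinstall_kresd_conf=enabled \
+     -Dmanager=enabled -Dclient=enabled -Dunit_tests=enabled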
@@ -268,6 +328,7 @@ For development, it's possible to build the container directly from your git tre
.. _Doxygen: https://www.doxygen.nl/manual/index.html
.. _breathe: https://github.com/michaeljones/breathe
.. _Sphinx: http://sphinx-doc.org/
+.. _sphinx-tabs: https://sphinx-tabs.readthedocs.io
.. _sphinx_rtd_theme: https://pypi.python.org/pypi/sphinx_rtd_theme
.. _Texinfo: https://www.gnu.org/software/texinfo/
.. _pkg-config: https://www.freedesktop.org/wiki/Software/pkg-config/
@@ -289,3 +350,9 @@ For development, it's possible to build the container directly from your git tre
.. _clang-tidy: http://clang.llvm.org/extra/clang-tidy/index.html
.. _luacov: https://lunarmodules.github.io/luacov/
.. _lcov: http://ltp.sourceforge.net/coverage/lcov.php
+.. _python3: https://www.python.org/
+.. _Jinja2: https://jinja.palletsprojects.com/
+.. _PyYAML: https://pyyaml.org/
+.. _aiohttp: https://docs.aiohttp.org/
+.. _prometheus-client: https://github.com/prometheus/client_python
+.. _typing-extensions: https://pypi.org/project/typing-extensions/
diff --git a/doc/conf.py b/doc/conf.py
index 53e5e38c..8f9c9ab9 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -13,7 +13,13 @@ if os.environ.get('READTHEDOCS', None) == 'True':
subprocess.call('doxygen')
# Add any Sphinx extension module names here, as strings.
-extensions = ['sphinx.ext.todo', 'sphinx.ext.viewcode', 'breathe']
+extensions = [
+ 'sphinx.ext.todo',
+ 'sphinx.ext.viewcode',
+ 'sphinx_tabs.tabs',
+ 'breathe',
+ 'sphinx_mdinclude',
+]
# Breathe configuration
breathe_projects = {"libkres": "doxyxml"}
@@ -93,3 +99,10 @@ texinfo_documents = [
('index', 'knot-resolver', u'Knot Resolver', u'CZ.NIC Labs',
'Knot Resolver', 'Caching DNS resolver.', 'Network services'),
]
+
+# reStructuredText that will be included at the beginning of every source file that is read.
+# This is a possible place to add substitutions that should be available in every file.
+rst_prolog = """
+.. |yaml| replace:: YAML
+.. |lua| replace:: Lua
+"""
diff --git a/doc/config-lua-overview.rst b/doc/config-lua-overview.rst
new file mode 100644
index 00000000..d5316c04
--- /dev/null
+++ b/doc/config-lua-overview.rst
@@ -0,0 +1,87 @@
+.. _config-syntax:
+
+Syntax
+======
+
+The configuration file syntax allows you to specify different kinds of data:
+
+ - ``group.option = 123456``
+ - ``group.option = "string value"``
+ - ``group.command(123456, "string value")``
+ - ``group.command({ key1 = "value1", key2 = 222, key3 = "third value" })``
+ - ``globalcommand(a_parameter_1, a_parameter_2, a_parameter_3, etc)``
+ - ``-- any text after -- sign is ignored till end of line``
+
+The following **configuration file snippet** starts listening for both unencrypted and encrypted DNS queries on the IP address 192.0.2.1, and sets the cache size.
+
+.. code-block:: lua
+
+ -- this is a comment: listen for unencrypted queries
+ net.listen('192.0.2.1')
+ -- another comment: listen for queries encrypted using TLS on port 853
+ net.listen('192.0.2.1', 853, { kind = 'tls' })
+ -- 10 MB cache is suitable for a very small deployment
+ cache.size = 10 * MB
+
+.. tip::
+ When copy&pasting examples from this manual please pay close
+ attention to brackets and also line ordering - order of lines matters.
+
+ The configuration language is in fact Lua script, so you can use full power
+ of this programming language. See article
+ `Learn Lua in 15 minutes`_ for a syntax overview.
+
+When you modify the configuration file on disk, restart the resolver process for the
+changes to take effect. See chapter :ref:`systemd-zero-downtime-restarts` if even short
+outages are not acceptable for your deployment.
+
+.. [#] If you decide to run the ``/usr/sbin/kresd`` binary manually (instead of
+ using systemd), do not forget to specify the ``-c`` option with the path to the
+ configuration file; otherwise ``kresd`` will read a file named ``config`` from
+ its current working directory.
+
+Documentation Conventions
+=========================
+
+Besides the text configuration file, Knot Resolver also supports interactive and dynamic configuration using scripts or external systems, which is described in chapter :ref:`runtime-cfg`. Throughout this manual we present examples for both usage types - static configuration in a text file (see above) and the interactive mode.
+
+The **interactive prompt** is denoted by ``>``, so all examples starting with the ``>`` character are transcripts of user (or script) interaction with Knot Resolver and the resolver's responses. For example:
+
+.. code-block:: lua
+
+ > -- this is a comment entered into interactive prompt
+ > -- comments have no effect here
+ > -- the next line shows a command entered interactively and its output
+ > log_level()
+ 'notice'
+ > -- the previous line without > character is output from log_level() command
+
+The following example demonstrates how to interactively list all currently loaded modules, and includes multi-line output:
+
+.. code-block:: lua
+
+ > modules.list()
+ {
+ 'iterate',
+ 'validate',
+ 'cache',
+ 'ta_update',
+ 'ta_signal_query',
+ 'policy',
+ 'priming',
+ 'detect_time_skew',
+ 'detect_time_jump',
+ 'ta_sentinel',
+ 'edns_keepalive',
+ 'refuse_nord',
+ 'watchdog',
+ }
+
+
+Before we dive into configuring features, let us explain modularization basics.
+
+.. include:: ../daemon/bindings/modules.rst
+
+Now you know what configuration file to modify, how to read examples and what modules are, so you are ready for real configuration work!
+
+.. _`Learn Lua in 15 minutes`: http://tylerneylon.com/a/learn-lua/
\ No newline at end of file
diff --git a/doc/config-lua.rst b/doc/config-lua.rst
new file mode 100644
index 00000000..bcdf2fee
--- /dev/null
+++ b/doc/config-lua.rst
@@ -0,0 +1,23 @@
+
+Advanced configuration (Lua)
+============================
+
+Knot Resolver can be configured declaratively by using YAML files or the YAML/JSON HTTP API. However, there is another option. The actual worker processes (the ``kresd`` executable) speak a different configuration language: internally, they use the Lua runtime and the Lua programming language.
+
+Essentially, the declarative configuration is only used for validation and as an external interface. After validation, a Lua configuration is generated and passed to the individual ``kresd`` instances. You can see the generated configuration files within the Resolver's working directory, or you can manually run the conversion of the declarative configuration with the ``kresctl convert`` command.
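+
+For example, converting the default declarative configuration into its Lua form might look like this (a sketch; the output path is illustrative):
+
+.. code-block:: bash
+
+ $ kresctl convert /etc/knot-resolver/config.yml ./generated-config.lua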
+
+.. warning::
+ While there are no plans of ever removing the Lua configuration, we do not guarantee the absence of backwards-incompatible changes. Starting with Knot Resolver version 6, we consider the Lua interface internal and subject to change. While we don't have any breaking changes planned for the foreseeable future, they might come.
+
+ **Therefore, use this only when you don't have any other option. And please let us know about it, so we can try to accommodate your use case in the declarative configuration.**
+
+.. toctree::
+ :maxdepth: 2
+
+ config-lua-overview
+ config-network
+ config-performance
+ config-policy
+ config-logging-monitoring
+ config-dnssec
+ config-experimental
\ No newline at end of file
diff --git a/doc/config-overview.rst b/doc/config-overview.rst
index 0aec51cc..f90719d1 100644
--- a/doc/config-overview.rst
+++ b/doc/config-overview.rst
@@ -4,95 +4,39 @@
Configuration Overview
**********************
-Configuration file is named ``/etc/knot-resolver/kresd.conf`` and is read when
-you execute Knot Resolver using systemd commands described in section
-:ref:`quickstart-startup`. [#]_
+Configuration file is by default named ``/etc/knot-resolver/config.yml``.
+Different configuration file can be loaded by using command line option
+``-c / --config``.
-.. _config-syntax:
Syntax
======
-The configuration file syntax allows you to specify different kinds of data:
+The configuration file uses `YAML format version 1.1 <https://yaml.org/spec/1.1/>`_.
+To quickly learn about the format, you can have a look at `Learn YAML in Y minutes <https://learnxinyminutes.com/docs/yaml/>`_.
- - ``group.option = 123456``
- - ``group.option = "string value"``
- - ``group.command(123456, "string value")``
- - ``group.command({ key1 = "value1", key2 = 222, key3 = "third value" })``
- - ``globalcommand(a_parameter_1, a_parameter_2, a_parameter_3, etc)``
- - ``-- any text after -- sign is ignored till end of line``
-Following **configuration file snippet** starts listening for unencrypted and also encrypted DNS queries on IP address 192.0.2.1, and sets cache size.
+Schema
+======
-.. code-block:: lua
+The configuration has to pass a validation step before being used. The validation mainly
+checks for conformance to our :ref:`configuration-schema`.
- -- this is a comment: listen for unencrypted queries
- net.listen('192.0.2.1')
- -- another comment: listen for queries encrypted using TLS on port 853
- net.listen('192.0.2.1', 853, { kind = 'tls' })
- -- 10 MB cache is suitable for a very small deployment
- cache.size = 10 * MB
.. tip::
- When copy&pasting examples from this manual please pay close
- attention to brackets and also line ordering - order of lines matters.
-
- The configuration language is in fact Lua script, so you can use full power
- of this programming language. See article
- `Learn Lua in 15 minutes`_ for a syntax overview.
-
-When you modify configuration file on disk restart resolver process to get
-changes into effect. See chapter :ref:`systemd-zero-downtime-restarts` if even short
-outages are not acceptable for your deployment.
-
-.. [#] If you decide to run binary ``/usr/sbin/kresd`` manually (instead of
- using systemd) do not forget to specify ``-c`` option with path to
- configuration file, otherwise ``kresd`` will read file named ``config`` from
- its current working directory.
-
-Documentation Conventions
-=========================
-
-Besides text configuration file, Knot Resolver also supports interactive and dynamic configuration using scripts or external systems, which is described in chapter :ref:`runtime-cfg`. Through this manual we present examples for both usage types - static configuration in a text file (see above) and also the interactive mode.
-
-The **interactive prompt** is denoted by ``>``, so all examples starting with ``>`` character are transcripts of user (or script) interaction with Knot Resolver and resolver's responses. For example:
+ Whenever a configuration is loaded and the validation fails, we attempt to log a detailed
+ error message explaining what the problem was. For example, it could look like the following:
-.. code-block:: lua
+ .. code-block:: text
+ ERROR:knot_resolver_manager.server:multiple configuration errors detected:
+ [/management/interface] invalid port number 66000
+ [/logging/level] 'noticed' does not match any of the expected values ('crit', 'err', 'warning', 'notice', 'info', 'debug')
- > -- this is a comment entered into interactive prompt
- > -- comments have no effect here
- > -- the next line shows a command entered interactively and its output
- > log_level()
- 'notice'
- > -- the previous line without > character is output from log_level() command
+ If you happen to find a rejected configuration with unhelpful or confusing error message, please report it as a bug.
-Following example demonstrates how to interactively list all currently loaded modules, and includes multi-line output:
-.. code-block:: lua
-
- > modules.list()
- {
- 'iterate',
- 'validate',
- 'cache',
- 'ta_update',
- 'ta_signal_query',
- 'policy',
- 'priming',
- 'detect_time_skew',
- 'detect_time_jump',
- 'ta_sentinel',
- 'edns_keepalive',
- 'refuse_nord',
- 'watchdog',
- }
-
-
-Before we dive into configuring features, let us explain modularization basics.
-
-.. include:: ../daemon/bindings/modules.rst
-
-Now you know what configuration file to modify, how to read examples and what modules are so you are ready for a real configuration work!
-
-.. _`Learn Lua in 15 minutes`: http://tylerneylon.com/a/learn-lua/
+.. tip::
+ An easy way to see the complete configuration structure is to look at the `JSON schema <https://json-schema.org/>`_ representation.
+ The raw JSON schema is available at `this link <_static/config.schema.json>`_ (valid only for the resolver version this documentation was generated for).
+ For readability, a graphical schema visualizer can be used, for example `this one <https://json-schema.app/>`_.
diff --git a/doc/config-schema.rst b/doc/config-schema.rst
new file mode 100644
index 00000000..769587d2
--- /dev/null
+++ b/doc/config-schema.rst
@@ -0,0 +1,42 @@
+Configuration schema
+====================
+
+
+The configuration schema describes the structure of accepted configuration files (or objects via the API). While originally specified in Python source code, it can be visualized as a `JSON schema <https://json-schema.org/>`_.
+
+Getting the JSON schema
+-----------------------
+
+1. The JSON schema can be obtained from a running Resolver by sending an HTTP GET request to the path ``/schema`` on the management socket (by default a Unix socket at ``/var/run/knot-resolver/manager.sock``); see the sketch below.
+2. The ``kresctl schema`` command outputs the schema of the currently installed version as well; it does not require a running resolver.
+3. JSON schema for the most recent Knot Resolver version can be `downloaded here <_static/config.schema.json>`_.
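+
+As a sketch, the first two methods can look like this, assuming the default management socket path mentioned above:
+
+.. code-block:: bash
+
+ # 1. from a running resolver, over the management socket
+ $ curl --unix-socket /var/run/knot-resolver/manager.sock http://localhost/schema > config.schema.json
+
+ # 2. using kresctl, without a running resolver
+ $ kresctl schema > config.schema.json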
+
+Validating your configuration
+-----------------------------
+
+As mentioned above, the JSON schema is NOT used to validate the configuration in Knot Resolver. It's the other way around: the validation process can generate a JSON schema that helps you understand the configuration structure. Some validation steps are, however, dynamic (for example, resolving interface names); they cannot be expressed using JSON schema and cannot even be completed without running the full Resolver.
+
+.. note::
+ When using the API to change the configuration at runtime, your change can be rejected by the validation step even though Knot Resolver would start just fine with the changed configuration. Some validation steps within the Resolver are dynamic and depend on both your previous configuration and the new one. For example, if you try to change the management socket, the validation will fail even though the newly provided address is perfectly valid. Changing the management socket while running is not supported.
+
+Most of the validation is, however, static, and you can use the ``kresctl validate`` command to check your configuration file for most errors before actually running the Resolver.
+
+
+Interactive visualization
+-------------------------
+
+The following visualization is interactive and offers a good overview of the configuration structure.
+
+.. raw:: html
+
+ <a href="_static/schema_doc.html" target="_blank">Open in a new tab.</a>
+ <iframe src="_static/schema_doc.html" width="100%" style="height: 30vh;"></iframe>
+
+
+Text-based configuration schema description
+-------------------------------------------
+
+Below you can find a flattened textual representation of the JSON schema. It is not meant to be read top-to-bottom, but it can be used as a quick lookup reference.
+
+.. mdinclude:: config-schema-body.md
+
diff --git a/doc/deployment-advanced-no-manager.rst b/doc/deployment-advanced-no-manager.rst
new file mode 100644
index 00000000..31512fe2
--- /dev/null
+++ b/doc/deployment-advanced-no-manager.rst
@@ -0,0 +1,73 @@
+.. SPDX-License-Identifier: GPL-3.0-or-later
+
+
+.. include:: deployment-warning.rst
+
+.. _advanced-no-manager:
+
+*************************
+Usage without the manager
+*************************
+
+
+There are a few downsides to using Knot Resolver without the manager:
+
+* Configuration is an imperative Lua script and can't be properly validated without actually running it.
+* ``kresd`` is single-threaded, so you need to manage multiple processes manually.
+* Restarts without downtime after a configuration change are your responsibility alone.
+
+
+.. _advanced-no-manager-startup:
+
+=======
+Startup
+=======
+
+The older way to start Knot Resolver is to run a single instance of its resolving daemon manually using the ``kresd@`` systemd integration.
+The daemon is a single-threaded process.
+
+.. code-block:: bash
+
+ $ sudo systemctl start kresd@1.service
+
+.. tip::
+
+ For more information about ``systemd`` integration see ``man kresd.systemd``.
+
+
+.. _advanced-no-manager-config:
+
+=============
+Configuration
+=============
+
+You can configure ``kresd`` by pasting your Lua code into the ``/etc/knot-resolver/kresd.conf`` configuration script.
+The resolver's daemon is preconfigured to load this script when using the ``kresd@`` systemd integration.
+
+.. note::
+
+ The configuration language is in fact Lua script, so you can use full power
+ of this programming language. See article
+ `Learn Lua in 15 minutes <http://tylerneylon.com/a/learn-lua/>`_ for a syntax overview.
+
+The first thing you need to configure are the network interfaces to listen on.
+
+The following example instructs the resolver to receive standard unencrypted DNS queries on IP addresses ``192.0.2.1`` and ``2001:db8::1``.
+Encrypted DNS queries are accepted using the DNS-over-TLS protocol on all IP addresses configured on the network interface ``eth0``, TCP port ``853``.
+
+.. code-block:: lua
+
+ -- unencrypted DNS on port 53 is default
+ net.listen('192.0.2.1')
+ net.listen('2001:db8::1')
+
+ net.listen(net.eth0, 853, { kind = 'tls' })
+
+
+Complete configuration file examples can be found `here <https://gitlab.nic.cz/knot/knot-resolver/tree/master/etc/config>`_.
+The example configuration files are also installed as documentation files, typically in the directory ``/usr/share/doc/knot-resolver/examples/`` (their location may differ based on your Linux distribution).
+
+.. note::
+
+ When copy&pasting examples please pay close
+ attention to brackets and also line ordering - order of lines matters. \ No newline at end of file
diff --git a/doc/deployment-advanced.rst b/doc/deployment-advanced.rst
new file mode 100644
index 00000000..8b39b23f
--- /dev/null
+++ b/doc/deployment-advanced.rst
@@ -0,0 +1,9 @@
+********
+Advanced
+********
+
+.. toctree::
+ :maxdepth: 2
+
+ deployment-advanced-no-manager
+ deployment-no-systemd
\ No newline at end of file
diff --git a/doc/deployment-docker.rst b/doc/deployment-docker.rst
new file mode 100644
index 00000000..787a9b7a
--- /dev/null
+++ b/doc/deployment-docker.rst
@@ -0,0 +1,21 @@
+******
+Docker
+******
+
+
+.. note::
+
+ Before version 6, our Docker images were not meant to be used in production. This is no longer the case: with the introduction of ``kres-manager``, Knot Resolver runs in containers without issues.
+
+An official Docker image can be found on `Docker Hub <https://hub.docker.com/r/cznic/knot-resolver>`_. The image contains Knot Resolver as if it was installed from our official distro packages.
+
+.. code-block:: bash
+
+ docker run --rm -ti -P docker.io/cznic/knot-resolver
+
+The configuration file is located at ``/etc/knot-resolver/config.yml`` and the cache is at ``/var/cache/knot-resolver``. We recommend configuring a persistent cache across container restarts.
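+
+A sketch of such a setup follows; the host-side path and volume name are illustrative, the in-container paths are the ones mentioned above.
+
+.. code-block:: bash
+
+ docker run --rm -ti -P \
+     -v "$PWD/config.yml":/etc/knot-resolver/config.yml:ro \
+     -v kres-cache:/var/cache/knot-resolver \
+     docker.io/cznic/knot-resolver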
+
+.. warning::
+
+ While the container image contains a normal installation of Knot Resolver and there shouldn't be any differences between running it natively and in a container, we (the developers) do not have any experience using the Docker image in production. In particular, beware of running the DNS resolver on a software-defined network (e.g. in Kubernetes); there will likely be some performance penalty for doing so. We haven't done any measurements comparing different types of installations, so we don't know the performance differences. If you have done some measurements yourself, please reach out to us and we will share them here with everyone else.
+
\ No newline at end of file
diff --git a/doc/deployment-manual.rst b/doc/deployment-manual.rst
new file mode 100644
index 00000000..d53a1692
--- /dev/null
+++ b/doc/deployment-manual.rst
@@ -0,0 +1,15 @@
+******
+Manual
+******
+
+Knot Resolver can be started with the command ``knot-resolver``. You can provide the optional argument ``--config path/to/config.yml`` to load a configuration file other than the default.
+
+The resolver does not have any external runtime dependencies and it should be able to run in most environments. It should be possible to wrap it with any container technology.
+
+
+Multiple instances on a single server
+=====================================
+
+The only limitation for running multiple instances of Knot Resolver is that all instances must each have a different runtime directory. There are, however, safeguards in place that should prevent accidental runtime-directory conflicts.
+
+It is possible to share the cache between multiple instances; just make sure that all instances have the same cache configuration and that only a single garbage collector is running (disable it in all but one config file).
\ No newline at end of file
diff --git a/doc/config-no-systemd-privileges.rst b/doc/deployment-no-systemd-privileges.rst
index e2c2ab9c..78e11444 100644
--- a/doc/config-no-systemd-privileges.rst
+++ b/doc/deployment-no-systemd-privileges.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: GPL-3.0-or-later
+.. include:: deployment-warning.rst
+
Privileges and capabilities
===========================
diff --git a/doc/config-no-systemd-processes.rst b/doc/deployment-no-systemd-processes.rst
index 59aed1b0..362e8546 100644
--- a/doc/config-no-systemd-processes.rst
+++ b/doc/deployment-no-systemd-processes.rst
@@ -1,5 +1,7 @@
.. SPDX-License-Identifier: GPL-3.0-or-later
+.. include:: deployment-warning.rst
+
Process management
==================
diff --git a/doc/config-no-systemd.rst b/doc/deployment-no-systemd.rst
index a8cbb096..6cb2bb36 100644
--- a/doc/config-no-systemd.rst
+++ b/doc/deployment-no-systemd.rst
@@ -2,15 +2,17 @@
.. _usage-without-systemd:
-*********************
-Usage without systemd
-*********************
+.. include:: deployment-warning.rst
+
+*****************************************
+Usage without systemd and without manager
+*****************************************
.. tip:: Our upstream packages use systemd integration, which is the recommended
way to run kresd. This section is only relevant if you choose to use kresd
without systemd integration.
-Knot Resolver is designed to be a single process without the use of threads.
+``kresd`` is designed to be a single process without the use of threads.
While the cache is shared, the individual processes are independent. This
approach has several benefits, but it also comes with a few downsides, in
particular:
@@ -31,7 +33,7 @@ mind when configuring and running kresd without systemd integration.
.. toctree::
:maxdepth: 2
- config-no-systemd-processes
- config-no-systemd-privileges
+ deployment-no-systemd-processes
+ deployment-no-systemd-privileges
.. _`#529`: https://gitlab.nic.cz/knot/knot-resolver/issues/529
diff --git a/doc/deployment-systemd.rst b/doc/deployment-systemd.rst
new file mode 100644
index 00000000..3b6c3c69
--- /dev/null
+++ b/doc/deployment-systemd.rst
@@ -0,0 +1,19 @@
+*******
+Systemd
+*******
+
+In the default installation, Knot Resolver contains systemd integration, and starting it on such a system usually involves only one command.
+
+.. code-block::
+
+ systemctl enable --now knot-resolver.service
+
+
+If you don't have a systemd service file for Knot Resolver already installed on your system, you can create one manually with the following content:
+
+
+.. literalinclude:: ../systemd/knot-resolver.service.in
+
+.. note::
+
+ Replace the words surrounded by ``@`` with real values (e.g. ``@user@`` with the user you want Knot Resolver to run as).
\ No newline at end of file
diff --git a/doc/deployment-warning.rst b/doc/deployment-warning.rst
new file mode 100644
index 00000000..117d388c
--- /dev/null
+++ b/doc/deployment-warning.rst
@@ -0,0 +1,10 @@
+.. warning::
+
+ This page is intended for experienced users only. If you follow these
+ instructions, you are not protected from the footguns eliminated with the
+ introduction of the ``kres-manager``. However, if you want to continue
+ using Knot Resolver the same way as before version ``6.0.0``, this is the
+ chapter for you.
+
+ For new and less experienced users, we recommend using the newer approach
+ starting in the :ref:`Getting Started <gettingstarted-startup>` chapter.
\ No newline at end of file
diff --git a/doc/gettingstarted-config.rst b/doc/gettingstarted-config.rst
new file mode 100644
index 00000000..8e81ffbd
--- /dev/null
+++ b/doc/gettingstarted-config.rst
@@ -0,0 +1,263 @@
+.. SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _gettingstarted-config:
+
+*************
+Configuration
+*************
+
+The easiest way to configure Knot Resolver is to put a YAML configuration in the ``/etc/knot-resolver/config.yml`` file.
+
+You can start exploring the configuration by continuing in this chapter, or by looking at the complete :ref:`configuration <configuration-chapter>` documentation.
+
+.. contents::
+ :depth: 1
+ :local:
+
+Complete examples of configuration files can be found `here <https://gitlab.nic.cz/knot/knot-resolver/tree/master/etc/config>`_.
+Examples are also installed as documentation files, typically in the ``/usr/share/doc/knot-resolver/examples/`` directory (the location may differ based on your Linux distribution).
+
+.. tip::
+
+ You can use the :ref:`kresctl <manager-client>` utility to **validate** your configuration before pushing it to the running resolver.
+ It should help prevent many typos in the configuration.
+
+ .. code-block::
+
+ $ kresctl validate /etc/knot-resolver/config.yml
+
+If you update the configuration file while Knot Resolver is running, you can force the resolver to **reload** it by invoking a ``systemd`` reload command.
+
+.. code-block::
+
+ $ systemctl reload knot-resolver.service
+
+.. note::
+
+ **Reloading configuration** can fail even when your configuration is valid, because some options cannot be changed while running. You can always find an explanation of the error in the log, accessed by the ``journalctl -eu knot-resolver`` command.
+
+===============================
+Listening on network interfaces
+===============================
+
+The first thing you will probably want to configure are the network interfaces to listen on.
+The following example instructs the resolver to receive standard unencrypted DNS queries on the ``192.0.2.1`` and ``2001:db8::1`` IP addresses.
+Encrypted DNS queries using the ``DNS-over-TLS`` protocol are accepted on all IP addresses of the ``eth0`` network interface, TCP port ``853``.
+
+.. code-block:: yaml
+
+ network:
+ listen:
+ - interface: ['192.0.2.1', '2001:db8::1'] # port 53 is default
+ - interface: 'eth0'
+ port: 853
+ kind: 'dot' # DNS-over-TLS
+
+For more details look at the :ref:`network configuration <config-network>`.
+
+.. warning::
+
+ On machines with multiple IP addresses on the same interface, avoid listening on the wildcards ``0.0.0.0`` or ``::``.
+ Knot Resolver could answer from different IP addresses if the network address ranges overlap, and clients would refuse such a response.
+
+
+.. _examle-internal:
+
+==========================
+Example: Internal Resolver
+==========================
+
+This is an example of a typical configuration for a company-internal resolver which is not accessible from outside of the company network.
+
+^^^^^^^^^^^^^^^^^^^^^
+Internal-only domains
+^^^^^^^^^^^^^^^^^^^^^
+
+An internal-only domain is a domain not accessible from the public Internet.
+In order to resolve internal-only domains, a query policy has to be added to forward queries to the correct internal server.
+This configuration will forward the two listed domains to a DNS server with the IP address ``192.0.2.44``.
+
+.. code-block:: yaml
+
+ policy:
+
+
+.. .. code-block:: lua
+
+.. -- define list of internal-only domains
+.. internalDomains = policy.todnames({'company.example', 'internal.example'})
+
+.. -- forward all queries belonging to domains in the list above to IP address '192.0.2.44'
+.. policy.add(policy.suffix(policy.FLAGS({'NO_CACHE'}), internalDomains))
+.. policy.add(policy.suffix(policy.STUB({'192.0.2.44'}), internalDomains))
+
+See chapter :ref:`dns-graft` for more details.
+
+
+.. _examle-isp:
+
+=====================
+Example: ISP Resolver
+=====================
+
+The following configuration is typical for Internet Service Providers who offer DNS resolver
+service to their own clients in their own network. Please note that running a *public DNS resolver*
+is more complicated and not covered by this example.
+
+^^^^^^^^^^^^^^^^^^^^^^
+Limiting client access
+^^^^^^^^^^^^^^^^^^^^^^
+
+With the exception of public resolvers, a DNS resolver should resolve only queries sent by clients in its own network. This restriction limits the attack surface on the resolver itself and also on the rest of the Internet.
+
+In a situation where access to the DNS resolver is not limited using an IP firewall, you can implement access restrictions which combine query source information with :ref:`policy rules <mod-policy>`.
+The following configuration allows only queries from clients in the subnet ``192.0.2.0/24`` and refuses all the rest.
+
+.. code-block:: yaml
+
+ view:
+
+ policy:
+
+.. .. code-block:: lua
+
+.. modules.load('view')
+
+.. -- whitelist queries identified by subnet
+.. view:addr('192.0.2.0/24', policy.all(policy.PASS))
+
+.. -- drop everything that hasn't matched
+.. view:addr('0.0.0.0/0', policy.all(policy.DROP))
+
+^^^^^^^^^^^^^^^^^^^^^^^^
+TLS server configuration
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Today, clients are demanding secure transport for DNS queries between the client machine and the DNS resolver.
+The recommended way to achieve this is to start a DNS-over-TLS server and also accept encrypted queries.
+
+The first step is to enable TLS on the listening interfaces:
+
+.. code-block:: yaml
+
+ network:
+ listen:
+ - interface: ['192.0.2.1', '2001:db8::1']
+ kind: 'dot' # DNS-over-TLS, port 853 is default
+
+By default, a self-signed certificate is generated.
+The second step is then obtaining and configuring your own TLS certificates signed by a trusted CA.
+Once the certificate is obtained, the paths to the certificate files can be specified:
+
+.. code-block:: yaml
+
+ network:
+ tls:
+ cert-file: '/etc/knot-resolver/server-cert.pem'
+ key-file: '/etc/knot-resolver/server-key.pem'
+
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Mandatory domain blocking
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Some jurisdictions mandate blocking access to certain domains.
+This can be achieved using the following :ref:`policy rule <mod-policy>`:
+
+.. code-block:: yaml
+
+ policy:
+
+.. .. code-block:: lua
+
+.. policy.add(
+.. policy.suffix(policy.DENY,
+.. policy.todnames({'example.com.', 'blocked.example.net.'})))
+
+
+.. _examle-personal:
+
+==========================
+Example: Personal Resolver
+==========================
+
+DNS queries can be used to gather data about user behavior.
+Knot Resolver can be configured to forward DNS queries elsewhere,
+and to protect them from eavesdropping by TLS encryption.
+
+.. warning::
+
+ Recent research has shown that encrypting DNS traffic is not sufficient to protect users' privacy.
+ For this reason, we recommend that all users use a full VPN instead of encrypting *just* DNS queries.
+ The following configuration is provided **only for users who cannot encrypt all their traffic**.
+ For more information, please see the following articles:
+
+ - Simran Patil and Nikita Borisov. 2019. What can you learn from an IP? (`slides <https://irtf.org/anrw/2019/slides-anrw19-final44.pdf>`_, `the article itself <https://dl.acm.org/authorize?N687437>`_)
+ - `Bert Hubert. 2019. Centralised DoH is bad for Privacy, in 2019 and beyond <https://labs.ripe.net/Members/bert_hubert/centralised-doh-is-bad-for-privacy-in-2019-and-beyond>`_
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Forwarding over TLS protocol (DNS-over-TLS)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Forwarding over the TLS protocol protects DNS queries sent out by the resolver.
+It can be configured using :ref:`TLS forwarding <tls-forwarding>`, which provides methods for authentication.
+.. It can be configured using :ref:`policy.TLS_FORWARD <tls-forwarding>` which provides methods for authentication.
+See the list of `DNS Privacy Test Servers`_ supporting DNS-over-TLS to test your configuration.
+
+Read more on :ref:`tls-forwarding`.
+
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Forwarding to multiple targets
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+With the use of the slice function, it is possible to split the
+.. With the use of :any:`policy.slice` function, it is possible to split the
+entire DNS namespace into distinct "slices". When used in conjunction with
+:ref:`TLS forwarding <tls-forwarding>`, it's possible to forward different queries to different
+.. :ref:`policy.TLS_FORWARD <tls-forwarding>`, it's possible to forward different queries to different
+remote resolvers. As a result, no single remote resolver will get a complete list
+of all queries performed by this client.
+
+.. warning::
+
+ Beware that this method has not been scientifically tested and there might be
+ types of attacks which will allow remote resolvers to infer more information about the client.
+ Again: if possible, encrypt **all** your traffic, not just DNS queries!
+
+.. code-block:: yaml
+
+ policy:
+ # TODO
+
+.. .. code-block:: lua
+
+.. policy.add(policy.slice(
+.. policy.slice_randomize_psl(),
+.. policy.TLS_FORWARD({{'192.0.2.1', hostname='res.example.com'}}),
+.. policy.TLS_FORWARD({
+.. -- multiple servers can be specified for a single slice
+.. -- the one with lowest round-trip time will be used
+.. {'193.17.47.1', hostname='odvr.nic.cz'},
+.. {'185.43.135.1', hostname='odvr.nic.cz'},
+.. })
+.. ))
+
+^^^^^^^^^^^^^^^^^^^^
+Non-persistent cache
+^^^^^^^^^^^^^^^^^^^^
+
+Knot Resolver's cache contains data clients queried for.
+If you are concerned about attackers who are able to get access to your
+computer system in a powered-off state, and your storage device is not secured by
+encryption, you can move the cache to tmpfs_.
+See chapter :ref:`cache_persistence`.
+
+.. .. raw:: html
+
+.. <h2>Next steps</h2>
+
+.. Congratulations! Your resolver is now up and running and ready for queries. For
+.. serious deployments do not forget to read :ref:`configuration-chapter` and
+.. :ref:`operation-chapter` chapters.
+
+.. _`DNS Privacy Test Servers`: https://dnsprivacy.org/wiki/display/DP/DNS+Privacy+Test+Servers
+.. _tmpfs: https://en.wikipedia.org/wiki/Tmpfs
\ No newline at end of file
diff --git a/doc/gettingstarted-install.rst b/doc/gettingstarted-install.rst
new file mode 100644
index 00000000..9c4c434a
--- /dev/null
+++ b/doc/gettingstarted-install.rst
@@ -0,0 +1,43 @@
+.. SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _gettingstarted-install:
+
+************
+Installation
+************
+
+As a first step, configure your system to use upstream repositories which have
+the **latest version** of Knot Resolver. Follow the instructions below for your
+distribution.
+
+.. note:: Please note that the packages available in the distribution repositories of Debian and Ubuntu are outdated. Make sure to follow these steps to use our upstream repositories.
+
+.. tabs::
+
+ .. code-tab:: bash Debian/Ubuntu
+
+ $ wget https://secure.nic.cz/files/knot-resolver/knot-resolver-release.deb
+ $ sudo dpkg -i knot-resolver-release.deb
+ $ sudo apt update
+ $ sudo apt install -y knot-resolver
+
+ .. code-tab:: bash CentOS 7+
+
+ $ sudo yum install -y epel-release
+ $ sudo yum install -y knot-resolver
+
+ .. code-tab:: bash Fedora
+
+ $ sudo dnf install -y knot-resolver
+
+ .. code-tab:: bash Arch Linux
+
+ $ sudo pacman -S knot-resolver
+
+**openSUSE Leap/Tumbleweed**
+
+Add the `OBS <https://en.opensuse.org/Portal:Build_Service>`_ package repository `home:CZ-NIC:knot-resolver-latest <https://software.opensuse.org/download.html?project=home%3ACZ-NIC%3Aknot-resolver-latest&package=knot-resolver>`_ to your system.
+
+.. note::
+
+ If for some reason you need to **install Knot Resolver from source**, check out the :ref:`building from sources <build>` documentation for developers.
diff --git a/doc/quickstart-startup.rst b/doc/gettingstarted-startup.rst
index 5a381a3b..7593aa14 100644
--- a/doc/quickstart-startup.rst
+++ b/doc/gettingstarted-startup.rst
@@ -1,27 +1,37 @@
.. SPDX-License-Identifier: GPL-3.0-or-later
-.. _quickstart-startup:
+.. _gettingstarted-startup:
*******
Startup
*******
-The simplest way to run single instance of
-Knot Resolver is to use provided Knot Resolver's Systemd integration:
+The main way to run Knot Resolver is to use the provided ``systemd`` integration.
.. code-block:: bash
- $ sudo systemctl start kresd@1.service
+ $ sudo systemctl start knot-resolver.service
-See logs and status of running instance with ``systemctl status kresd@1.service`` command. For more information about Systemd integration see ``man kresd.systemd``.
+See the logs and status of a running instance with the ``systemctl status knot-resolver.service`` command.
+For more information about the systemd integration see ``man knot-resolver.systemd``.
.. warning::
- ``kresd@*.service`` is not enabled by default, thus Knot Resolver won't start automatically after reboot.
- To start and enable service in one command use ``systemctl enable --now kresd@1.service``
+ ``knot-resolver.service`` is not enabled by default, thus Knot Resolver won't start automatically after a reboot.
+ To start and enable the service in one command, use ``systemctl enable --now knot-resolver.service``.
+Unfortunately, in some cases (typically Docker and minimalistic systems), ``systemd`` is not available, and therefore it is not possible to use ``knot-resolver.service``.
+If you have this problem, look at the :ref:`usage without systemd <config-no-systemd>` section.
+
+.. note::
+
+ If for some reason you need to use Knot Resolver as it was before version 6, check out :ref:`usage without the manager <advanced-no-manager>`.
+ Otherwise, it is recommended to stick to this chapter.
+
+===============
First DNS query
===============
+
After installation and first startup, Knot Resolver's default configuration accepts queries on loopback interface. This allows you to test that the installation and service startup were successful before continuing with configuration.
For instance, you can use DNS lookup utility ``kdig`` to send DNS queries. The ``kdig`` command is provided by following packages:
diff --git a/doc/index.rst b/doc/index.rst
index f5d9d423..255a7603 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -4,54 +4,70 @@
Knot Resolver
#############
-Knot Resolver is a minimalistic implementation of a caching validating DNS resolver.
-Modular architecture keeps the core tiny and efficient,
-and it provides a state-machine like API for extensions.
+Welcome to Knot Resolver's documentation!
+Knot Resolver is an open-source implementation of a caching, validating DNS resolver.
+Its modular architecture keeps the core tiny and efficient, and it also provides a state-machine-like API for extensions.
+
+If you are a new user, please start with the :ref:`getting started <gettingstarted>` chapter.
.. toctree::
- :caption: Quick Start
- :name: quickstart
+ :caption: Getting Started
+ :name: gettingstarted-chapter
:maxdepth: 1
- quickstart-install
- quickstart-startup
- quickstart-config
-
-.. _configuration-chapter:
+ gettingstarted-install
+ gettingstarted-startup
+ gettingstarted-config
.. toctree::
:caption: Configuration
- :name: users
+ :name: configuration-chapter
:maxdepth: 3
config-overview
- config-network
- config-performance
- config-policy
- config-logging-monitoring
- config-dnssec
- config-experimental
- config-no-systemd
+ config-schema
+ usecase-network-interfaces
+ config-lua
+
+.. toctree::
+ :caption: Deployment
+ :name: deployment-chapter
+ :maxdepth: 1
+
+ deployment-systemd
+ deployment-manual
+ deployment-docker
+ deployment-advanced
-.. _operation-chapter:
+.. toctree::
+ :caption: Management
+ :name: management-chapter
+ :maxdepth: 1
+
+ manager-api
+ manager-client
.. toctree::
- :caption: Operation
+ :caption: For operators
+ :name: operators-chapter
:maxdepth: 1
+ upgrading-to-6
upgrading
NEWS
+
.. toctree::
- :caption: Developers
- :name: developers
+ :caption: For developers
+ :name: developers-chapter
:maxdepth: 2
+ architecture
build
- modules-http-custom-services
lib
modules_api
worker_api
+ modules-http-custom-services
Indices and tables
diff --git a/doc/kresctl.8.in b/doc/kresctl.8.in
new file mode 100644
index 00000000..5d586500
--- /dev/null
+++ b/doc/kresctl.8.in
@@ -0,0 +1,67 @@
+.TH "kresctl" "8" "@date@" "CZ.NIC" "Knot Resolver @version@"
+.\"
+.\" kresctl.8 -- Knot Resolver control tool manpage
+.\"
+.\" Copyright (c) CZ.NIC. All rights reserved.
+.\"
+.\" SPDX-License-Identifier: GPL-3.0-or-later
+.\"
+.\"
+.SH "NAME"
+.B kresctl
+\- Control Knot Resolver @version@, the full caching DNSSEC-enabled resolver
+.SH "SYNOPSIS"
+.B kresctl
+.RB [ \-s | \-\-socket
+.IR API_SOCKET ]
+.IR <command>
+.IR <args>
+.SH "DESCRIPTION"
+.B \fIkresctl\fR is a control tool for Knot Resolver, a DNSSEC-enabled full caching resolver.
+.P
+
+\fBkresctl\fR may be used to control and inspect the configuration of running
+Knot Resolver via its HTTP API.
+
+Full documentation is available at
+\fIhttps://knot-resolver.readthedocs.io\fR or in package documentation
+(available as the knot-resolver-doc package in most distributions).
+.SH OPTIONS
+The available options are:
+.TP
+.B \-s\fI <api_socket>\fR, \fB\-\-socket \fI<api_socket>
+
+Specify how to connect to a running Knot Resolver. Accepts path to Unix-domain
+socket or \fIhost:port\fR. Defaults to \fI/var/run/knot-resolver/manager.sock\fR
+
+Some commands do not require communication with the running resolver. In such
+cases, the value of this option is ignored and the command may succeed even
+if the socket is invalid.
+.TP
+.B \-h, \-\-help
+Print help message and exit.
+.SH COMMANDS
+.TP
+.B config [-d|--delete] [--stdin] \fIjson_ptr\fR \fI[new_value]\fR
+Get or modify resolver's runtime configuration.
+
+\fIjson_ptr\fR follows \fBRFC 6901\fR and specifies which part of the configuration
+the tool will operate on. This argument is required. When no additional
+options or arguments are given, the current configured value is printed. Equivalent
+to the GET method in the underlying HTTP API.
+
+When \fB\-d\fR or \fB\-\-delete\fR is provided, \fBkresctl\fR tries to remove
+the runtime configuration subtree. The operation fails if it renders the
+configuration invalid. Equivalent to the DELETE method in the HTTP API.
+
+When \fB--stdin\fR or \fInew_value\fR are provided, the new value (from the
+argument or from reading stdin) is put into the configuration at the given
+\fIjson_ptr\fR. The operation fails if it renders the configuration invalid.
+Equivalent to the PUT method in the underlying HTTP API.
+.TP
+.B stop
+Gracefully stops the running resolver.
+.SH "SEE ALSO"
+\fBkresd(8)\fR, @man_seealso_systemd@\fIhttps://knot-resolver.readthedocs.io/en/v@version@/\fR
+.SH "AUTHORS"
+.B kresd
+developers are mentioned in the AUTHORS file in the distribution.
diff --git a/doc/manager-api.rst b/doc/manager-api.rst
new file mode 100644
index 00000000..59271090
--- /dev/null
+++ b/doc/manager-api.rst
@@ -0,0 +1,111 @@
+.. SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _manager-api:
+
+********
+HTTP API
+********
+
+===================
+Management HTTP API
+===================
+
+You can use the HTTP API to dynamically change the configuration of an already running Knot Resolver.
+By default, the API is exposed as the UNIX domain socket ``manager.sock``, located in the resolver's rundir (typically ``/run/knot-resolver/``).
+This socket is used by the ``kresctl`` utility by default.
+
+The API settings can be changed only in the ``/etc/knot-resolver/config.yml`` configuration file:
+
+.. code-block:: yaml
+
+ management:
+ interface: 127.0.0.1@5000
+ # or use a unix socket instead of an interface
+ # unix-socket: /my/new/socket.sock
+
+The first version of the configuration API is available at the ``/v1/config`` HTTP endpoint.
+The configuration API supports the following HTTP request methods:
+
+================================ =========================
+HTTP request methods Operation
+================================ =========================
+**GET** ``/v1/config[/path]`` returns current configuration with an ETag
+**PUT** ``/v1/config[/path]`` upsert (update if it exists, insert otherwise); appends to an array
+**PATCH** ``/v1/config[/path]`` update property using `JSON Patch <https://jsonpatch.com/>`_
+**DELETE** ``/v1/config[/path]`` delete an existing property or list item at given index
+================================ =========================
+
+.. note::
+
+ The management API has other useful endpoints (metrics, schema, ...), see the detailed :ref:`API documentation <manager-api>`.
+
+**path:**
+ Determines the specific configuration option or configuration subtree at that path.
+ Items in lists and dictionaries are reachable using indices (``/list-name/{index}/``) and keys (``/dict-name/{key}/``).
+
+**payload:**
+ JSON or YAML encoding is used for configuration payload.
+
+.. note::
+
+ Some configuration options cannot be configured via the API for stability and security reasons (e.g. the API configuration itself).
+ In the case of an attempt to configure such an option, the operation is rejected.
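+
+As a sketch, driving the API with plain ``curl`` over the default Unix socket might look like this (the ``workers`` option is just an example path):
+
+.. code-block:: bash
+
+ # read the whole current configuration
+ $ curl --unix-socket /run/knot-resolver/manager.sock http://localhost/v1/config
+
+ # set the number of worker processes to 8
+ $ curl --unix-socket /run/knot-resolver/manager.sock \
+     -X PUT -H 'Content-Type: application/json' -d '8' \
+     http://localhost/v1/config/workers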
+
+
+-----------------------------------
+
+
+
+
+===================================
+Dynamically changing configuration
+===================================
+
+Knot Resolver Manager is capable of dynamically changing its configuration via an HTTP API or by reloading its config file. Both methods are equivalent in terms of their capabilities. The ``kresctl`` utility uses the HTTP API and provides a convenient command-line interface.
+
+Reloading configuration file
+============================
+
+To reload the configuration file, send the ``SIGHUP`` signal to the Manager process. The original configuration file will be read again and validated, and if there are no errors, the changes will be applied.
+
+Note: You can also send ``SIGHUP`` to the top-level process, i.e. to supervisord. Normally, supervisord would stop all processes and reload its configuration when it receives ``SIGHUP``. However, we have eliminated this footgun in order to prevent anyone from accidentally shutting down the whole resolver. Instead, the signal is only forwarded to the Manager.
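+
+For example, a reload can be triggered like this (a sketch; the process name given to ``pidof`` is an assumption, check how the Manager process is named on your system):
+
+.. code-block:: bash
+
+ # with systemd
+ $ systemctl reload knot-resolver.service
+
+ # without systemd, signal the Manager directly
+ $ kill -HUP "$(pidof knot-resolver)"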
+
+
+HTTP API
+========
+
+Listen address
+--------------
+
+By default, the Manager exposes its HTTP API on a Unix socket at ``FIXME``. However, you can change where it listens by changing the ``management.interface`` config option. To use ``kresctl``, you have to tell it this value.
+
+
+List of API endpoints
+---------------------
+
+- ``GET /schema`` returns JSON schema of the configuration data model
+- ``GET /schema/ui`` redirect to an external website with the JSON schema visualization
+- ``GET /metrics`` provides Prometheus metrics
+- ``GET /`` static response that can be used to determine whether the Manager is running
+- ``POST /stop`` gracefully stops the Manager, empty request body
+- ``{GET,PUT,DELETE,PATCH} /v1/config`` allows reading and modifying current configuration
+
+
+Config modification endpoint (v1)
+---------------------------------
+
+Note: The ``v1`` version qualifier is there for future-proofing. We don't have any plans at the moment to change the API any time soon. If that happens, we will support both the old and new API versions for some transition period.
+
+The API by default expects JSON, but it can also parse YAML when the ``Content-Type`` header is set to ``application/yaml`` or ``text/vnd.yaml``. The return value is always JSON with ``Content-Type: application/json``. The schema of the input and output is always a subtree of the configuration data model, which is described by the JSON schema exposed at ``/schema``.
+
+The API can operate on any configuration subtree by specifying a `JSON pointer <https://www.rfc-editor.org/rfc/rfc6901>`_ in the URL path (property names and list indices joined with ``/``). For example, to get the number of worker processes, you can send a ``GET`` request to ``v1/config/workers``.
+
+The different HTTP methods perform different modifications of the configuration:
+
+- ``GET`` returns a subtree of the current configuration
+- ``PUT`` sets a property
+- ``DELETE`` removes the given property or list item at the given index
+- ``PATCH`` updates the configuration using `JSON Patch <https://jsonpatch.com/>`_
+
+To prevent race conditions when changing the configuration from multiple clients simultaneously, every response from the Manager has an ``ETag`` header set. Requests accept ``If-Match`` and ``If-None-Match`` headers with the latest ``ETag`` value; when the precondition fails, the request is rejected with HTTP error code 412 (Precondition Failed).
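+
+A sketch of using the ``ETag`` mechanism from the shell follows; the socket path is the default one and the ``workers`` option is illustrative:
+
+.. code-block:: bash
+
+ # remember the ETag of the current configuration
+ $ etag="$(curl -s -D - -o /dev/null --unix-socket /run/knot-resolver/manager.sock \
+     http://localhost/v1/config | grep -i '^etag:' | cut -d' ' -f2 | tr -d '\r')"
+
+ # apply the change only if the configuration has not changed in the meantime
+ $ curl --unix-socket /run/knot-resolver/manager.sock \
+     -H "If-Match: $etag" -X PUT -d '8' http://localhost/v1/config/workers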
+
diff --git a/doc/manager-client.rst b/doc/manager-client.rst
new file mode 100644
index 00000000..1d202d8b
--- /dev/null
+++ b/doc/manager-client.rst
@@ -0,0 +1,202 @@
+.. SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _manager-client:
+
+***************
+kresctl utility
+***************
+
+.. program:: kresctl
+
+A command-line utility that communicates with the :ref:`management API <manager-api>`.
+It also provides tooling to work with the declarative configuration (:option:`validate`, :option:`convert`).
+
+.. option:: -h, --help
+
+ Shows a help message.
+ It can also be used with every :ref:`command <manager-client-commands>` to show that command's help message.
+
+
+================================
+Connecting to the management API
+================================
+
+Most :ref:`commands <manager-client-commands>` require connection to the :ref:`management API <manager-api>`.
+With the default Knot Resolver configuration, ``kresctl`` should communicate with the resolver without the need to specify the :option:`--socket` option.
+If not, this option must be set for each command.
+
+.. option:: -s <socket>, --socket <socket>
+
+ :default: "./manager.sock"
+
+ Optional, path to Unix-domain socket or network interface of the :ref:`management API <manager-api>`.
+
+.. code-block:: bash
+
+ $ kresctl --socket http://127.0.0.1@5000 {command} # network interface, port 5000
+ $ kresctl --socket /path/to/socket.sock {command} # unix-domain socket location
+
+.. _manager-client-commands:
+
+========
+Commands
+========
+
+The following positional arguments determine what kind of command will be executed.
+Only one of these arguments can be selected during the execution of a single ``kresctl`` command.
+
+
+.. option:: config
+
+ Performs operations on the running resolver's configuration.
+ Requires connection to the management API.
+
+
+ **Operations:**
+
+ Use one of the following operations to be performed on the configuration.
+
+
+ .. option:: get
+
+ Get current configuration from the resolver.
+
+ .. option:: -p <path>, --path <path>
+
+ Optional, path (JSON pointer, RFC6901) to the configuration resources.
+ By default, the entire configuration is selected.
+
+ .. option:: --json, --yaml
+
+ :default: :option:`--json`
+
+ Get configuration data in JSON or YAML format.
+
+ .. option:: <file>
+
+ Optional, path to the file where the exported configuration data will be saved.
+ If not specified, the data is printed.
+
+
+ .. option:: set
+
+ Set new configuration for the resolver.
+
+ .. option:: -p <path>, --path <path>
+
+ Optional, path (JSON pointer, RFC6901) to the configuration resources.
+ By default, the entire configuration is selected.
+
+ .. option:: --json, --yaml
+
+ :default: :option:`--json`
+
+ Set configuration data in JSON or YAML format.
+
+ .. option:: [ <file> | <value> ]
+
+ Optional, path to a file with the new configuration, or the new configuration value itself.
+ If not specified, the value will be read from stdin.
+
+
+ .. option:: delete
+
+ Delete given configuration property or list item at the given index.
+
+ .. option:: -p <path>, --path <path>
+
+ Optional, path (JSON pointer, RFC6901) to the configuration resources.
+ By default, the entire configuration is selected.
+
+
+ This command reads the current ``network`` configuration subtree from the resolver and exports it to a file in YAML format.
+
+ .. code-block:: bash
+
+ $ kresctl config get --yaml -p /network ./network-config.yaml
+
+ The next command changes the ``workers`` configuration to ``8``.
+
+ .. code-block:: bash
+
+ $ kresctl config set -p /workers 8
+
+.. option:: metrics
+
+ Reads aggregated metrics data in Prometheus format directly from the running resolver.
+ Requires connection to the management API.
+
+ .. option:: <file>
+
+ Optional, file to which the Prometheus metrics are exported.
+ If not specified, the metrics are printed.
+
+ .. code-block:: bash
+
+ $ kresctl metrics ./metrics/data.txt
+
+
+.. option:: schema
+
+ Shows the JSON-schema representation of Knot Resolver's configuration.
+
+ .. option:: -l, --live
+
+ Get configuration JSON-schema from the running resolver.
+ Requires connection to the management API.
+
+ .. option:: <file>
+
+ Optional, file to which the JSON-schema is exported.
+ If not specified, the JSON-schema is printed.
+
+ .. code-block:: bash
+
+ $ kresctl schema --live ./mydir/config-schema.json
+
+
+.. option:: validate
+
+ Validates configuration in JSON or YAML format.
+
+ .. option:: <input_file>
+
+ File with configuration in YAML or JSON format.
+
+ .. code-block:: bash
+
+ $ kresctl validate input-config.json
+
+
+.. option:: convert
+
+ Converts a JSON or YAML configuration to a Lua script.
+
+ .. option:: <input_file>
+
+ File with configuration in YAML or JSON format.
+
+ .. option:: <output_file>
+
+ Optional, output file for the configuration converted to a Lua script.
+ If not specified, the converted configuration is printed.
+
+ .. code-block:: bash
+
+ $ kresctl convert input-config.yaml output-script.lua
+
+
+.. option:: reload
+
+ Tells the resolver to reload the YAML configuration file.
+ Old processes are replaced by new ones (with the updated configuration) using rolling restarts,
+ so the DNS service remains available during the reload operation.
+ Requires connection to the management API.
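+
+ For example, assuming the default socket location:
+
+ .. code-block:: bash
+
+ $ kresctl reload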
+
+
+.. option:: stop
+
+ Tells the resolver to shut down everything.
+ No process will be left running after this command completes.
+ Requires connection to the management API.
diff --git a/doc/manager-dev.rst b/doc/manager-dev.rst
new file mode 100644
index 00000000..1e68a99b
--- /dev/null
+++ b/doc/manager-dev.rst
@@ -0,0 +1,115 @@
+.. SPDX-License-Identifier: GPL-3.0-or-later
+
+===========================
+Manager's development guide
+===========================
+
+In this guide, we will set up a development environment, discuss tooling and the high-level code architecture.
+
+
+Development environment
+=======================
+
+The Manager is written in Python 3 with the goal of supporting multiple versions of Python available in current Linux distributions. For example, at the time of writing, this means we support Python 3.6 and newer. These compatibility requirements also force us not to rely heavily on modern runtime libraries such as Pydantic.
+
+Tools
+-----
+
+To start working on the Manager, you need to install the following tools:
+
+- Python, preferably the oldest supported version. You can use `pyenv <https://github.com/pyenv/pyenv>`_ to install and manage multiple Python versions on your system. Alternatively, some distros ship packages for older Python versions as well.
+- `Poetry <https://python-poetry.org/>`_. We use it to manage our dependencies and virtual environments.
+
+
+First run of the Manager from source
+------------------------------------
+
+1. clone `the Knot Resolver repository <https://gitlab.nic.cz/knot/knot-resolver>`_
+2. enter the directory ``manager/`` in the repository, all following tasks will be performed from within that directory
+3. run ``poetry env use $(which python3.6)`` to configure Poetry to use a different Python interpreter than the default
+4. run ``poetry install`` to install all dependencies into a newly created virtual environment
+5. run ``./poe run`` to run the Manager in dev mode (Ctrl+C to exit)
+
+Helper scripts
+--------------
+
+In the previous section, you saw the use of the ``./poe`` command. `PoeThePoet <https://github.com/nat-n/poethepoet>`_ is a task runner which we use to simplify invoking common commands. You can run it by invoking ``./poe``, or you can install it system-wide via ``pip install poethepoet`` and invoke it just by calling ``poe`` (without the leading ``./``). When invoked globally, you don't have to worry about virtual environments and such; PoeThePoet figures that out for you, and commands always run in the appropriate virtual environment.
+
+To list the available commands, you can run ``poe help``. The most important ones for everyday development are:
+
+- ``poe run`` to compile ``kresd`` and run the Manager
+- ``poe run-debug`` same as ``run``, but also injects ``debugpy`` into the process to allow remote debugging on port 5678
+- ``poe kresctl`` to run the Manager's CLI tool
+- ``poe check`` to run static code analysis (enforced by our CI)
+- ``poe test`` to run unit tests (enforced by our CI)
+- ``poe format`` to autoformat the source code
+
+
+The commands are defined in the ``pyproject.toml`` file.
+
+
+Code editor
+-----------
+
+Feel free to use any text editor you like. However, we recommend using `Visual Studio Code <https://code.visualstudio.com/>`_ with `Pylance <https://marketplace.visualstudio.com/items?itemName=ms-python.vscode-pylance>`_ extension. That's what we use to work on the Manager and we know that it works really well for us. Just make sure to configure the extension so that it uses Poetry's virtual environment. We have a helper for that - ``poe config-vscode``, but your mileage may vary when using it.
+
+
+Code structure
+==============
+
+The Manager's code is split into several distinct logical components:
+
+- controllers
+ - the HTTP API server (*the server*, ``server.py``)
+ - high-level coordinator of ``kresd`` instances (*the manager*, ``kres_manager.py``)
+ - subprocess controller for launching and stopping ``kresd`` processes (*the subprocess controller*, ``kresd_controller/``)
+- data
+ - schema validation and definition (*the datamodel*, ``datamodel/``)
+ - utilities, mainly general schema validation and parsing logic (*utils*, ``utils/``)
+- ``kresctl`` utility (*kresctl*, ``cli/``)
+
+When running, *the server* receives all inputs from the outside, passes them on to *the manager*, which applies the requested changes through the use of *the subprocess controller*. In all stages, we use *the datamodel* to pass the current configuration around.
+
+
+The subprocess controllers
+--------------------------
+
+Internally, the subprocess controllers are hidden behind an interface and there can be multiple implementations. In practice, there is only one and that is `supervisord <http://supervisord.org>`_. Historically, we tried to support systemd as well, but due to privilege escalation issues, we started focusing only on supervisord.
+
+The supervisord subprocess controller actually extends supervisord with new functionality; most notably, it reimplements the ``sd_notify`` semantics from systemd. Supervisord is extended through loading plugins, which in turn modify a few internal components of supervisord. Given the maturity of the supervisord project, we believe this will remain reasonably stable even across supervisord updates.
+
+We want to have the Manager restarted if it fails, so that one mishandled API request can't bring everything down. We want the subprocess controllers to control the execution of the Manager and restart it if needed. Therefore, there is a circular dependency. To solve it, the subprocess controller implementations are allowed to ``exec()`` into anything else while starting. To give an example of how the startup works with supervisord:
+
+1. *the server* loads the config, initiates *the manager* and *the supervisord subprocess controller*
+2. *the supervisord subprocess controller* detects that there is no supervisord running at the moment, generates a new supervisord config and exec's supervisord
+3. supervisord starts, loads its config and starts *the server* again
+4. *the server* loads the config, initiates *the manager* and *the supervisord subprocess controller*
+5. *the supervisord subprocess controller* detects that there is a supervisord instance running, generates a new config for it and reloads it
+6. *the manager* starts new workers based on the initial configuration
+7. *the server* makes its API available to use and the Manager is fully running
+
+
+Processing of config change requests
+------------------------------------
+
+1. a change request is received by *the server*
+2. the raw text input is parsed and verified into a configuration object using *the datamodel*
+3. *the manager* is asked to apply new configuration
+4. *the manager* starts a canary process with the new config (Lua config generated from the configuration object), monitoring it for failures
+5. *the manager* restarts all ``kresd`` instances one by one
+6. *the server* returns a success
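+
+In practice, the whole pipeline above is exercised by any change submitted through the API; a minimal sketch using the documented ``kresctl`` syntax (assuming the default socket):
+
+.. code-block:: bash
+
+ $ kresctl config set -p /workers 8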
+
+
+Packaging
+=========
+
+Packaging is handled by `apkg <https://apkg.readthedocs.io/en/latest/>`_ cooperating with Poetry. To allow for backwards compatibility with Python tooling not supporting `PEP-517 <https://peps.python.org/pep-0517/>`_, we generate a ``setup.py`` file with the command ``poe gen-setuppy``, so our project is compatible with ``setuptools`` as well.
+
+
+Testing
+=======
+
+The Manager has two suites of tests - unit tests and packaging tests, all residing in the ``manager/tests/`` directory. The unit tests are run by `pytest <https://docs.pytest.org/>`_, while the packaging tests are distro-specific and use `apkg test <https://apkg.readthedocs.io/en/latest/commands/#test>`_.
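+
+For example, from the ``manager/`` directory (a sketch; ``apkg test`` additionally requires a prepared distro test environment, as described in the apkg documentation):
+
+.. code-block:: bash
+
+ $ poe test # run the unit tests with pytest
+ $ apkg test # run the distro-specific packaging tests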
+
+
+
diff --git a/doc/manager-kresctl.rst b/doc/manager-kresctl.rst
new file mode 100644
index 00000000..a5c2e143
--- /dev/null
+++ b/doc/manager-kresctl.rst
@@ -0,0 +1,20 @@
+===============
+kresctl utility
+===============
+
+This command-line utility allows you to configure and control a running Knot Resolver.
+For that, it uses the above-mentioned HTTP API.
+
+For example, the following command changes the number of ``kresd`` workers to 4.
+
+.. code-block:: bash
+
+ $ kresctl config set -p /workers 4
+
+The utility can also help with configuration **validation** and with configuration format **conversion**.
+For more information, read the full :ref:`kresctl documentation <manager-client>` or use the ``kresctl --help`` command.
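+
+For instance (file names are illustrative):
+
+.. code-block:: bash
+
+ $ kresctl validate my-config.yaml
+ $ kresctl convert my-config.yaml my-config.lua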
+
+.. note::
+
+ With no changes to the management configuration, ``kresctl`` should work out of the box.
+ Otherwise, the ``-s`` argument can be used to specify the path to the HTTP API endpoint. \ No newline at end of file
diff --git a/doc/manager-no-systemd.rst b/doc/manager-no-systemd.rst
new file mode 100644
index 00000000..23f6465e
--- /dev/null
+++ b/doc/manager-no-systemd.rst
@@ -0,0 +1,7 @@
+.. SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _manager-no-systemd:
+
+*********************
+Usage without systemd
+*********************
diff --git a/doc/meson.build b/doc/meson.build
index 72a8a7b0..494985ce 100644
--- a/doc/meson.build
+++ b/doc/meson.build
@@ -18,6 +18,12 @@ man_kresd = configure_file(
)
install_man(man_kresd)
+man_kresctl = configure_file(
+ input: 'kresctl.8.in',
+ output: 'kresctl.8',
+ configuration: man_config,
+)
+install_man(man_kresctl)
# html and info documentation
if get_option('doc') == 'enabled'
@@ -32,22 +38,35 @@ if get_option('doc') == 'enabled'
# python dependencies: breathe, sphinx_rtd_theme
python_breathe = run_command('python3', '-c', 'import breathe', check: false)
if python_breathe.returncode() != 0
- # some distros might use python2 sphinx
python_breathe = run_command('python2', '-c', 'import breathe', check: false)
if python_breathe.returncode() != 0
error('missing doc dependency: python breathe')
- else
- python_sphinx_rtd_theme = run_command('python2', '-c', 'import sphinx_rtd_theme', check: false)
- if python_sphinx_rtd_theme.returncode() != 0
- error('missing doc dependency: python sphinx_rtd_theme')
- endif
endif
+ python = 'python2'
else
- python_sphinx_rtd_theme = run_command('python3', '-c', 'import sphinx_rtd_theme', check: false)
- if python_sphinx_rtd_theme.returncode() != 0
- error('missing doc dependency: python sphinx_rtd_theme')
- endif
+ python = 'python3'
+ endif
+
+ python_sphinx_rtd_theme = run_command(python, '-c', 'import sphinx_rtd_theme', check: false)
+ if python_sphinx_rtd_theme.returncode() != 0
+ error('missing doc dependency: python sphinx_rtd_theme')
+ endif
+
+ python_sphinx_tabs = run_command(python, '-c', 'import sphinx_tabs', check: false)
+ if python_sphinx_tabs.returncode() != 0
+ error('missing doc dependency: python sphinx_tabs')
endif
+
+ jsonschema2md = find_program('jsonschema2md', required: false)
+ if not jsonschema2md.found()
+ error('missing doc dependency: python jsonschema2md')
+ endif
+
+ jsonschemaforhumans = run_command(python, '-c', 'import json_schema_for_humans', check: false)
+ if jsonschemaforhumans.returncode() != 0
+ error('missing doc dependency: python json-schema-for-humans')
+ endif
+
message('------------------------')
# install html docs
@@ -66,14 +85,15 @@ if get_option('doc') == 'enabled'
endif
endif
+
make_doc = find_program('../scripts/make-doc.sh')
run_target(
'doc',
- command: make_doc,
+ command: make_doc
)
run_target(
'doc-strict',
- command: [make_doc, '-W'],
+ command: [make_doc, '-W']
)
diff --git a/doc/quickstart-config.rst b/doc/quickstart-config.rst
deleted file mode 100644
index df0fed4e..00000000
--- a/doc/quickstart-config.rst
+++ /dev/null
@@ -1,209 +0,0 @@
-.. SPDX-License-Identifier: GPL-3.0-or-later
-
-.. _quickstart-config:
-
-*************
-Configuration
-*************
-
-.. contents::
- :depth: 1
- :local:
-
-.. note::
-
- When copy&pasting examples from this manual please pay close
- attention to brackets and also line ordering - order of lines matters.
-
- The configuration language is in fact Lua script, so you can use full power
- of this programming language. See article
- `Learn Lua in 15 minutes`_ for a syntax overview.
-
-Easiest way to configure Knot Resolver is to paste your configuration into
-configuration file ``/etc/knot-resolver/kresd.conf``.
-Complete configurations files for examples in this chapter
-can be found `here <https://gitlab.nic.cz/knot/knot-resolver/tree/master/etc/config>`_.
-The example configuration files are also installed as documentation files, typically in directory ``/usr/share/doc/knot-resolver/examples/`` (their location may be different based on your Linux distribution).
-Detailed configuration of daemon and implemented modules can be found in configuration reference:
-
-
-Listening on network interfaces
-===============================
-
-Network interfaces to listen on and supported protocols are configured using :func:`net.listen()` function.
-
-The following configuration instructs Knot Resolver to receive standard unencrypted DNS queries on IP addresses `192.0.2.1` and `2001:db8::1`. Encrypted DNS queries are accepted using DNS-over-TLS protocol on all IP addresses configured on network interface `eth0`, TCP port 853.
-
-.. code-block:: lua
-
- -- unencrypted DNS on port 53 is default
- net.listen('192.0.2.1')
- net.listen('2001:db8::1')
- net.listen(net.eth0, 853, { kind = 'tls' })
-
-.. warning::
-
- On machines with multiple IP addresses on the same interface avoid listening on wildcards ``0.0.0.0`` or ``::``.
- Knot Resolver could answer from different IP addresses if the network address ranges
- overlap, and clients would refuse such a response.
-
-
-Scenario: Internal Resolver
-===========================
-
-This is an example of typical configuration for company-internal resolver which is not accessible from outside of company network.
-
-Internal-only domains
-^^^^^^^^^^^^^^^^^^^^^
-
-An internal-only domain is a domain not accessible from the public Internet.
-In order to resolve internal-only domains a query policy has to be added to forward queries to a correct internal server.
-This configuration will forward two listed domains to a DNS server with IP address ``192.0.2.44``.
-
-.. code-block:: lua
-
- -- define list of internal-only domains
- internalDomains = policy.todnames({'company.example', 'internal.example'})
-
- -- forward all queries belonging to domains in the list above to IP address '192.0.2.44'
- policy.add(policy.suffix(policy.FLAGS({'NO_CACHE'}), internalDomains))
- policy.add(policy.suffix(policy.STUB({'192.0.2.44'}), internalDomains))
-
-See chapter :ref:`dns-graft` for more details.
-
-
-.. _ispresolver:
-
-Scenario: ISP Resolver
-======================
-
-The following configuration is typical for Internet Service Providers who offer DNS resolver
-service to their own clients in their own network. Please note that running a *public DNS resolver*
-is more complicated and not covered by this quick start guide.
-
-Limiting client access
-^^^^^^^^^^^^^^^^^^^^^^
-With exception of public resolvers, a DNS resolver should resolve only queries sent by clients in its own network. This restriction limits attack surface on the resolver itself and also for the rest of the Internet.
-
-In a situation where access to DNS resolver is not limited using IP firewall, you can implement access restrictions using the :ref:`view module <mod-view>` which combines query source information with :ref:`policy rules <mod-policy>`.
-Following configuration allows only queries from clients in subnet 192.0.2.0/24 and refuses all the rest.
-
-.. code-block:: lua
-
- modules.load('view')
-
- -- whitelist queries identified by subnet
- view:addr('192.0.2.0/24', policy.all(policy.PASS))
-
- -- drop everything that hasn't matched
- view:addr('0.0.0.0/0', policy.all(policy.DROP))
-
-TLS server configuration
-^^^^^^^^^^^^^^^^^^^^^^^^
-Today clients are demanding secure transport for DNS queries between client machine and DNS resolver. The recommended way to achieve this is to start DNS-over-TLS server and accept also encrypted queries.
-
-First step is to enable TLS on listening interfaces:
-
-.. code-block:: lua
-
- net.listen('192.0.2.1', 853, { kind = 'tls' })
- net.listen('2001::db8:1', 853, { kind = 'tls' })
-
-By default a self-signed certificate is generated.
-Second step is then obtaining and configuring your own TLS certificates
-signed by a trusted CA. Once the certificate was obtained a path to certificate files can be specified using function :func:`net.tls()`:
-
-.. code-block:: lua
-
- net.tls("/etc/knot-resolver/server-cert.pem", "/etc/knot-resolver/server-key.pem")
-
-
-Mandatory domain blocking
-^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Some jurisdictions mandate blocking access to certain domains. This can be achieved using following :ref:`policy rule <mod-policy>`:
-
-.. code-block:: lua
-
- policy.add(
- policy.suffix(policy.DENY,
- policy.todnames({'example.com.', 'blocked.example.net.'})))
-
-
-
-.. _personalresolver:
-
-Scenario: Personal Resolver
-===========================
-
-DNS queries can be used to gather data about user behavior.
-Knot Resolver can be configured to forward DNS queries elsewhere,
-and to protect them from eavesdropping by TLS encryption.
-
-.. warning::
-
- Latest research has proven that encrypting DNS traffic is not sufficient to protect privacy of users.
- For this reason we recommend all users to use full VPN instead of encrypting *just* DNS queries.
- Following configuration is provided **only for users who cannot encrypt all their traffic**.
- For more information please see following articles:
-
- - Simran Patil and Nikita Borisov. 2019. What can you learn from an IP? (`slides <https://irtf.org/anrw/2019/slides-anrw19-final44.pdf>`_, `the article itself <https://dl.acm.org/authorize?N687437>`_)
- - `Bert Hubert. 2019. Centralised DoH is bad for Privacy, in 2019 and beyond <https://labs.ripe.net/Members/bert_hubert/centralised-doh-is-bad-for-privacy-in-2019-and-beyond>`_
-
-
-Forwarding over TLS protocol (DNS-over-TLS)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-Forwarding over TLS protocol protects DNS queries sent out by resolver.
-It can be configured using :ref:`policy.TLS_FORWARD <tls-forwarding>` function which provides methods for authentication.
-See list of `DNS Privacy Test Servers`_ supporting DNS-over-TLS to test your configuration.
-
-Read more on :ref:`tls-forwarding`.
-
-
-Forwarding to multiple targets
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-With the use of :any:`policy.slice` function, it is possible to split the
-entire DNS namespace into distinct "slices". When used in conjunction with
-:ref:`policy.TLS_FORWARD <tls-forwarding>`, it's possible to forward different queries to different
-remote resolvers. As a result no single remote resolver will get complete list
-of all queries performed by this client.
-
-.. warning::
-
- Beware that this method has not been scientifically tested and there might be
- types of attacks which will allow remote resolvers to infer more information about the client.
- Again: If possible encrypt **all** your traffic and not just DNS queries!
-
-.. code-block:: lua
-
- policy.add(policy.slice(
- policy.slice_randomize_psl(),
- policy.TLS_FORWARD({{'192.0.2.1', hostname='res.example.com'}}),
- policy.TLS_FORWARD({
- -- multiple servers can be specified for a single slice
- -- the one with lowest round-trip time will be used
- {'193.17.47.1', hostname='odvr.nic.cz'},
- {'185.43.135.1', hostname='odvr.nic.cz'},
- })
- ))
-
-Non-persistent cache
-^^^^^^^^^^^^^^^^^^^^
-Knot Resolver's cache contains data clients queried for.
-If you are concerned about attackers who are able to get access to your
-computer system in power-off state and your storage device is not secured by
-encryption you can move the cache to tmpfs_.
-See chapter :ref:`cache_persistence`.
-
-
-.. raw:: html
-
- <h2>Next steps</h2>
-
-Congratulations! Your resolver is now up and running and ready for queries. For
-serious deployments do not forget to read :ref:`configuration-chapter` and
-:ref:`operation-chapter` chapters.
-
-.. _`Learn Lua in 15 minutes`: http://tylerneylon.com/a/learn-lua/
-.. _`DNS Privacy Test Servers`: https://dnsprivacy.org/wiki/display/DP/DNS+Privacy+Test+Servers
-.. _tmpfs: https://en.wikipedia.org/wiki/Tmpfs
diff --git a/doc/quickstart-install.rst b/doc/quickstart-install.rst
deleted file mode 100644
index 329fb630..00000000
--- a/doc/quickstart-install.rst
+++ /dev/null
@@ -1,73 +0,0 @@
-.. SPDX-License-Identifier: GPL-3.0-or-later
-
-.. _quickstart-intro:
-
-Welcome to Knot Resolver Quick Start Guide! This chapter will guide you through first installation and basic setup recommended for your use-case.
-
-Before we start let us explain basic conventions used in this text:
-
-This is Linux/Unix shell command to be executed and an output from this command:
-
-.. code-block:: bash
-
- $ echo "This is output!"
- This is output!
- $ echo "We use sudo to execute commands as root:"
- We use sudo to execute commands as root:
- $ sudo id
- uid=0(root) gid=0(root) groups=0(root)
-
-Snippets from Knot Resolver's configuration file **do not start with $ sign** and look like this:
-
-.. code-block:: lua
-
- -- this is a comment
- -- following line will start listening on IP address 192.0.2.1 port 53
- net.listen('192.0.2.1')
-
-
-.. _quickstart-install:
-
-************
-Installation
-************
-
-As a first step, configure your system to use upstream repositories which have
-the **latest version** of Knot Resolver. Follow the instructions below for your
-distribution.
-
-**Debian/Ubuntu**
-
-.. note:: Please note that the packages available in distribution repositories
- of Debian and Ubuntu are outdated. Make sure to follow these steps to use
- our upstream repositories.
-
-.. code-block:: bash
-
- $ wget https://secure.nic.cz/files/knot-resolver/knot-resolver-release.deb
- $ sudo dpkg -i knot-resolver-release.deb
- $ sudo apt update
- $ sudo apt install -y knot-resolver
-
-**CentOS 7+**
-
-.. code-block:: bash
-
- $ sudo yum install -y epel-release
- $ sudo yum install -y knot-resolver
-
-**Fedora**
-
-.. code-block:: bash
-
- $ sudo dnf install -y knot-resolver
-
-**Arch Linux**
-
-.. code-block:: bash
-
- $ sudo pacman -S knot-resolver
-
-
-**openSUSE Leap / Tumbleweed**
-Add the `OBS <https://en.opensuse.org/Portal:Build_Service>`_ package repository `home:CZ-NIC:knot-resolver-latest <https://software.opensuse.org/download.html?project=home%3ACZ-NIC%3Aknot-resolver-latest&package=knot-resolver>`_ to your system.
diff --git a/doc/requirements.txt b/doc/requirements.txt
index 3da3c237..32d347be 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -1,2 +1,6 @@
Sphinx>=3.0.0
+sphinx-tabs
breathe
+jsonschema2md
+json-schema-for-humans
+sphinx_mdinclude \ No newline at end of file
diff --git a/doc/upgrading-to-6.rst b/doc/upgrading-to-6.rst
new file mode 100644
index 00000000..b8aa0831
--- /dev/null
+++ b/doc/upgrading-to-6.rst
@@ -0,0 +1,41 @@
+.. SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _upgrading-to-6:
+
+*****************************
+Upgrading to 6.0.0 from 5.x.x
+*****************************
+
+Version 6 of Knot Resolver brings one significant change: it introduces the *Knot Resolver Manager*, a new way of interacting with Knot Resolver. The Manager brings several new features:
+
+* **new declarative configuration**
+* HTTP API to change configuration on the fly without downtime
+* it hides the complexity of running multiple instances of ``kresd``
+
+Now, you might be worried about the future of ``kresd``. No worries: you can use ``kresd`` directly the same way you did before; nothing changes there right now. However, in the long run we might make breaking changes in the way ``kresd`` is configured, and using it directly is from now on considered an advanced setup.
+
+With the release of version 6, there is a new way to configure and control your running ``kresd`` instances
+so that you don't have to configure multiple systemd services. The new Knot Resolver Manager handles it for you.
+
+
+Command rosetta
+===============
+
+In the table below, you can compare the way Knot Resolver was used before and how it can be used now.
+
+========================================== =========================================================================================== ==================================================================
+Task How to do it now How it was done before
+========================================== =========================================================================================== ==================================================================
+start resolver ``systemctl start knot-resolver`` ``systemctl start kresd@1``
+stop resolver ``systemctl stop knot-resolver`` ``systemctl stop kresd@1``
+start resolver with 4 worker processes set ``/workers`` to 4 in the config file manually start 4 services by ``systemctl start kresd@{1,2,3,4}``
+rolling restart after updating config ``systemctl reload knot-resolver`` (or use API or ``kresctl``) manually restart individual ``kresd@`` services one by one
+open logs of all instances ``journalctl -u knot-resolver`` ``journalctl -u system-kresd.slice``
+open log of a single kresd instance ``journalctl -u knot-resolver _PID=xxx`` ``journalctl -u kresd@1``
+updating config programmatically use HTTP API or ``kresctl`` command write a custom tool to generate new config and restart ``kresd``'s
+handling errors during config changes HTTP API just reports error, resolver keeps running with previous config custom tools for every user
+validate new config ``kresctl validate path/to/new/config.yml`` (not fully bulletproof), then try to run it run ``kresd`` with the config and see if it fails
+look at the Lua config ``kresctl convert path/to/new/config.yml`` ``cat /path/to/config.conf``
+gather metrics point Prometheus etc. at the single HTTP API collect metrics manually from all individual processes
+========================================== =========================================================================================== ================================================================== \ No newline at end of file
diff --git a/doc/upgrading.rst b/doc/upgrading.rst
index 56655faf..e630e9e7 100644
--- a/doc/upgrading.rst
+++ b/doc/upgrading.rst
@@ -29,6 +29,11 @@ newer versions when they are released.
.. _`supervisord`: http://supervisord.org/
+5.x to 6.0
+==========
+
+* :ref:`detailed upgrade guide <upgrading-to-6>`
+
5.4 to 5.5
==========
diff --git a/doc/usecase-internal-resolver.rst b/doc/usecase-internal-resolver.rst
new file mode 100644
index 00000000..90cfc6e0
--- /dev/null
+++ b/doc/usecase-internal-resolver.rst
@@ -0,0 +1,24 @@
+.. SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _usecase-internal-resolver:
+
+*****************
+Internal Resolver
+*****************
+
+When running the resolver for the local network, not much has to be changed and the configuration looks essentially the same as when running locally.
+
+.. code-block:: yaml
+
+ rundir: /var/run/knot-resolver
+ workers: auto # run as many worker processes as there are available CPU cores
+ management:
+ unix-socket: /var/run/knot-resolver/manager.sock
+ cache:
+ storage: /var/cache/knot-resolver
+ size-max: 100MB
+ network:
+ listen:
+ - interface: 'eth0'
+ port: 53
+ kind: 'dns'
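+
+As a quick check from a client on the internal network (an illustrative test, assuming the resolver's machine is reachable at ``192.0.2.1``):
+
+.. code-block:: bash
+
+ $ dig @192.0.2.1 example.com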
diff --git a/doc/usecase-isp-resolver.rst b/doc/usecase-isp-resolver.rst
new file mode 100644
index 00000000..5812fd23
--- /dev/null
+++ b/doc/usecase-isp-resolver.rst
@@ -0,0 +1,7 @@
+.. SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _usecase-isp-resolver:
+
+************
+ISP Resolver
+************
diff --git a/doc/usecase-network-interfaces.rst b/doc/usecase-network-interfaces.rst
new file mode 100644
index 00000000..657fb652
--- /dev/null
+++ b/doc/usecase-network-interfaces.rst
@@ -0,0 +1,42 @@
+.. SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _usecase-network-interfaces:
+
+*******************************
+Listening on network interfaces
+*******************************
+
+The first thing you will probably need to configure is the set of network interfaces to listen on.
+
+The following configuration instructs Knot Resolver to receive standard unencrypted DNS queries on IP addresses `192.0.2.1` and `2001:db8::1`.
+Encrypted DNS queries are accepted using DNS-over-TLS protocol on all IP addresses configured on network interface `eth0`, TCP port 853.
+
+.. tabs::
+
+ .. group-tab:: |yaml|
+
+ .. code-block:: yaml
+
+ network:
+ listen:
+ - interface: ['192.0.2.1', '2001:db8::1'] # unencrypted DNS on port 53 is default
+ - interface: 'eth0'
+ port: 853
+ kind: 'dot'
+
+ .. group-tab:: |lua|
+
+ Network interfaces to listen on and supported protocols are configured using :func:`net.listen()` function.
+
+ .. code-block:: lua
+
+ -- unencrypted DNS on port 53 is default
+ net.listen('192.0.2.1')
+ net.listen('2001:db8::1')
+ net.listen(net.eth0, 853, { kind = 'tls' })
+
+.. warning::
+
+ On machines with multiple IP addresses on the same interface avoid listening on wildcards ``0.0.0.0`` or ``::``.
+ Knot Resolver could answer from different IP addresses if the network address ranges
+ overlap, and clients would refuse such a response. \ No newline at end of file
diff --git a/doc/usecase-personal-resolver.rst b/doc/usecase-personal-resolver.rst
new file mode 100644
index 00000000..d51d039b
--- /dev/null
+++ b/doc/usecase-personal-resolver.rst
@@ -0,0 +1,22 @@
+.. SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _usecase-personal-resolver:
+
+*****************
+Personal Resolver
+*****************
+
+For local usage on a single system, a configuration like the following should be sufficient. An equivalent configuration is the default and should be packaged by your distribution of choice.
+
+.. code-block:: yaml
+
+ rundir: /var/run/knot-resolver
+ workers: 1
+ management:
+ unix-socket: /var/run/knot-resolver/manager.sock
+ cache:
+ storage: /var/cache/knot-resolver
+ size-max: 10MB
+ network:
+ listen:
+ - interface: 127.0.0.1@53 \ No newline at end of file
diff --git a/etc/meson.build b/etc/meson.build
index 31859baf..2fbacf64 100644
--- a/etc/meson.build
+++ b/etc/meson.build
@@ -27,10 +27,8 @@ if install_root_keys
)
endif
-
subdir('config')
-
install_data(
sources: etc_files,
install_dir: etc_dir
diff --git a/lib/cache/api.c b/lib/cache/api.c
index 116d775e..37dff26e 100644
--- a/lib/cache/api.c
+++ b/lib/cache/api.c
@@ -26,6 +26,7 @@
#include "lib/generic/trie.h"
#include "lib/resolve.h"
#include "lib/rplan.h"
+#include "lib/rules/api.h"
#include "lib/utils.h"
#include "lib/cache/impl.h"
@@ -34,8 +35,6 @@
* - Reconsider when RRSIGs are put in and retrieved from the cache.
* Currently it's always done, which _might_ be spurious, depending
* on how kresd will use the returned result.
- * There's also the "problem" that kresd ATM does _not_ ask upstream
- * with DO bit in some cases.
*/
@@ -126,8 +125,7 @@ int kr_cache_open(struct kr_cache *cache, const struct kr_cdb_api *api, struct k
* LMDB only restricts our env without changing the in-file maxsize.
* That is worked around by reopening (found no other reliable way). */
cache->api->close(cache->db, &cache->stats);
- struct kr_cdb_opts opts2;
- memcpy(&opts2, opts, sizeof(opts2));
+ struct kr_cdb_opts opts2 = *opts;
opts2.maxsize = 0;
ret = cache->api->open(&cache->db, &cache->stats, &opts2, mm);
}
@@ -177,7 +175,7 @@ int kr_cache_commit(struct kr_cache *cache)
return kr_error(EINVAL);
}
if (cache->api->commit) {
- return cache_op(cache, commit);
+ return cache_op(cache, commit, true);
}
return kr_ok();
}
@@ -317,6 +315,19 @@ int cache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->req;
struct kr_query *qry = req->current_query;
+
+ /* TODO: review when to run this? We want to process rules here
+ * even when some of the cache exit-conditions happen. NO_CACHE in particular. */
+ if (!req->options.PASSTHRU_LEGACY && !qry->flags.CACHE_TRIED) {
+ int ret = kr_rule_local_data_answer(qry, pkt);
+ if (ret < 0)
+ ctx->state = KR_STATE_FAIL;
+ if (ret != 0) {
+ qry->flags.CACHE_TRIED = true;
+ return ctx->state;
+ }
+ }
+
/* We first check various exit-conditions and then call the _real function. */
if (!kr_cache_is_open(&req->ctx->cache)
@@ -328,6 +339,7 @@ int cache_peek(kr_layer_t *ctx, knot_pkt_t *pkt)
}
/* ATM cache only peeks for qry->sname and that would be useless
* to repeat on every iteration, so disable it from now on.
+ * Note that xNAME causes a followup kr_query, so cache will get re-tried.
* LATER(optim.): assist with more precise QNAME minimization. */
qry->flags.CACHE_TRIED = true;
diff --git a/lib/cache/api.h b/lib/cache/api.h
index 0abe9202..454f629e 100644
--- a/lib/cache/api.h
+++ b/lib/cache/api.h
@@ -105,7 +105,10 @@ KR_EXPORT
int kr_cache_clear(struct kr_cache *cache);
-/* ** This interface is temporary. ** */
+/* ** This interface is temporary. **
+ * _peek_exact() doesn't look e.g. at signed wildcards
+ * or at local data defined in rules
+ * */
struct kr_cache_p {
uint32_t time; /**< The time of inception. */
diff --git a/lib/cache/cdb_api.h b/lib/cache/cdb_api.h
index fcca8a9a..41130b62 100644
--- a/lib/cache/cdb_api.h
+++ b/lib/cache/cdb_api.h
@@ -4,6 +4,7 @@
#pragma once
+#include <stdbool.h>
#include <stdint.h>
#include <libknot/db/db.h>
@@ -12,6 +13,7 @@
struct kr_cdb_opts {
const char *path; /*!< Cache URI path. */
size_t maxsize; /*!< Suggested cache size in bytes; pass 0 to keep unchanged/default. */
+ bool is_cache; /*!< Some behavior changes based on use case. TODO: details. */
};
struct kr_cdb_stats {
@@ -30,6 +32,7 @@ struct kr_cdb_stats {
uint64_t match_miss;
uint64_t read_leq;
uint64_t read_leq_miss;
+ uint64_t read_less;
double usage_percent;
};
@@ -54,8 +57,11 @@ struct kr_cdb_api {
int (*count)(kr_cdb_pt db, struct kr_cdb_stats *stat);
int (*clear)(kr_cdb_pt db, struct kr_cdb_stats *stat);
- /** Run after a row of operations to release transaction/lock if needed. */
- int (*commit)(kr_cdb_pt db, struct kr_cdb_stats *stat);
+ /** Run after a row of operations to release transaction/lock if needed.
+ * \param accept true=commit / false=abort
+ * \return error code - accepting RW transactions can fail with LMDB.
+ */
+ int (*commit)(kr_cdb_pt db, struct kr_cdb_stats *stat, bool accept);
/* Data access */
@@ -83,6 +89,12 @@ struct kr_cdb_api {
int (*read_leq)(kr_cdb_pt db, struct kr_cdb_stats *stat,
knot_db_val_t *key, knot_db_val_t *val);
+ /** Less-than search (lexicographic ordering).
+ * On successful return, key->data and val->data point to DB-owned data.
+ * \return > 0 if a lesser key was found, < 0 kr_error. */
+ int (*read_less)(kr_cdb_pt db, struct kr_cdb_stats *stat,
+ knot_db_val_t *key, knot_db_val_t *val);
+
/** Return estimated space usage (0--100). */
double (*usage_percent)(kr_cdb_pt db);
diff --git a/lib/cache/cdb_lmdb.c b/lib/cache/cdb_lmdb.c
index 80c73729..a9c29f9a 100644
--- a/lib/cache/cdb_lmdb.c
+++ b/lib/cache/cdb_lmdb.c
@@ -17,6 +17,10 @@
#include "lib/cache/cdb_api.h"
#include "lib/utils.h"
+/// A hacky way allowing usual usage of kr_log_error(MDB, ...)
+/// while differentiating between cache and rules in the produced logs.
+#define LOG_GRP_MDB (env->is_cache ? LOG_GRP_CACHE : LOG_GRP_RULES)
+#define LOG_GRP_MDB_TAG (env->is_cache ? LOG_GRP_CACHE_TAG : LOG_GRP_RULES_TAG)
/* Defines */
#define LMDB_DIR_MODE 0770
@@ -42,6 +46,8 @@ struct lmdb_env
MDB_cursor *ro_curs;
} txn;
+ bool is_cache; /**< cache vs. rules; from struct kr_cdb_opts::is_cache */
+
/* Cached part of struct stat for data.mdb. */
dev_t st_dev;
ino_t st_ino;
@@ -70,10 +76,11 @@ static inline kr_cdb_pt env2db(struct lmdb_env *env)
return (kr_cdb_pt)env;
}
-static int cdb_commit(kr_cdb_pt db, struct kr_cdb_stats *stats);
+static int cdb_commit(kr_cdb_pt db, struct kr_cdb_stats *stats, bool accept);
+static void txn_abort(struct lmdb_env *env);
/** @brief Convert LMDB error code. */
-static int lmdb_error(int error)
+static int lmdb_error(struct lmdb_env *env, int error)
{
switch (error) {
case MDB_SUCCESS:
@@ -83,9 +90,12 @@ static int lmdb_error(int error)
case ENOSPC:
case MDB_MAP_FULL:
case MDB_TXN_FULL:
+ // For now, ENOSPC is a hard error for rules; easiest to log here.
+ if (!env->is_cache)
+ kr_log_error(MDB, "LMDB error: %s\n", mdb_strerror(error));
return kr_error(ENOSPC);
default:
- kr_log_error(CACHE, "LMDB error: %s\n", mdb_strerror(error));
+ kr_log_error(MDB, "LMDB error: %s\n", mdb_strerror(error));
return kr_error(error);
}
}
@@ -104,17 +114,17 @@ static inline MDB_val val_knot2mdb(knot_db_val_t v)
* It's much lighter than reopen_env(). */
static int refresh_mapsize(struct lmdb_env *env)
{
- int ret = cdb_commit(env2db(env), NULL);
- if (!ret) ret = lmdb_error(mdb_env_set_mapsize(env->env, 0));
+ int ret = cdb_commit(env2db(env), NULL, true);
+ if (!ret) ret = lmdb_error(env, mdb_env_set_mapsize(env->env, 0));
if (ret) return ret;
MDB_envinfo info;
- ret = lmdb_error(mdb_env_info(env->env, &info));
+ ret = lmdb_error(env, mdb_env_info(env->env, &info));
if (ret) return ret;
env->mapsize = info.me_mapsize;
if (env->mapsize != env->st_size) {
- kr_log_info(CACHE, "suspicious size of cache file '%s'"
+ kr_log_info(MDB, "suspicious size of file '%s'"
": file size %zu != LMDB map size %zu\n",
env->mdb_data_path, (size_t)env->st_size, env->mapsize);
}
@@ -126,10 +136,10 @@ static void clear_stale_readers(struct lmdb_env *env)
int cleared;
int ret = mdb_reader_check(env->env, &cleared);
if (ret != MDB_SUCCESS) {
- kr_log_error(CACHE, "failed to clear stale reader locks: "
+ kr_log_error(MDB, "failed to clear stale reader locks: "
"LMDB error %d %s\n", ret, mdb_strerror(ret));
} else if (cleared != 0) {
- kr_log_info(CACHE, "cleared %d stale reader locks\n", cleared);
+ kr_log_info(MDB, "cleared %d stale reader locks\n", cleared);
}
}
@@ -158,7 +168,7 @@ retry:
}
if (unlikely(ret == MDB_MAP_RESIZED)) {
- kr_log_info(CACHE, "detected size increased by another process\n");
+ kr_log_info(MDB, "detected size increased by another process\n");
ret = refresh_mapsize(env);
if (ret == 0)
goto retry;
@@ -194,7 +204,7 @@ static int txn_get(struct lmdb_env *env, MDB_txn **txn, bool rdonly)
*txn = env->txn.rw;
kr_assert(*txn);
}
- return lmdb_error(ret);
+ return lmdb_error(env, ret);
}
/* Get an active RO txn and return it. */
@@ -205,7 +215,7 @@ static int txn_get(struct lmdb_env *env, MDB_txn **txn, bool rdonly)
ret = txn_get_noresize(env, FLAG_RENEW, &env->txn.ro);
}
if (ret != MDB_SUCCESS) {
- return lmdb_error(ret);
+ return lmdb_error(env, ret);
}
env->txn.ro_active = true;
*txn = env->txn.ro;
@@ -213,13 +223,18 @@ static int txn_get(struct lmdb_env *env, MDB_txn **txn, bool rdonly)
return kr_ok();
}
-static int cdb_commit(kr_cdb_pt db, struct kr_cdb_stats *stats)
+static int cdb_commit(kr_cdb_pt db, struct kr_cdb_stats *stats, bool accept)
{
struct lmdb_env *env = db2env(db);
+ if (!accept) {
+ txn_abort(env);
+ return kr_ok();
+ }
+
int ret = kr_ok();
if (env->txn.rw) {
if (stats) stats->commit++;
- ret = lmdb_error(mdb_txn_commit(env->txn.rw));
+ ret = lmdb_error(env, mdb_txn_commit(env->txn.rw));
env->txn.rw = NULL; /* the transaction got freed even in case of errors */
} else if (env->txn.ro && env->txn.ro_active) {
mdb_txn_reset(env->txn.ro);
@@ -236,9 +251,11 @@ static int txn_curs_get(struct lmdb_env *env, MDB_cursor **curs, struct kr_cdb_s
return kr_error(EINVAL);
if (env->txn.ro_curs_active)
goto success;
- /* Only in a read-only txn; TODO: it's a bit messy/coupled */
+ /* Only in a read-only txn; TODO: it's a bit messy/coupled
+ * At least for rules we don't do the auto-commit feature. */
if (env->txn.rw) {
- int ret = cdb_commit(env2db(env), stats);
+ if (!env->is_cache) return kr_error(EINPROGRESS);
+ int ret = cdb_commit(env2db(env), stats, true);
if (ret) return ret;
}
MDB_txn *txn = NULL;
@@ -250,7 +267,7 @@ static int txn_curs_get(struct lmdb_env *env, MDB_cursor **curs, struct kr_cdb_s
} else {
ret = mdb_cursor_open(txn, env->dbi, &env->txn.ro_curs);
}
- if (ret) return lmdb_error(ret);
+ if (ret) return lmdb_error(env, ret);
env->txn.ro_curs_active = true;
success:
kr_assert(env->txn.ro_curs_active && env->txn.ro && env->txn.ro_active
@@ -294,7 +311,7 @@ static void cdb_close_env(struct lmdb_env *env, struct kr_cdb_stats *stats)
/* Get rid of any transactions. */
txn_free_ro(env);
- cdb_commit(env2db(env), stats);
+ cdb_commit(env2db(env), stats, env->is_cache);
mdb_env_sync(env->env, 1);
stats->close++;
@@ -313,7 +330,7 @@ static int cdb_open_env(struct lmdb_env *env, const char *path, const size_t map
stats->open++;
ret = mdb_env_create(&env->env);
- if (ret != MDB_SUCCESS) return lmdb_error(ret);
+ if (ret != MDB_SUCCESS) return lmdb_error(env, ret);
env->mdb_data_path = kr_absolutize_path(path, "data.mdb");
if (!env->mdb_data_path) {
@@ -336,9 +353,11 @@ static int cdb_open_env(struct lmdb_env *env, const char *path, const size_t map
if (ret != MDB_SUCCESS) goto error_mdb;
}
- /* Cache doesn't require durability, we can be
- * loose with the requirements as a tradeoff for speed. */
- const unsigned flags = MDB_WRITEMAP | MDB_MAPASYNC | MDB_NOTLS;
+ const unsigned flags = env->is_cache
+ /* Cache doesn't require durability, we can be
+ * loose with the requirements as a tradeoff for speed. */
+ ? MDB_WRITEMAP | MDB_NOTLS | MDB_MAPASYNC
+ : MDB_WRITEMAP | MDB_NOTLS;
ret = mdb_env_open(env->env, path, flags, LMDB_FILE_MODE);
if (ret != MDB_SUCCESS) goto error_mdb;
@@ -367,14 +386,19 @@ static int cdb_open_env(struct lmdb_env *env, const char *path, const size_t map
ret = mdb_txn_begin(env->env, NULL, 0, &txn);
if (ret != MDB_SUCCESS) goto error_mdb;
- ret = mdb_dbi_open(txn, NULL, 0, &env->dbi);
+
+ //FIXME: perhaps we want MDB_DUPSORT in future,
+ // but for that we'd have to avoid MDB_RESERVE.
+ // (including a proper assertion, instead of sometimes-crash inside lmdb)
+ const unsigned dbi_flags = 0; //is_cache ? 0 : MDB_DUPSORT;
+ ret = mdb_dbi_open(txn, NULL, dbi_flags, &env->dbi);
if (ret != MDB_SUCCESS) {
mdb_txn_abort(txn);
goto error_mdb;
}
#if !defined(__MACOSX__) && !(defined(__APPLE__) && defined(__MACH__))
- if (size_requested) {
+ if (size_requested && env->is_cache) { // prealloc makes no sense for rules
ret = posix_fallocate(fd, 0, MAX(env->mapsize, env->st_size));
} else {
ret = 0;
@@ -383,7 +407,7 @@ static int cdb_open_env(struct lmdb_env *env, const char *path, const size_t map
/* POSIX says this can happen when the feature isn't supported by the FS.
* We haven't seen this happen on Linux+glibc but it was reported on
* Linux+musl and FreeBSD. */
- kr_log_info(CACHE, "space pre-allocation failed and ignored; "
+ kr_log_info(MDB, "space pre-allocation failed and ignored; "
"your (file)system probably doesn't support it.\n");
} else if (ret != 0) {
mdb_txn_abort(txn);
@@ -403,7 +427,7 @@ static int cdb_open_env(struct lmdb_env *env, const char *path, const size_t map
return kr_ok();
error_mdb:
- ret = lmdb_error(ret);
+ ret = lmdb_error(env, ret);
error_sys:
free_const(env->mdb_data_path);
stats->close++;
@@ -424,6 +448,8 @@ static int cdb_init(kr_cdb_pt *db, struct kr_cdb_stats *stats,
if (!env) {
return kr_error(ENOMEM);
}
+ env->is_cache = opts->is_cache;
+
int ret = cdb_open_env(env, opts->path, opts->maxsize, stats);
if (ret != 0) {
free(env);
@@ -457,7 +483,7 @@ static int cdb_count(kr_cdb_pt db, struct kr_cdb_stats *stats)
return stat.ms_entries;
} else {
txn_abort(env);
- return lmdb_error(ret);
+ return lmdb_error(env, ret);
}
}
@@ -467,7 +493,7 @@ static int reopen_env(struct lmdb_env *env, struct kr_cdb_stats *stats, const si
const char *path;
int ret = mdb_env_get_path(env->env, &path);
if (ret != MDB_SUCCESS) {
- return lmdb_error(ret);
+ return lmdb_error(env, ret);
}
auto_free char *path_copy = strdup(path);
cdb_close_env(env, stats);
@@ -485,7 +511,7 @@ static int cdb_check_health(kr_cdb_pt db, struct kr_cdb_stats *stats)
}
if (st.st_dev != env->st_dev || st.st_ino != env->st_ino) {
- kr_log_debug(CACHE, "cache file has been replaced, reopening\n");
+ kr_log_debug(MDB, "LMDB file has been replaced, reopening\n");
int ret = reopen_env(env, stats, 0); // we accept mapsize from the new file
return ret == 0 ? 1 : ret;
}
@@ -494,7 +520,7 @@ static int cdb_check_health(kr_cdb_pt db, struct kr_cdb_stats *stats)
* contrary to methods based on mdb_env_info(). */
if (st.st_size == env->st_size)
return kr_ok();
- kr_log_info(CACHE, "detected size change (by another instance?) of file '%s'"
+ kr_log_info(MDB, "detected size change (by another instance?) of file '%s'"
": file size %zu -> file size %zu\n",
env->mdb_data_path, (size_t)env->st_size, (size_t)st.st_size);
env->st_size = st.st_size; // avoid retrying in cycle even if we fail
@@ -549,15 +575,15 @@ static int cdb_clear(kr_cdb_pt db, struct kr_cdb_stats *stats)
MDB_txn *txn = NULL;
int ret = txn_get(env, &txn, false);
if (ret == kr_ok()) {
- ret = lmdb_error(mdb_drop(txn, env->dbi, 0));
- if (ret == kr_ok()) {
- ret = cdb_commit(db, stats);
+ ret = lmdb_error(env, mdb_drop(txn, env->dbi, 0));
+ if (ret == kr_ok() && env->is_cache) {
+ ret = cdb_commit(db, stats, true);
}
if (ret == kr_ok()) {
return ret;
}
}
- kr_log_info(CACHE, "clearing error, falling back\n");
+ kr_log_info(MDB, "clearing error, falling back\n");
}
/* Fallback: we'll remove the database files and reopen.
* Other instances can continue to use the removed lmdb,
@@ -565,12 +591,12 @@ static int cdb_clear(kr_cdb_pt db, struct kr_cdb_stats *stats)
/* We are about to switch to a different file, so end all txns, to be sure. */
txn_free_ro(env);
- (void) cdb_commit(db, stats);
+ (void) cdb_commit(db, stats, env->is_cache);
const char *path = NULL;
int ret = mdb_env_get_path(env->env, &path);
if (ret != MDB_SUCCESS) {
- return lmdb_error(ret);
+ return lmdb_error(env, ret);
}
auto_free char *mdb_lockfile = kr_strcatdup(2, path, "/lock.mdb");
auto_free char *lockfile = kr_strcatdup(2, path, "/krcachelock");
@@ -581,7 +607,7 @@ static int cdb_clear(kr_cdb_pt db, struct kr_cdb_stats *stats)
/* Find if we get a lock on lockfile. */
const int lockfile_fd = lockfile_get(lockfile);
if (lockfile_fd < 0) {
- kr_log_error(CACHE, "clearing failed to get ./krcachelock (%s); retry later\n",
+ kr_log_error(MDB, "clearing failed to get ./krcachelock (%s); retry later\n",
kr_strerror(lockfile_fd));
/* As we're out of space (almost certainly - mdb_drop didn't work),
* we will retry on the next failing write operation. */
@@ -597,7 +623,7 @@ static int cdb_clear(kr_cdb_pt db, struct kr_cdb_stats *stats)
ret = kr_ok();
// else pass some other error
} else {
- kr_log_debug(CACHE, "clear: identical files, unlinking\n");
+ kr_log_debug(MDB, "clear: identical files, unlinking\n");
// coverity[toctou]
unlink(env->mdb_data_path);
unlink(mdb_lockfile);
@@ -607,7 +633,7 @@ static int cdb_clear(kr_cdb_pt db, struct kr_cdb_stats *stats)
/* Environment updated, release lockfile. */
int lrerr = lockfile_release(lockfile_fd);
if (lrerr) {
- kr_log_error(CACHE, "failed to release ./krcachelock: %s\n",
+ kr_log_error(MDB, "failed to release ./krcachelock: %s\n",
kr_strerror(lrerr));
}
return ret;
@@ -635,7 +661,7 @@ static int cdb_readv(kr_cdb_pt db, struct kr_cdb_stats *stats,
} else {
txn_abort(env);
}
- ret = lmdb_error(ret);
+ ret = lmdb_error(env, ret);
if (ret == kr_error(ENOSPC)) {
/* we're likely to be forced to cache clear anyway */
ret = kr_error(ENOENT);
@@ -661,7 +687,7 @@ static int cdb_write(struct lmdb_env *env, MDB_txn **txn, const knot_db_val_t *k
/* We don't try to recover from MDB_TXN_FULL. */
if (ret != MDB_SUCCESS) {
txn_abort(env);
- return lmdb_error(ret);
+ return lmdb_error(env, ret);
}
/* Update the result. */
@@ -705,7 +731,7 @@ static int cdb_remove(kr_cdb_pt db, struct kr_cdb_stats *stats,
MDB_val _key = val_knot2mdb(keys[i]);
MDB_val val = { 0, NULL };
stats->remove++;
- ret = lmdb_error(mdb_del(txn, env->dbi, &_key, &val));
+ ret = lmdb_error(env, mdb_del(txn, env->dbi, &_key, &val));
if (ret == kr_ok())
deleted++;
else if (ret == KNOT_ENOENT) {
@@ -735,7 +761,7 @@ static int cdb_match(kr_cdb_pt db, struct kr_cdb_stats *stats,
ret = mdb_cursor_open(txn, env->dbi, &cur);
if (ret != 0) {
txn_abort(env);
- return lmdb_error(ret);
+ return lmdb_error(env, ret);
}
MDB_val cur_key = val_knot2mdb(*key);
@@ -747,7 +773,7 @@ static int cdb_match(kr_cdb_pt db, struct kr_cdb_stats *stats,
if (ret != MDB_NOTFOUND) {
txn_abort(env);
}
- return lmdb_error(ret);
+ return lmdb_error(env, ret);
}
int results = 0;
@@ -771,7 +797,7 @@ static int cdb_match(kr_cdb_pt db, struct kr_cdb_stats *stats,
mdb_cursor_close(cur);
if (ret != MDB_SUCCESS && ret != MDB_NOTFOUND) {
txn_abort(env);
- return lmdb_error(ret);
+ return lmdb_error(env, ret);
} else if (results == 0) {
stats->match_miss++;
}
@@ -818,7 +844,34 @@ failure:
} else {
txn_abort(env);
}
- return lmdb_error(ret);
+ return lmdb_error(env, ret);
+}
+
+static int cdb_read_less(kr_cdb_pt db, struct kr_cdb_stats *stats,
+ knot_db_val_t *key, knot_db_val_t *val)
+{
+ if (kr_fails_assert(db && key && key->data && val))
+ return kr_error(EINVAL);
+ struct lmdb_env *env = db2env(db);
+ MDB_cursor *curs = NULL;
+ int ret = txn_curs_get(env, &curs, stats);
+ if (ret) return ret;
+
+ MDB_val key2_m = val_knot2mdb(*key);
+ MDB_val val2_m = { 0, NULL };
+ stats->read_less++;
+ ret = mdb_cursor_get(curs, &key2_m, &val2_m, MDB_PREV);
+ if (!ret) {
+ /* finalize the output */
+ *key = val_mdb2knot(key2_m);
+ *val = val_mdb2knot(val2_m);
+ return 1;
+ } else if (ret == MDB_NOTFOUND) {
+ // stats->read_less++; // seems a pointless stat
+ } else {
+ txn_abort(env);
+ }
+ return lmdb_error(env, ret);
}
static double cdb_usage_percent(kr_cdb_pt db)
@@ -858,11 +911,9 @@ const struct kr_cdb_api *kr_cdb_lmdb(void)
cdb_init, cdb_deinit, cdb_count, cdb_clear, cdb_commit,
cdb_readv, cdb_writev, cdb_remove,
cdb_match,
- cdb_read_leq,
- cdb_usage_percent,
- cdb_get_maxsize,
+ cdb_read_leq, cdb_read_less,
+ cdb_usage_percent, cdb_get_maxsize,
cdb_check_health,
};
-
return &api;
}
diff --git a/lib/cache/cdb_lmdb.h b/lib/cache/cdb_lmdb.h
index 988fccf0..6eb64e04 100644
--- a/lib/cache/cdb_lmdb.h
+++ b/lib/cache/cdb_lmdb.h
@@ -7,6 +7,7 @@
#include "lib/cache/cdb_api.h"
#include "lib/defines.h"
+/** Get API implementation for LMDB. */
KR_EXPORT KR_CONST
const struct kr_cdb_api *kr_cdb_lmdb(void);
diff --git a/lib/cache/entry_pkt.c b/lib/cache/entry_pkt.c
index 884bfaa8..9499b69a 100644
--- a/lib/cache/entry_pkt.c
+++ b/lib/cache/entry_pkt.c
@@ -125,12 +125,9 @@ void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry,
}
-int answer_from_pkt(kr_layer_t *ctx, knot_pkt_t *pkt, uint16_t type,
+int answer_from_pkt(struct kr_query *qry, knot_pkt_t *pkt, uint16_t type,
const struct entry_h *eh, const void *eh_bound, uint32_t new_ttl)
{
- struct kr_request *req = ctx->req;
- struct kr_query *qry = req->current_query;
-
const uint16_t msgid = knot_wire_get_id(pkt->wire);
/* Ensure the wire buffer is large enough. Strategy: fit and at least double. */
@@ -139,8 +136,8 @@ int answer_from_pkt(kr_layer_t *ctx, knot_pkt_t *pkt, uint16_t type,
if (pkt_len > pkt->max_size) {
pkt->max_size = MIN(KNOT_WIRE_MAX_PKTSIZE,
MAX(pkt->max_size * 2, pkt_len));
- mm_free(&ctx->req->pool, pkt->wire); /* no-op, but... */
- pkt->wire = mm_alloc(&ctx->req->pool, pkt->max_size);
+ mm_free(&qry->request->pool, pkt->wire); /* no-op, but... */
+ pkt->wire = mm_alloc(&qry->request->pool, pkt->max_size);
pkt->compr.wire = pkt->wire;
/* TODO: ^^ nicer way how to replace knot_pkt_t::wire ? */
}
diff --git a/lib/cache/entry_rr.c b/lib/cache/entry_rr.c
index 3239e7e5..5fcdf880 100644
--- a/lib/cache/entry_rr.c
+++ b/lib/cache/entry_rr.c
@@ -28,10 +28,7 @@ void rdataset_dematerialize(const knot_rdataset_t *rds, uint8_t * restrict data)
(void)data; // silence analyzers
}
-/** Materialize a knot_rdataset_t from cache with given TTL.
- * Return the number of bytes consumed or an error code.
- */
-static int rdataset_materialize(knot_rdataset_t * restrict rds, const uint8_t * const data,
+int rdataset_materialize(knot_rdataset_t * restrict rds, const uint8_t * const data,
const uint8_t *data_bound, knot_mm_t *pool)
{
if (kr_fails_assert(rds && data && data_bound && data_bound > data && !rds->rdata
diff --git a/lib/cache/impl.h b/lib/cache/impl.h
index 305f36eb..9b5cb2f4 100644
--- a/lib/cache/impl.h
+++ b/lib/cache/impl.h
@@ -274,7 +274,7 @@ void stash_pkt(const knot_pkt_t *pkt, const struct kr_query *qry,
* This assumes the TTL is OK and entry_h_consistent, but it may still return error.
* On success it handles all the rest, incl. qry->flags.
*/
-int answer_from_pkt(kr_layer_t *ctx, knot_pkt_t *pkt, uint16_t type,
+int answer_from_pkt(struct kr_query *qry, knot_pkt_t *pkt, uint16_t type,
const struct entry_h *eh, const void *eh_bound, uint32_t new_ttl);
@@ -328,6 +328,11 @@ static inline int rdataset_dematerialized_size(const uint8_t *data, uint16_t *rd
/** Serialize an rdataset. It may be NULL as short-hand for empty. */
void rdataset_dematerialize(const knot_rdataset_t *rds, uint8_t * restrict data);
+/** Materialize a knot_rdataset_t from cache.
+ * Return the number of bytes consumed or an error code. */
+int rdataset_materialize(knot_rdataset_t * restrict rds, const uint8_t * const data,
+ const uint8_t *data_bound, knot_mm_t *pool);
+
/** Partially constructed answer when gathering RRsets from cache. */
struct answer {
@@ -362,7 +367,7 @@ int entry2answer(struct answer *ans, int id,
/** Prepare answer packet to be filled by RRs (without RR data in wire). */
int pkt_renew(knot_pkt_t *pkt, const knot_dname_t *name, uint16_t type);
-/** Append RRset + its RRSIGs into the current section (*shallow* copy), with given rank.
+/** Append RRset + its RRSIGs into the current section (*shallow* copy).
*
* \note it works with empty set as well (skipped)
* \note pkt->wire is not updated in any way
@@ -370,7 +375,7 @@ int pkt_renew(knot_pkt_t *pkt, const knot_dname_t *name, uint16_t type);
* \note Whole RRsets are put into the pseudo-packet;
* normal parsed packets would only contain single-RR sets.
*/
-int pkt_append(knot_pkt_t *pkt, const struct answer_rrset *rrset, uint8_t rank);
+int pkt_append(knot_pkt_t *pkt, const struct answer_rrset *rrset);
diff --git a/lib/cache/knot_pkt.c b/lib/cache/knot_pkt.c
index 31fa7e9b..4b28891e 100644
--- a/lib/cache/knot_pkt.c
+++ b/lib/cache/knot_pkt.c
@@ -11,8 +11,8 @@
int pkt_renew(knot_pkt_t *pkt, const knot_dname_t *name, uint16_t type)
{
- /* Update packet question if needed. */
- if (!knot_dname_is_equal(knot_pkt_qname(pkt), name)
+ /* Clear the packet if needed. */
+ if (pkt->rrset_count != 0 || !knot_dname_is_equal(knot_pkt_qname(pkt), name)
|| knot_pkt_qtype(pkt) != type || knot_pkt_qclass(pkt) != KNOT_CLASS_IN) {
int ret = kr_pkt_recycle(pkt);
if (ret) return kr_error(ret);
@@ -55,7 +55,7 @@ static int pkt_alloc_space(knot_pkt_t *pkt, int count)
return kr_ok();
}
-int pkt_append(knot_pkt_t *pkt, const struct answer_rrset *rrset, uint8_t rank)
+int pkt_append(knot_pkt_t *pkt, const struct answer_rrset *rrset)
{
/* allocate space, to be sure */
int rrset_cnt = (rrset->set.rr->rrs.count > 0) + (rrset->sig_rds.count > 0);
@@ -69,7 +69,7 @@ int pkt_append(knot_pkt_t *pkt, const struct answer_rrset *rrset, uint8_t rank)
/* allocate rank */
uint8_t *rr_rank = mm_alloc(&pkt->mm, sizeof(*rr_rank));
if (!rr_rank) return kr_error(ENOMEM);
- *rr_rank = (i == 0) ? rank : (KR_RANK_OMIT | KR_RANK_AUTH);
+ *rr_rank = (i == 0) ? rrset->set.rank : (KR_RANK_OMIT | KR_RANK_AUTH);
/* rank for RRSIGs isn't really useful: ^^ */
if (i == 0) {
pkt->rr[pkt->rrset_count] = *rrset->set.rr;
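The rank argument of pkt_append() did not disappear; it moved into struct answer_rrset and is now read from rrset->set.rank (with KR_RANK_OMIT | KR_RANK_AUTH still used for the accompanying RRSIGs). A hedged sketch of the new calling convention, with the rank value only illustrative:

    /* Fill the rank on the RRset entry before appending. */
    ans.rrsets[AR_ANSWER].set.rank = eh->rank;
    int ret = pkt_append(pkt, &ans.rrsets[AR_ANSWER]);   /* no separate rank parameter */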
diff --git a/lib/cache/peek.c b/lib/cache/peek.c
index e1901ac3..719652aa 100644
--- a/lib/cache/peek.c
+++ b/lib/cache/peek.c
@@ -10,13 +10,13 @@
/* The whole file only exports peek_nosync().
* Forwards for larger chunks of code: */
-static int found_exact_hit(kr_layer_t *ctx, knot_pkt_t *pkt, knot_db_val_t val,
+static int found_exact_hit(struct kr_query *qry, knot_pkt_t *pkt, knot_db_val_t val,
uint8_t lowest_rank);
static int closest_NS(struct kr_cache *cache, struct key *k, entry_list_t el,
struct kr_query *qry, bool only_NS, bool is_DS);
-static int answer_simple_hit(kr_layer_t *ctx, knot_pkt_t *pkt, uint16_t type,
+static int answer_simple_hit(struct kr_query *qry, knot_pkt_t *pkt, uint16_t type,
const struct entry_h *eh, const void *eh_bound, uint32_t new_ttl);
-static int answer_dname_hit(kr_layer_t *ctx, knot_pkt_t *pkt, const knot_dname_t *dname_owner,
+static int answer_dname_hit(struct kr_query *qry, knot_pkt_t *pkt, const knot_dname_t *dname_owner,
const struct entry_h *eh, const void *eh_bound, uint32_t new_ttl);
static int try_wild(struct key *k, struct answer *ans, const knot_dname_t *clencl_name,
uint16_t type, uint8_t lowest_rank,
@@ -124,7 +124,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
ret = cache_op(cache, read, &key, &val, 1);
if (!ret) {
/* found an entry: test conditions, materialize into pkt, etc. */
- ret = found_exact_hit(ctx, pkt, val, lowest_rank);
+ ret = found_exact_hit(qry, pkt, val, lowest_rank);
}
}
if (!ret) {
@@ -159,7 +159,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
return ctx->state;
const int32_t new_ttl = get_new_ttl(v.data, qry, qry->sname,
KNOT_RRTYPE_CNAME, qry->timestamp.tv_sec);
- ret = answer_simple_hit(ctx, pkt, KNOT_RRTYPE_CNAME, v.data,
+ ret = answer_simple_hit(qry, pkt, KNOT_RRTYPE_CNAME, v.data,
knot_db_val_bound(v), new_ttl);
return ret == kr_ok() ? KR_STATE_DONE : ctx->state;
}
@@ -170,7 +170,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
/* TTL: for simplicity, we just ask for TTL of the generated CNAME. */
const int32_t new_ttl = get_new_ttl(v.data, qry, qry->sname,
KNOT_RRTYPE_CNAME, qry->timestamp.tv_sec);
- ret = answer_dname_hit(ctx, pkt, k->zname, v.data,
+ ret = answer_dname_hit(qry, pkt, k->zname, v.data,
knot_db_val_bound(v), new_ttl);
return ret == kr_ok() ? KR_STATE_DONE : ctx->state;
}
@@ -304,7 +304,7 @@ int peek_nosync(kr_layer_t *ctx, knot_pkt_t *pkt)
if (i == 1) knot_pkt_begin(pkt, KNOT_AUTHORITY);
if (!ans.rrsets[i].set.rr) continue;
expiring = expiring || ans.rrsets[i].set.expiring;
- ret = pkt_append(pkt, &ans.rrsets[i], ans.rrsets[i].set.rank);
+ ret = pkt_append(pkt, &ans.rrsets[i]);
if (kr_fails_assert(ret == 0))
return ctx->state;
}
@@ -422,12 +422,9 @@ static void answer_simple_qflags(struct kr_qflags *qf, const struct entry_h *eh,
if (kr_fails_assert((ret) >= 0)) return kr_error((ret)); \
} while (false)
-static int answer_simple_hit(kr_layer_t *ctx, knot_pkt_t *pkt, uint16_t type,
+static int answer_simple_hit(struct kr_query *qry, knot_pkt_t *pkt, uint16_t type,
const struct entry_h *eh, const void *eh_bound, uint32_t new_ttl)
{
- struct kr_request *req = ctx->req;
- struct kr_query *qry = req->current_query;
-
/* All OK, so start constructing the (pseudo-)packet. */
int ret = pkt_renew(pkt, qry->sname, qry->stype);
CHECK_RET(ret);
@@ -440,7 +437,7 @@ static int answer_simple_hit(kr_layer_t *ctx, knot_pkt_t *pkt, uint16_t type,
qry->sname, type, new_ttl);
CHECK_RET(ret);
/* Put links to the materialized data into the pkt. */
- ret = pkt_append(pkt, &ans.rrsets[AR_ANSWER], eh->rank);
+ ret = pkt_append(pkt, &ans.rrsets[AR_ANSWER]);
CHECK_RET(ret);
answer_simple_qflags(&qry->flags, eh, new_ttl);
@@ -451,12 +448,9 @@ static int answer_simple_hit(kr_layer_t *ctx, knot_pkt_t *pkt, uint16_t type,
return kr_ok();
}
-static int answer_dname_hit(kr_layer_t *ctx, knot_pkt_t *pkt, const knot_dname_t *dname_owner,
+static int answer_dname_hit(struct kr_query *qry, knot_pkt_t *pkt, const knot_dname_t *dname_owner,
const struct entry_h *eh, const void *eh_bound, uint32_t new_ttl)
{
- struct kr_request *req = ctx->req;
- struct kr_query *qry = req->current_query;
-
/* All OK, so start constructing the (pseudo-)packet. */
int ret = pkt_renew(pkt, qry->sname, qry->stype);
CHECK_RET(ret);
@@ -469,13 +463,14 @@ static int answer_dname_hit(kr_layer_t *ctx, knot_pkt_t *pkt, const knot_dname_t
dname_owner, KNOT_RRTYPE_DNAME, new_ttl);
CHECK_RET(ret);
/* Put link to the RRset into the pkt. */
- ret = pkt_append(pkt, &ans.rrsets[AR_ANSWER], eh->rank);
+ ret = pkt_append(pkt, &ans.rrsets[AR_ANSWER]);
CHECK_RET(ret);
const knot_dname_t *dname_target =
knot_dname_target(ans.rrsets[AR_ANSWER].set.rr->rrs.rdata);
/* Generate CNAME RRset for the answer in (pseudo-)packet. */
const int AR_CNAME = AR_SOA;
+ ans.rrsets[AR_CNAME].set.rank = ans.rrsets[AR_ANSWER].set.rank;
knot_rrset_t *rr = ans.rrsets[AR_CNAME].set.rr
= knot_rrset_new(qry->sname, KNOT_RRTYPE_CNAME, KNOT_CLASS_IN,
new_ttl, ans.mm);
@@ -494,7 +489,7 @@ static int answer_dname_hit(kr_layer_t *ctx, knot_pkt_t *pkt, const knot_dname_t
CHECK_RET(rr->rrs.rdata ? kr_ok() : -ENOMEM);
knot_rdata_init(rr->rrs.rdata, rdata_len, cname_target);
/* Put link to the RRset into the pkt. */
- ret = pkt_append(pkt, &ans.rrsets[AR_CNAME], eh->rank);
+ ret = pkt_append(pkt, &ans.rrsets[AR_CNAME]);
CHECK_RET(ret);
} else {
/* Note that it's basically a successful answer; name just doesn't fit. */
@@ -510,12 +505,9 @@ static int answer_dname_hit(kr_layer_t *ctx, knot_pkt_t *pkt, const knot_dname_t
#undef CHECK_RET
/** TODO: description; see the single call site for now. */
-static int found_exact_hit(kr_layer_t *ctx, knot_pkt_t *pkt, knot_db_val_t val,
+static int found_exact_hit(struct kr_query *qry, knot_pkt_t *pkt, knot_db_val_t val,
uint8_t lowest_rank)
{
- struct kr_request *req = ctx->req;
- struct kr_query *qry = req->current_query;
-
int ret = entry_h_seek(&val, qry->stype);
if (ret) return ret;
const struct entry_h *eh = entry_h_consistent_E(val, qry->stype);
@@ -542,9 +534,9 @@ static int found_exact_hit(kr_layer_t *ctx, knot_pkt_t *pkt, knot_db_val_t val,
* possible that we could generate a higher-security negative proof.
* Rank is high-enough so we take it to save time searching;
* in practice this also helps in some incorrect zones (live-signed). */
- return answer_from_pkt (ctx, pkt, qry->stype, eh, eh_bound, new_ttl);
+ return answer_from_pkt (qry, pkt, qry->stype, eh, eh_bound, new_ttl);
} else {
- return answer_simple_hit(ctx, pkt, qry->stype, eh, eh_bound, new_ttl);
+ return answer_simple_hit(qry, pkt, qry->stype, eh, eh_bound, new_ttl);
}
}
diff --git a/lib/layer/iterate.c b/lib/layer/iterate.c
index edc666eb..4eacf86f 100644
--- a/lib/layer/iterate.c
+++ b/lib/layer/iterate.c
@@ -932,6 +932,7 @@ static int begin(kr_layer_t *ctx)
return reset(ctx);
}
+/* LATER: make calls of this function more organized; the spaghetti is tough here. */
int kr_make_query(struct kr_query *query, knot_pkt_t *pkt)
{
/* Minimize QNAME (if possible). */
@@ -977,6 +978,7 @@ static int prepare_query(kr_layer_t *ctx, knot_pkt_t *pkt)
return KR_STATE_FAIL;
}
+ // TODO: this logging (and rplan's) is confusing, especially around `uid` values
WITH_VERBOSE(query) {
KR_DNAME_GET_STR(name_str, query->sname);
KR_RRTYPE_GET_STR(type_str, query->stype);
@@ -1026,6 +1028,21 @@ static void bound_ttls(ranked_rr_array_t *array, uint32_t qry_uid,
}
}
+static void ede_passthru(const knot_pkt_t *pkt, struct kr_request *req)
+{
+ const uint8_t *ede_raw = pkt->edns_opts ?
+ pkt->edns_opts->ptr[KNOT_EDNS_OPTION_EDE] : NULL;
+ if (!ede_raw) return;
+ kr_require(ede_raw[0] * 256 + ede_raw[1] == KNOT_EDNS_OPTION_EDE);
+ uint16_t ede_len = ede_raw[2] * 256 + ede_raw[3];
+ if (ede_len < 2) return;
+ uint16_t ede_code = ede_raw[4] * 256 + ede_raw[5];
+ if (ede_code >= KNOT_EDNS_EDE_INDETERMINATE // long range of DNSSEC codes
+ && ede_code <= KNOT_EDNS_EDE_NSEC_MISS) {
+ kr_request_set_extended_error(req, ede_code, "V5T7: forwarded EDE code");
+ }
+}
+
/** Resolve input query or continue resolution with followups.
*
* This roughly corresponds to RFC1034, 5.3.3 4a-d.
@@ -1068,7 +1085,7 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
VERBOSE_MSG("<= malformed response (parsed %d)\n", (int)pkt->parsed);
query->server_selection.error(query, req->upstream.transport, KR_SELECTION_MALFORMED);
return KR_STATE_FAIL;
- } else if (!is_paired_to_query(pkt, query)) {
+ } else if (!query->flags.CACHED && !is_paired_to_query(pkt, query)) {
WITH_VERBOSE(query) {
const char *ns_str =
req->upstream.transport ? kr_straddr(&req->upstream.transport->address.ip) : "(internal)";
@@ -1129,6 +1146,10 @@ static int resolve(kr_layer_t *ctx, knot_pkt_t *pkt)
}
ret = KR_STATE_FAIL;
selection_error = KR_SELECTION_SERVFAIL;
+ if (query->flags.FORWARD) {
+ /* additionally pass some of the EDE codes through */
+ ede_passthru(pkt, req);
+ }
break;
case KNOT_RCODE_FORMERR:
ret = KR_STATE_FAIL;
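For reference, ede_passthru() above parses the raw EDNS option bytes directly; the x * 256 + y expressions are big-endian 16-bit reads. The wire layout it relies on (RFC 6891 option format, RFC 8914 for EDE), spelled out as a sketch:

    /* EDNS option wire format, as read by ede_passthru():
     *   +0  OPTION-CODE   (2 bytes, big-endian; 15 == EDE per RFC 8914)
     *   +2  OPTION-LENGTH (2 bytes, big-endian; must be >= 2 here)
     *   +4  INFO-CODE     (2 bytes, big-endian; the EDE code that gets forwarded)
     *   +6  EXTRA-TEXT    (OPTION-LENGTH - 2 bytes; ignored by this function)
     */
    static inline uint16_t wire_read_u16(const uint8_t *p)
    {
        return (uint16_t)((p[0] << 8) | p[1]);
    }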
diff --git a/lib/log.c b/lib/log.c
index 57efcfb0..ad4215f9 100644
--- a/lib/log.c
+++ b/lib/log.c
@@ -78,6 +78,7 @@ const log_group_names_t log_group_names[] = {
GRP_NAME_ITEM(LOG_GRP_DEVEL),
GRP_NAME_ITEM(LOG_GRP_RENUMBER),
GRP_NAME_ITEM(LOG_GRP_EDE),
+ GRP_NAME_ITEM(LOG_GRP_RULES),
GRP_NAME_ITEM(LOG_GRP_PROTOLAYER),
GRP_NAME_ITEM(LOG_GRP_REQDBG),
{ NULL, LOG_GRP_UNKNOWN },
diff --git a/lib/log.h b/lib/log.h
index 954f74a6..713d3f89 100644
--- a/lib/log.h
+++ b/lib/log.h
@@ -79,6 +79,7 @@ enum kr_log_group {
LOG_GRP_DEVEL,
LOG_GRP_RENUMBER,
LOG_GRP_EDE,
+ LOG_GRP_RULES,
LOG_GRP_PROTOLAYER,
/* ^^ Add new log groups above ^^. */
LOG_GRP_REQDBG, /* Must be first non-displayed entry in enum! */
@@ -132,6 +133,7 @@ enum kr_log_group {
#define LOG_GRP_DEVEL_TAG "devel" /**< ``devel``: for development purposes */
#define LOG_GRP_RENUMBER_TAG "renum" /**< ``renum``: operation related to renumber */
#define LOG_GRP_EDE_TAG "exterr" /**< ``exterr``: extended error module */
+#define LOG_GRP_RULES_TAG "rules" /**< ``rules``: new policy rules (their processing) */
#define LOG_GRP_PROTOLAYER_TAG "prlayr" /**< ``prlayr``: protocol layer system (session2) */
#define LOG_GRP_REQDBG_TAG "reqdbg" /**< ``reqdbg``: debug logs enabled by policy actions */
///@}
@@ -232,8 +234,8 @@ struct kr_query;
* @param grp GROUP_NAME (without the LOG_GRP_ prefix)
* @param fmt printf-like format string
*/
-#define kr_log_req(req, qry_id, indent, grp, fmt, ...) \
- kr_log_req1(req, qry_id, indent, LOG_GRP_ ## grp, LOG_GRP_ ## grp ## _TAG, fmt, ## __VA_ARGS__)
+#define kr_log_req(req, qry_uid, indent, grp, fmt, ...) \
+ kr_log_req1(req, qry_uid, indent, LOG_GRP_ ## grp, LOG_GRP_ ## grp ## _TAG, fmt, ## __VA_ARGS__)
KR_EXPORT KR_PRINTF(6)
void kr_log_req1(const struct kr_request * const req, uint32_t qry_uid,
const unsigned int indent, enum kr_log_group group, const char *tag, const char *fmt, ...);
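A hedged usage sketch for the new group, using the logging macros this codebase already provides (the indent value and messages are only examples):

    kr_log_q(qry, RULES, "rule matched\n");                      /* query-bound */
    kr_log_req(req, qry->uid, 0, RULES, "applying rule set\n");  /* request-bound */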
diff --git a/lib/meson.build b/lib/meson.build
index ec11da9f..48185e17 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -23,8 +23,13 @@ libkres_src = files([
'layer/iterate.c',
'layer/validate.c',
'log.c',
+ 'rules/api.c',
+ 'rules/defaults.c',
+ 'rules/forward.c',
+ 'rules/zonefile.c',
'module.c',
'resolve.c',
+ 'resolve-produce.c',
'rplan.c',
'selection.c',
'selection_forward.c',
@@ -55,7 +60,10 @@ libkres_headers = files([
'log.h',
'module.h',
'resolve.h',
+ 'resolve-impl.h',
'rplan.h',
+ 'rules/api.h',
+ 'rules/impl.h',
'selection.h',
'selection_forward.h',
'selection_iter.h',
@@ -94,6 +102,7 @@ libkres_lib = library('kres',
libuv,
lmdb,
libknot,
+ libzscanner,
libdnssec,
gnutls,
luajit,
diff --git a/lib/resolve-impl.h b/lib/resolve-impl.h
new file mode 100644
index 00000000..ffedf4d2
--- /dev/null
+++ b/lib/resolve-impl.h
@@ -0,0 +1,52 @@
+/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ */
+#pragma once
+
+#include "lib/resolve.h"
+
+#define VERBOSE_MSG(qry, ...) kr_log_q((qry), RESOLVER, __VA_ARGS__)
+
+/** @internal Macro for iterating module layers. */
+#define RESUME_LAYERS(from, r, qry, func, ...) \
+ (r)->current_query = (qry); \
+ for (size_t i = (from); i < (r)->ctx->modules->len; ++i) { \
+ struct kr_module *mod = (r)->ctx->modules->at[i]; \
+ if (mod->layer) { \
+ struct kr_layer layer = {.state = (r)->state, .api = mod->layer, .req = (r)}; \
+ if (layer.api && layer.api->func) { \
+ (r)->state = layer.api->func(&layer, ##__VA_ARGS__); \
+ /* It's an easy mistake to return error code, for example. */ \
+ /* (though we could allow such an overload later) */ \
+ if (kr_fails_assert(kr_state_consistent((r)->state))) { \
+ (r)->state = KR_STATE_FAIL; \
+ } else \
+ if ((r)->state == KR_STATE_YIELD) { \
+ func ## _yield(&layer, ##__VA_ARGS__); \
+ break; \
+ } \
+ } \
+ } \
+ } /* Invalidate current query. */ \
+ (r)->current_query = NULL
+
+/** @internal Macro for starting module iteration. */
+#define ITERATE_LAYERS(req, qry, func, ...) RESUME_LAYERS(0, req, qry, func, ##__VA_ARGS__)
+
+/** Randomize QNAME letter case.
+ *
+ * This adds 32 bits of randomness at maximum, but that's more than an average domain name length.
+ * https://tools.ietf.org/html/draft-vixie-dnsext-dns0x20-00
+ */
+void randomized_qname_case(knot_dname_t * restrict qname, uint32_t secret);
+
+void set_yield(ranked_rr_array_t *array, const uint32_t qry_uid, const bool yielded);
+int consume_yield(kr_layer_t *ctx, knot_pkt_t *pkt);
+
+static inline int begin_yield(kr_layer_t *ctx) { return kr_ok(); }
+static inline int reset_yield(kr_layer_t *ctx) { return kr_ok(); }
+static inline int finish_yield(kr_layer_t *ctx) { return kr_ok(); }
+static inline int produce_yield(kr_layer_t *ctx, knot_pkt_t *pkt) { return kr_ok(); }
+static inline int checkout_yield(kr_layer_t *ctx, knot_pkt_t *packet, struct sockaddr *dst, int type) { return kr_ok(); }
+static inline int answer_finalize_yield(kr_layer_t *ctx) { return kr_ok(); }
+
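To make the *_yield convention above easier to follow: RESUME_LAYERS token-pastes func with _yield, so every layer callback needs a matching *_yield function even when it is a no-op; only consume_yield() (still in resolve.c) does real work. Conceptually, and with the state-consistency assert elided, ITERATE_LAYERS(req, qry, produce, pkt) behaves like this sketch:

    req->current_query = qry;
    for (size_t i = 0; i < req->ctx->modules->len; ++i) {
        const struct kr_layer_api *api = req->ctx->modules->at[i]->layer;
        if (!api || !api->produce)
            continue;
        struct kr_layer layer = { .state = req->state, .api = api, .req = req };
        req->state = api->produce(&layer, pkt);
        if (req->state == KR_STATE_YIELD) {
            produce_yield(&layer, pkt);   /* no-op; the deferral happens in consume_yield() */
            break;
        }
    }
    req->current_query = NULL;   /* invalidate the current query */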
diff --git a/lib/resolve-produce.c b/lib/resolve-produce.c
new file mode 100644
index 00000000..d9bec433
--- /dev/null
+++ b/lib/resolve-produce.c
@@ -0,0 +1,728 @@
+/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ */
+
+#include <ctype.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <arpa/inet.h>
+#include <libknot/rrtype/rdname.h>
+#include <libknot/descriptor.h>
+#include <ucw/mempool.h>
+#include <sys/socket.h>
+#include "lib/resolve.h"
+#include "lib/layer.h"
+#include "lib/rplan.h"
+#include "lib/layer/iterate.h"
+#include "lib/dnssec/ta.h"
+#include "lib/dnssec.h"
+
+#include "lib/resolve-impl.h"
+
+/** @internal Find layer id matching API. */
+static inline size_t layer_id(struct kr_request *req, const struct kr_layer_api *api) {
+ module_array_t *modules = req->ctx->modules;
+ for (size_t i = 0; i < modules->len; ++i) {
+ if (modules->at[i]->layer == api) {
+ return i;
+ }
+ }
+ return 0; /* Not found, try all. */
+}
+
+/* @internal We don't need to deal with locale here */
+KR_CONST static inline bool isletter(unsigned chr)
+{ return (chr | 0x20 /* tolower */) - 'a' <= 'z' - 'a'; }
+
+void randomized_qname_case(knot_dname_t * restrict qname, uint32_t secret)
+{
+ if (secret == 0)
+ return;
+ if (kr_fails_assert(qname))
+ return;
+ const int len = knot_dname_size(qname) - 2; /* Skip first, last label. First is length, last is always root */
+ for (int i = 0; i < len; ++i) {
+ /* Note: this relies on the fact that correct label lengths
+ * can't pass the isletter() test (by "luck"). */
+ if (isletter(*++qname)) {
+ *qname ^= ((secret >> (i & 31)) & 1) * 0x20;
+ }
+ }
+}
+
+/** This turns off QNAME minimisation if there is a non-terminal between the current zone cut and the name target.
+ * It saves several minimisation steps, as the zone cut is likely the final one.
+ */
+static void check_empty_nonterms(struct kr_query *qry, knot_pkt_t *pkt, struct kr_cache *cache, uint32_t timestamp)
+{
+ // FIXME cleanup, etc.
+#if 0
+ if (qry->flags.NO_MINIMIZE) {
+ return;
+ }
+
+ const knot_dname_t *target = qry->sname;
+ const knot_dname_t *cut_name = qry->zone_cut.name;
+ if (!target || !cut_name)
+ return;
+
+ struct kr_cache_entry *entry = NULL;
+ /* @note: The non-terminal must be direct child of zone cut (e.g. label distance <= 2),
+ * otherwise this would risk leaking information to parent if the NODATA TTD > zone cut TTD. */
+ int labels = knot_dname_labels(target, NULL) - knot_dname_labels(cut_name, NULL);
+ while (target[0] && labels > 2) {
+ target = knot_wire_next_label(target, NULL);
+ --labels;
+ }
+ for (int i = 0; i < labels; ++i) {
+ int ret = kr_cache_peek(cache, KR_CACHE_PKT, target, KNOT_RRTYPE_NS, &entry, &timestamp);
+ if (ret == 0) { /* Either NXDOMAIN or NODATA, start here. */
+ /* @todo We could stop resolution here for NXDOMAIN, but we can't because of broken CDNs */
+ qry->flags.NO_MINIMIZE = true;
+ kr_make_query(qry, pkt);
+ break;
+ }
+ kr_assert(target[0]);
+ target = knot_wire_next_label(target, NULL);
+ }
+ kr_cache_commit(cache);
+#endif
+}
+
+static int ns_fetch_cut(struct kr_query *qry, const knot_dname_t *requested_name,
+ struct kr_request *req, knot_pkt_t *pkt)
+{
+ /* It can occur that the parent query already has a
+ * provably insecure zone cut which is not in the cache yet. */
+ struct kr_qflags pflags;
+ if (qry->parent) {
+ pflags = qry->parent->flags;
+ }
+ const bool is_insecure = qry->parent != NULL
+ && !(pflags.AWAIT_IPV4 || pflags.AWAIT_IPV6)
+ && (pflags.DNSSEC_INSECURE || pflags.DNSSEC_NODS);
+
+ /* Want DNSSEC if it's possible to secure this name
+ * (e.g. is covered by any TA) */
+ if (is_insecure) {
+ /* If parent is insecure we don't want DNSSEC
+ * even if cut name is covered by TA. */
+ qry->flags.DNSSEC_WANT = false;
+ qry->flags.DNSSEC_INSECURE = true;
+ VERBOSE_MSG(qry, "=> going insecure because parent query is insecure\n");
+ } else if (kr_ta_closest(req->ctx, qry->zone_cut.name, KNOT_RRTYPE_NS)) {
+ qry->flags.DNSSEC_WANT = true;
+ } else {
+ qry->flags.DNSSEC_WANT = false;
+ VERBOSE_MSG(qry, "=> going insecure because there's no covering TA\n");
+ }
+
+ struct kr_zonecut cut_found;
+ kr_zonecut_init(&cut_found, requested_name, req->rplan.pool);
+ /* The cut that has been found can differ from the cut that has been requested.
+ * So if not already insecure,
+ * try to fetch TA & keys even if the initial cut name is not covered by a TA. */
+ bool secure = !is_insecure;
+ int ret = kr_zonecut_find_cached(req->ctx, &cut_found, requested_name,
+ qry, &secure);
+ if (ret == kr_error(ENOENT)) {
+ /* No cached cut found, start from SBELT
+ * and issue priming query. */
+ kr_zonecut_deinit(&cut_found);
+ ret = kr_zonecut_set_sbelt(req->ctx, &qry->zone_cut);
+ if (ret != 0) {
+ return KR_STATE_FAIL;
+ }
+ VERBOSE_MSG(qry, "=> using root hints\n");
+ qry->flags.AWAIT_CUT = false;
+ return KR_STATE_DONE;
+ } else if (ret != kr_ok()) {
+ kr_zonecut_deinit(&cut_found);
+ return KR_STATE_FAIL;
+ }
+
+ /* Find out security status.
+ * Go insecure if the zone cut is provably insecure */
+ if ((qry->flags.DNSSEC_WANT) && !secure) {
+ VERBOSE_MSG(qry, "=> NS is provably without DS, going insecure\n");
+ qry->flags.DNSSEC_WANT = false;
+ qry->flags.DNSSEC_INSECURE = true;
+ }
+ /* Zonecut name can change, check it again
+ * to prevent unnecessary DS & DNSKEY queries */
+ if (!(qry->flags.DNSSEC_INSECURE) &&
+ kr_ta_closest(req->ctx, cut_found.name, KNOT_RRTYPE_NS)) {
+ qry->flags.DNSSEC_WANT = true;
+ } else {
+ qry->flags.DNSSEC_WANT = false;
+ }
+ /* Check if any DNSKEY found for cached cut */
+ if (qry->flags.DNSSEC_WANT && cut_found.key == NULL &&
+ kr_zonecut_is_empty(&cut_found)) {
+ /* Cut found and there are no proofs of zone insecurity.
+ * But no DNSKEY found and no glue fetched.
+ * We have a circular dependency - must fetch A/AAAA
+ * from an authoritative server, but we have no key to verify it. */
+ kr_zonecut_deinit(&cut_found);
+ if (requested_name[0] != '\0' ) {
+ /* If not root - try next label */
+ return KR_STATE_CONSUME;
+ }
+ /* No cached cut & keys found, start from SBELT */
+ ret = kr_zonecut_set_sbelt(req->ctx, &qry->zone_cut);
+ if (ret != 0) {
+ return KR_STATE_FAIL;
+ }
+ VERBOSE_MSG(qry, "=> using root hints\n");
+ qry->flags.AWAIT_CUT = false;
+ return KR_STATE_DONE;
+ }
+ /* Use the found zone cut. */
+ kr_zonecut_move(&qry->zone_cut, &cut_found);
+ /* Check if there's a non-terminal between target and current cut. */
+ struct kr_cache *cache = &req->ctx->cache;
+ check_empty_nonterms(qry, pkt, cache, qry->timestamp.tv_sec);
+ /* Cut found */
+ return KR_STATE_PRODUCE;
+}
+
+/** @internal Spawn subrequest in current zone cut (no minimization or lookup). */
+static struct kr_query *zone_cut_subreq(struct kr_rplan *rplan, struct kr_query *parent,
+ const knot_dname_t *qname, uint16_t qtype)
+{
+ struct kr_query *next = kr_rplan_push(rplan, parent, qname, parent->sclass, qtype);
+ if (!next) {
+ return NULL;
+ }
+ kr_zonecut_set(&next->zone_cut, parent->zone_cut.name);
+ if (kr_zonecut_copy(&next->zone_cut, &parent->zone_cut) != 0 ||
+ kr_zonecut_copy_trust(&next->zone_cut, &parent->zone_cut) != 0) {
+ return NULL;
+ }
+ next->flags.NO_MINIMIZE = true;
+ if (parent->flags.DNSSEC_WANT) {
+ next->flags.DNSSEC_WANT = true;
+ }
+ return next;
+}
+
+static int forward_trust_chain_check(struct kr_request *request, struct kr_query *qry, bool resume)
+{
+ struct kr_rplan *rplan = &request->rplan;
+ trie_t *trust_anchors = request->ctx->trust_anchors;
+ trie_t *negative_anchors = request->ctx->negative_anchors;
+
+ if (qry->parent != NULL &&
+ !(qry->forward_flags.CNAME) &&
+ !(qry->flags.DNS64_MARK) &&
+ knot_dname_in_bailiwick(qry->zone_cut.name, qry->parent->zone_cut.name) >= 0) {
+ return KR_STATE_PRODUCE;
+ }
+
+ if (kr_fails_assert(qry->flags.FORWARD))
+ return KR_STATE_FAIL;
+
+ if (!trust_anchors) {
+ qry->flags.AWAIT_CUT = false;
+ return KR_STATE_PRODUCE;
+ }
+
+ if (qry->flags.DNSSEC_INSECURE) {
+ qry->flags.AWAIT_CUT = false;
+ return KR_STATE_PRODUCE;
+ }
+
+ if (qry->forward_flags.NO_MINIMIZE) {
+ qry->flags.AWAIT_CUT = false;
+ return KR_STATE_PRODUCE;
+ }
+
+ const knot_dname_t *start_name = qry->sname;
+ if ((qry->flags.AWAIT_CUT) && !resume) {
+ qry->flags.AWAIT_CUT = false;
+ const knot_dname_t *longest_ta = kr_ta_closest(request->ctx, qry->sname, qry->stype);
+ if (longest_ta) {
+ start_name = longest_ta;
+ qry->zone_cut.name = knot_dname_copy(start_name, qry->zone_cut.pool);
+ qry->flags.DNSSEC_WANT = true;
+ } else {
+ qry->flags.DNSSEC_WANT = false;
+ return KR_STATE_PRODUCE;
+ }
+ }
+
+ bool has_ta = (qry->zone_cut.trust_anchor != NULL);
+ knot_dname_t *ta_name = (has_ta ? qry->zone_cut.trust_anchor->owner : NULL);
+ bool refetch_ta = (!has_ta || !knot_dname_is_equal(qry->zone_cut.name, ta_name));
+ bool is_dnskey_subreq = kr_rplan_satisfies(qry, ta_name, KNOT_CLASS_IN, KNOT_RRTYPE_DNSKEY);
+ bool refetch_key = has_ta && (!qry->zone_cut.key || !knot_dname_is_equal(ta_name, qry->zone_cut.key->owner));
+ if (refetch_key && !is_dnskey_subreq) {
+ struct kr_query *next = zone_cut_subreq(rplan, qry, ta_name, KNOT_RRTYPE_DNSKEY);
+ if (!next) {
+ return KR_STATE_FAIL;
+ }
+ return KR_STATE_DONE;
+ }
+
+ int name_offset = 1;
+ const knot_dname_t *wanted_name;
+ bool nods, ds_req, ns_req, minimized, ns_exist;
+ do {
+ wanted_name = start_name;
+ ds_req = false;
+ ns_req = false;
+ ns_exist = true;
+
+ int cut_labels = knot_dname_labels(qry->zone_cut.name, NULL);
+ int wanted_name_labels = knot_dname_labels(wanted_name, NULL);
+ while (wanted_name[0] && wanted_name_labels > cut_labels + name_offset) {
+ wanted_name = knot_wire_next_label(wanted_name, NULL);
+ wanted_name_labels -= 1;
+ }
+ minimized = (wanted_name != qry->sname);
+
+ for (int i = 0; i < request->rplan.resolved.len; ++i) {
+ struct kr_query *q = request->rplan.resolved.at[i];
+ if (q->parent == qry &&
+ q->sclass == qry->sclass &&
+ (q->stype == KNOT_RRTYPE_DS || q->stype == KNOT_RRTYPE_NS) &&
+ knot_dname_is_equal(q->sname, wanted_name)) {
+ if (q->stype == KNOT_RRTYPE_DS) {
+ ds_req = true;
+ if (q->flags.CNAME) {
+ ns_exist = false;
+ } else if (!(q->flags.DNSSEC_OPTOUT)) {
+ int ret = kr_dnssec_matches_name_and_type(&request->auth_selected, q->uid,
+ wanted_name, KNOT_RRTYPE_NS);
+ ns_exist = (ret == kr_ok());
+ }
+ } else {
+ if (q->flags.CNAME) {
+ ns_exist = false;
+ }
+ ns_req = true;
+ }
+ }
+ }
+
+ if (ds_req && ns_exist && !ns_req && (minimized || resume)) {
+ struct kr_query *next = zone_cut_subreq(rplan, qry, wanted_name,
+ KNOT_RRTYPE_NS);
+ if (!next) {
+ return KR_STATE_FAIL;
+ }
+ return KR_STATE_DONE;
+ }
+
+ if (qry->parent == NULL && (qry->flags.CNAME) &&
+ ds_req && ns_req) {
+ return KR_STATE_PRODUCE;
+ }
+
+ /* set `nods` */
+ if ((qry->stype == KNOT_RRTYPE_DS) &&
+ knot_dname_is_equal(wanted_name, qry->sname)) {
+ nods = true;
+ } else if (resume && !ds_req) {
+ nods = false;
+ } else if (!minimized && qry->stype != KNOT_RRTYPE_DNSKEY) {
+ nods = true;
+ } else {
+ nods = ds_req;
+ }
+ name_offset += 1;
+ } while (ds_req && (ns_req || !ns_exist) && minimized);
+
+ /* Disable DNSSEC if it enters NTA. */
+ if (kr_ta_get(negative_anchors, wanted_name)){
+ VERBOSE_MSG(qry, ">< negative TA, going insecure\n");
+ qry->flags.DNSSEC_WANT = false;
+ }
+
+ /* Enable DNSSEC if we enter a new island of trust. */
+ bool want_secure = (qry->flags.DNSSEC_WANT) &&
+ !knot_wire_get_cd(request->qsource.packet->wire);
+ if (!(qry->flags.DNSSEC_WANT) &&
+ !knot_wire_get_cd(request->qsource.packet->wire) &&
+ kr_ta_get(trust_anchors, wanted_name)) {
+ qry->flags.DNSSEC_WANT = true;
+ want_secure = true;
+ if (kr_log_is_debug_qry(RESOLVER, qry)) {
+ KR_DNAME_GET_STR(qname_str, wanted_name);
+ VERBOSE_MSG(qry, ">< TA: '%s'\n", qname_str);
+ }
+ }
+
+ if (want_secure && !qry->zone_cut.trust_anchor) {
+ knot_rrset_t *ta_rr = kr_ta_get(trust_anchors, wanted_name);
+ if (!ta_rr) {
+ char name[] = "\0";
+ ta_rr = kr_ta_get(trust_anchors, (knot_dname_t*)name);
+ }
+ if (ta_rr) {
+ qry->zone_cut.trust_anchor = knot_rrset_copy(ta_rr, qry->zone_cut.pool);
+ }
+ }
+
+ has_ta = (qry->zone_cut.trust_anchor != NULL);
+ ta_name = (has_ta ? qry->zone_cut.trust_anchor->owner : NULL);
+ refetch_ta = (!has_ta || !knot_dname_is_equal(wanted_name, ta_name));
+ if (!nods && want_secure && refetch_ta) {
+ struct kr_query *next = zone_cut_subreq(rplan, qry, wanted_name,
+ KNOT_RRTYPE_DS);
+ if (!next) {
+ return KR_STATE_FAIL;
+ }
+ return KR_STATE_DONE;
+ }
+
+ /* Try to fetch missing DNSKEY.
+ * Do not fetch if this is a DNSKEY subrequest to avoid circular dependency. */
+ is_dnskey_subreq = kr_rplan_satisfies(qry, ta_name, KNOT_CLASS_IN, KNOT_RRTYPE_DNSKEY);
+ refetch_key = has_ta && (!qry->zone_cut.key || !knot_dname_is_equal(ta_name, qry->zone_cut.key->owner));
+ if (want_secure && refetch_key && !is_dnskey_subreq) {
+ struct kr_query *next = zone_cut_subreq(rplan, qry, ta_name, KNOT_RRTYPE_DNSKEY);
+ if (!next) {
+ return KR_STATE_FAIL;
+ }
+ return KR_STATE_DONE;
+ }
+
+ return KR_STATE_PRODUCE;
+}
+
+/* @todo: Validator refactoring, keep this in driver for now. */
+static int trust_chain_check(struct kr_request *request, struct kr_query *qry)
+{
+ struct kr_rplan *rplan = &request->rplan;
+ trie_t *trust_anchors = request->ctx->trust_anchors;
+ trie_t *negative_anchors = request->ctx->negative_anchors;
+
+ /* Disable DNSSEC if it enters NTA. */
+ if (kr_ta_get(negative_anchors, qry->zone_cut.name)){
+ VERBOSE_MSG(qry, ">< negative TA, going insecure\n");
+ qry->flags.DNSSEC_WANT = false;
+ qry->flags.DNSSEC_INSECURE = true;
+ }
+ if (qry->flags.DNSSEC_NODS) {
+ /* This is the next query iteration with minimized qname.
+ * At the previous iteration, DS non-existence was proven. */
+ VERBOSE_MSG(qry, "<= DS doesn't exist, going insecure\n");
+ qry->flags.DNSSEC_NODS = false;
+ qry->flags.DNSSEC_WANT = false;
+ qry->flags.DNSSEC_INSECURE = true;
+ }
+ /* Enable DNSSEC if entering a new (or different) island of trust,
+ * and update the TA RRset if required. */
+ const bool has_cd = knot_wire_get_cd(request->qsource.packet->wire);
+ knot_rrset_t *ta_rr = kr_ta_get(trust_anchors, qry->zone_cut.name);
+ if (!has_cd && ta_rr) {
+ qry->flags.DNSSEC_WANT = true;
+ if (qry->zone_cut.trust_anchor == NULL
+ || !knot_dname_is_equal(qry->zone_cut.trust_anchor->owner, qry->zone_cut.name)) {
+ mm_free(qry->zone_cut.pool, qry->zone_cut.trust_anchor);
+ qry->zone_cut.trust_anchor = knot_rrset_copy(ta_rr, qry->zone_cut.pool);
+
+ if (kr_log_is_debug_qry(RESOLVER, qry)) {
+ KR_DNAME_GET_STR(qname_str, ta_rr->owner);
+ VERBOSE_MSG(qry, ">< TA: '%s'\n", qname_str);
+ }
+ }
+ }
+
+ /* Try to fetch missing DS (from above the cut). */
+ const bool has_ta = (qry->zone_cut.trust_anchor != NULL);
+ const knot_dname_t *ta_name = (has_ta ? qry->zone_cut.trust_anchor->owner : NULL);
+ const bool refetch_ta = !has_ta || !knot_dname_is_equal(qry->zone_cut.name, ta_name);
+ const bool want_secure = qry->flags.DNSSEC_WANT && !has_cd;
+ if (want_secure && refetch_ta) {
+ /* @todo we could fetch the information from the parent cut, but we don't remember that now */
+ struct kr_query *next = kr_rplan_push(rplan, qry, qry->zone_cut.name, qry->sclass, KNOT_RRTYPE_DS);
+ if (!next) {
+ return KR_STATE_FAIL;
+ }
+ next->flags.AWAIT_CUT = true;
+ next->flags.DNSSEC_WANT = true;
+ return KR_STATE_DONE;
+ }
+ /* Try to fetch missing DNSKEY (either missing or above current cut).
+ * Do not fetch if this is a DNSKEY subrequest to avoid circular dependency. */
+ const bool is_dnskey_subreq = kr_rplan_satisfies(qry, ta_name, KNOT_CLASS_IN, KNOT_RRTYPE_DNSKEY);
+ const bool refetch_key = has_ta && (!qry->zone_cut.key || !knot_dname_is_equal(ta_name, qry->zone_cut.key->owner));
+ if (want_secure && refetch_key && !is_dnskey_subreq) {
+ struct kr_query *next = zone_cut_subreq(rplan, qry, ta_name, KNOT_RRTYPE_DNSKEY);
+ if (!next) {
+ return KR_STATE_FAIL;
+ }
+ return KR_STATE_DONE;
+ }
+
+ return KR_STATE_PRODUCE;
+}
+
+/// Check current zone cut status and credibility, spawn subrequests if needed.
+/// \return KR_STATE_FAIL, KR_STATE_DONE, or KR_STATE_PRODUCE
+/// TODO: careful review might be nice
+static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot_pkt_t *packet)
+{
+ // Set up nameserver+cut if overridden by policy.
+ int ret = kr_rule_data_src_check(qry, packet);
+ if (ret) return KR_STATE_FAIL;
+
+ /* Stub mode, just forward and do not solve cut. */
+ if (qry->flags.STUB) {
+ return KR_STATE_PRODUCE;
+ }
+
+ /* Forwarding to upstream resolver mode.
+ * Since the forwarding targets are already in qry->ns,
+ * cut fetching is not needed. */
+ if (qry->flags.FORWARD) {
+ return forward_trust_chain_check(request, qry, false);
+ }
+ if (!(qry->flags.AWAIT_CUT)) {
+ /* The query was resolved from cache.
+ * Spawn DS / DNSKEY requests if needed and exit. */
+ return trust_chain_check(request, qry);
+ }
+
+ /* The query wasn't resolved from cache,
+ * now it's time to look up the closest zone cut from the cache. */
+ struct kr_cache *cache = &request->ctx->cache;
+ if (!kr_cache_is_open(cache)) {
+ ret = kr_zonecut_set_sbelt(request->ctx, &qry->zone_cut);
+ if (ret != 0) {
+ return KR_STATE_FAIL;
+ }
+ VERBOSE_MSG(qry, "=> no cache open, using root hints\n");
+ qry->flags.AWAIT_CUT = false;
+ return KR_STATE_DONE;
+ }
+
+ const knot_dname_t *requested_name = qry->sname;
+ /* If at or below the parent zone cut, start from its encloser.
+ * This is for the case when we get to a dead end
+ * (and need glue from the parent), or for a DS refetch. */
+ if (qry->parent) {
+ const knot_dname_t *parent = qry->parent->zone_cut.name;
+ if (parent[0] != '\0'
+ && knot_dname_in_bailiwick(qry->sname, parent) >= 0) {
+ requested_name = knot_wire_next_label(parent, NULL);
+ }
+ } else if ((qry->stype == KNOT_RRTYPE_DS) && (qry->sname[0] != '\0')) {
+ /* If this is explicit DS query, start from encloser too. */
+ requested_name = knot_wire_next_label(requested_name, NULL);
+ }
+
+ int state = KR_STATE_FAIL;
+ do {
+ state = ns_fetch_cut(qry, requested_name, request, packet);
+ if (state == KR_STATE_DONE || (state & KR_STATE_FAIL)) {
+ return state;
+ } else if (state == KR_STATE_CONSUME) {
+ requested_name = knot_wire_next_label(requested_name, NULL);
+ }
+ } while (state == KR_STATE_CONSUME);
+
+ /* Update minimized QNAME if zone cut changed */
+ if (qry->zone_cut.name && qry->zone_cut.name[0] != '\0' && !(qry->flags.NO_MINIMIZE)) {
+ if (kr_make_query(qry, packet) != 0) {
+ return KR_STATE_FAIL;
+ }
+ }
+ qry->flags.AWAIT_CUT = false;
+
+ /* Check trust chain */
+ return trust_chain_check(request, qry);
+}
+
+
+static int ns_resolve_addr(struct kr_query *qry, struct kr_request *param, struct kr_transport *transport, uint16_t next_type)
+{
+ struct kr_rplan *rplan = &param->rplan;
+ struct kr_context *ctx = param->ctx;
+
+
+ /* Start NS queries from the root, to avoid certain cases
+ * where an NS drops out of cache and the rest is unavailable;
+ * this would lead to a dependency loop in the current zone cut.
+ */
+
+ /* Bail out if the query is already pending or forms a dependency loop. */
+ if (!next_type || kr_rplan_satisfies(qry->parent, transport->ns_name, KNOT_CLASS_IN, next_type)) {
+ /* Fall back to SBELT if root server query fails. */
+ if (!next_type && qry->zone_cut.name[0] == '\0') {
+ VERBOSE_MSG(qry, "=> fallback to root hints\n");
+ kr_zonecut_set_sbelt(ctx, &qry->zone_cut);
+ return kr_error(EAGAIN);
+ }
+ /* Neither IPv4 nor IPv6 is available; flag the server as unusable. */
+ VERBOSE_MSG(qry, "=> unresolvable NS address, bailing out\n");
+ kr_zonecut_del_all(&qry->zone_cut, transport->ns_name);
+ return kr_error(EHOSTUNREACH);
+ }
+ /* Push new query to the resolution plan */
+ struct kr_query *next =
+ kr_rplan_push(rplan, qry, transport->ns_name, KNOT_CLASS_IN, next_type);
+ if (!next) {
+ return kr_error(ENOMEM);
+ }
+ next->flags.NONAUTH = true;
+
+ /* At the root level with no NS addresses, add SBELT subrequest. */
+ int ret = 0;
+ if (qry->zone_cut.name[0] == '\0') {
+ ret = kr_zonecut_set_sbelt(ctx, &next->zone_cut);
+ if (ret == 0) { /* Copy TA and key since it's the same cut to avoid lookup. */
+ kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
+ kr_zonecut_set_sbelt(ctx, &qry->zone_cut); /* Add SBELT to parent in case query fails. */
+ }
+ } else {
+ next->flags.AWAIT_CUT = true;
+ }
+
+ if (ret == 0) {
+ if (next_type == KNOT_RRTYPE_AAAA) {
+ qry->flags.AWAIT_IPV6 = true;
+ } else {
+ qry->flags.AWAIT_IPV4 = true;
+ }
+ }
+
+ return ret;
+}
+
+int kr_resolve_produce(struct kr_request *request, struct kr_transport **transport, knot_pkt_t *packet)
+{
+ kr_require(request && transport && packet);
+ struct kr_rplan *rplan = &request->rplan;
+
+ /* No query left for resolution */
+ if (kr_rplan_empty(rplan)) {
+ return KR_STATE_FAIL;
+ }
+
+ struct kr_query *qry = array_tail(rplan->pending);
+
+ /* If we have deferred answers, resume them. */
+ if (qry->deferred != NULL) {
+ /* @todo: Refactoring validator, check trust chain before resuming. */
+ int state = 0;
+ if (((qry->flags.FORWARD) == 0) ||
+ ((qry->stype == KNOT_RRTYPE_DS) && (qry->flags.CNAME))) {
+ state = trust_chain_check(request, qry);
+ } else {
+ state = forward_trust_chain_check(request, qry, true);
+ }
+
+ switch(state) {
+ case KR_STATE_FAIL: return KR_STATE_FAIL;
+ case KR_STATE_DONE: return KR_STATE_PRODUCE;
+ default: break;
+ }
+ VERBOSE_MSG(qry, "=> resuming yielded answer\n");
+ struct kr_layer_pickle *pickle = qry->deferred;
+ request->state = KR_STATE_YIELD;
+ set_yield(&request->answ_selected, qry->uid, false);
+ set_yield(&request->auth_selected, qry->uid, false);
+ RESUME_LAYERS(layer_id(request, pickle->api), request, qry, consume, pickle->pkt);
+ if (request->state != KR_STATE_YIELD) {
+ /* No new deferred answers, take the next */
+ qry->deferred = pickle->next;
+ }
+ } else {
+ /* The caller is interested in always tracking a zone cut, even if the answer is cached.
+ * This is normally not required and incurs additional cache lookups for a cached answer. */
+ if (qry->flags.ALWAYS_CUT) { // LATER: maybe the flag doesn't work well anymore
+ switch(zone_cut_check(request, qry, packet)) {
+ case KR_STATE_FAIL: return KR_STATE_FAIL;
+ case KR_STATE_DONE: return KR_STATE_PRODUCE;
+ default: break;
+ }
+ }
+ /* Resolve current query and produce dependent or finish */
+ request->state = KR_STATE_PRODUCE;
+ ITERATE_LAYERS(request, qry, produce, packet);
+ if (!(request->state & KR_STATE_FAIL) && knot_wire_get_qr(packet->wire)) {
+ /* Produced an answer from cache, consume it. */
+ kr_server_selection_cached(qry);
+ qry->secret = 0;
+ request->state = KR_STATE_CONSUME;
+ ITERATE_LAYERS(request, qry, consume, packet);
+ }
+ }
+ switch(request->state) {
+ case KR_STATE_FAIL: return request->state;
+ case KR_STATE_CONSUME: break;
+ case KR_STATE_DONE:
+ default: /* Current query is done */
+ if (qry->flags.RESOLVED && request->state != KR_STATE_YIELD) {
+ kr_rplan_pop(rplan, qry);
+ }
+ ITERATE_LAYERS(request, qry, reset);
+ return kr_rplan_empty(rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
+ }
+ /* At this point we need to send a query upstream to proceed towards success. */
+
+ /* This query has RD=0 or is ANY, stop here. */
+ if (qry->stype == KNOT_RRTYPE_ANY ||
+ !knot_wire_get_rd(request->qsource.packet->wire)) {
+ VERBOSE_MSG(qry, "=> qtype is ANY or RD=0, bail out\n");
+ return KR_STATE_FAIL;
+ }
+
+ /* Update zone cut, spawn new subrequests. */
+ int state = zone_cut_check(request, qry, packet);
+ switch(state) {
+ case KR_STATE_FAIL: return KR_STATE_FAIL;
+ case KR_STATE_DONE: return KR_STATE_PRODUCE;
+ default: break;
+ }
+
+ const struct kr_qflags qflg = qry->flags;
+ const bool retry = qflg.TCP || qflg.BADCOOKIE_AGAIN;
+ if (!qflg.FORWARD && !qflg.STUB && !retry) { /* Keep NS when requerying/stub/badcookie. */
+ /* Root DNSKEY must be fetched from the hints to avoid chicken and egg problem. */
+ if (qry->sname[0] == '\0' && qry->stype == KNOT_RRTYPE_DNSKEY) {
+ kr_zonecut_set_sbelt(request->ctx, &qry->zone_cut);
+ }
+ }
+
+ qry->server_selection.choose_transport(qry, transport);
+
+ if (*transport == NULL) {
+ /* Properly signal to serve_stale module. */
+ if (qry->flags.NO_NS_FOUND) {
+ ITERATE_LAYERS(request, qry, reset);
+ kr_rplan_pop(rplan, qry);
+ return KR_STATE_FAIL;
+ } else {
+ /* FIXME: This is probably quite inefficient:
+ * we go through the whole qr_task_step loop just because of the serve_stale
+ * module which might not even be loaded. */
+ qry->flags.NO_NS_FOUND = true;
+ return KR_STATE_PRODUCE;
+ }
+ }
+
+ if ((*transport)->protocol == KR_TRANSPORT_RESOLVE_A || (*transport)->protocol == KR_TRANSPORT_RESOLVE_AAAA) {
+ uint16_t type = (*transport)->protocol == KR_TRANSPORT_RESOLVE_A ? KNOT_RRTYPE_A : KNOT_RRTYPE_AAAA;
+ ns_resolve_addr(qry, qry->request, *transport, type);
+ ITERATE_LAYERS(request, qry, reset);
+ return KR_STATE_PRODUCE;
+ }
+
+ /* Randomize query case (unless turned off). */
+ qry->secret = qry->flags.NO_0X20 ? 0 : kr_rand_bytes(sizeof(qry->secret));
+ knot_dname_t *qname_raw = kr_pkt_qname_raw(packet);
+ randomized_qname_case(qname_raw, qry->secret);
+
+ /*
+ * Additional query is going to be finalized when calling
+ * kr_resolve_checkout().
+ */
+ qry->timestamp_mono = kr_now();
+ return request->state;
+}
+
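A hedged sketch of how a driver is expected to use this entry point; the real loop lives in the daemon, and the reply packet plus the checkout/send steps are only indicated:

    struct kr_transport *transport = NULL;
    int state = kr_resolve_produce(request, &transport, pkt);
    if (state == KR_STATE_CONSUME && transport) {
        /* Finalize the outgoing query via kr_resolve_checkout(),
         * send pkt to the chosen transport, then feed the reply back: */
        state = kr_resolve_consume(request, &transport, reply_pkt);
    }
    /* KR_STATE_PRODUCE asks the driver to call kr_resolve_produce() again;
     * KR_STATE_DONE / KR_STATE_FAIL end the request. */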
diff --git a/lib/resolve.c b/lib/resolve.c
index bbda5e1f..9dd7ae6e 100644
--- a/lib/resolve.c
+++ b/lib/resolve.c
@@ -2,6 +2,8 @@
* SPDX-License-Identifier: GPL-3.0-or-later
*/
+#include "lib/resolve-impl.h"
+
#include <ctype.h>
#include <inttypes.h>
#include <stdio.h>
@@ -25,8 +27,6 @@
#define KNOT_EDNS_OPTION_COOKIE 10
#endif /* ENABLE_COOKIES */
-#define VERBOSE_MSG(qry, ...) kr_log_q((qry), RESOLVER, __VA_ARGS__)
-
/** Magic defaults */
#ifndef LRU_RTT_SIZE
#define LRU_RTT_SIZE 65536 /**< NS RTT cache size */
@@ -77,7 +77,7 @@ bool kr_rank_test(uint8_t rank, uint8_t kr_flag)
}
/** @internal Set @a yielded to all RRs with matching @a qry_uid. */
-static void set_yield(ranked_rr_array_t *array, const uint32_t qry_uid, const bool yielded)
+void set_yield(ranked_rr_array_t *array, const uint32_t qry_uid, const bool yielded)
{
for (unsigned i = 0; i < array->len; ++i) {
ranked_rr_array_entry_t *entry = array->at[i];
@@ -91,7 +91,7 @@ static void set_yield(ranked_rr_array_t *array, const uint32_t qry_uid, const bo
* @internal Defer execution of current query.
* The current layer state and input will be pushed to a stack and resumed on next iteration.
*/
-static int consume_yield(kr_layer_t *ctx, knot_pkt_t *pkt)
+int consume_yield(kr_layer_t *ctx, knot_pkt_t *pkt)
{
struct kr_request *req = ctx->req;
size_t pkt_size = pkt->size;
@@ -113,209 +113,6 @@ static int consume_yield(kr_layer_t *ctx, knot_pkt_t *pkt)
}
return kr_error(ENOMEM);
}
-static int begin_yield(kr_layer_t *ctx) { return kr_ok(); }
-static int reset_yield(kr_layer_t *ctx) { return kr_ok(); }
-static int finish_yield(kr_layer_t *ctx) { return kr_ok(); }
-static int produce_yield(kr_layer_t *ctx, knot_pkt_t *pkt) { return kr_ok(); }
-static int checkout_yield(kr_layer_t *ctx, knot_pkt_t *packet, struct sockaddr *dst, int type) { return kr_ok(); }
-static int answer_finalize_yield(kr_layer_t *ctx) { return kr_ok(); }
-
-/** @internal Macro for iterating module layers. */
-#define RESUME_LAYERS(from, r, qry, func, ...) \
- (r)->current_query = (qry); \
- for (size_t i = (from); i < (r)->ctx->modules->len; ++i) { \
- struct kr_module *mod = (r)->ctx->modules->at[i]; \
- if (mod->layer) { \
- struct kr_layer layer = {.state = (r)->state, .api = mod->layer, .req = (r)}; \
- if (layer.api && layer.api->func) { \
- (r)->state = layer.api->func(&layer, ##__VA_ARGS__); \
- /* It's an easy mistake to return error code, for example. */ \
- /* (though we could allow such an overload later) */ \
- if (kr_fails_assert(kr_state_consistent((r)->state))) { \
- (r)->state = KR_STATE_FAIL; \
- } else \
- if ((r)->state == KR_STATE_YIELD) { \
- func ## _yield(&layer, ##__VA_ARGS__); \
- break; \
- } \
- } \
- } \
- } /* Invalidate current query. */ \
- (r)->current_query = NULL
-
-/** @internal Macro for starting module iteration. */
-#define ITERATE_LAYERS(req, qry, func, ...) RESUME_LAYERS(0, req, qry, func, ##__VA_ARGS__)
-
-/** @internal Find layer id matching API. */
-static inline size_t layer_id(struct kr_request *req, const struct kr_layer_api *api) {
- module_array_t *modules = req->ctx->modules;
- for (size_t i = 0; i < modules->len; ++i) {
- if (modules->at[i]->layer == api) {
- return i;
- }
- }
- return 0; /* Not found, try all. */
-}
-
-/* @internal We don't need to deal with locale here */
-KR_CONST static inline bool isletter(unsigned chr)
-{ return (chr | 0x20 /* tolower */) - 'a' <= 'z' - 'a'; }
-
-/* Randomize QNAME letter case.
- * This adds 32 bits of randomness at maximum, but that's more than an average domain name length.
- * https://tools.ietf.org/html/draft-vixie-dnsext-dns0x20-00
- */
-static void randomized_qname_case(knot_dname_t * restrict qname, uint32_t secret)
-{
- if (secret == 0)
- return;
- if (kr_fails_assert(qname))
- return;
- const int len = knot_dname_size(qname) - 2; /* Skip first, last label. First is length, last is always root */
- for (int i = 0; i < len; ++i) {
- /* Note: this relies on the fact that correct label lengths
- * can't pass the isletter() test (by "luck"). */
- if (isletter(*++qname)) {
- *qname ^= ((secret >> (i & 31)) & 1) * 0x20;
- }
- }
-}
-
-/** This turns of QNAME minimisation if there is a non-terminal between current zone cut, and name target.
- * It save several minimization steps, as the zone cut is likely final one.
- */
-static void check_empty_nonterms(struct kr_query *qry, knot_pkt_t *pkt, struct kr_cache *cache, uint32_t timestamp)
-{
- // FIXME cleanup, etc.
-#if 0
- if (qry->flags.NO_MINIMIZE) {
- return;
- }
-
- const knot_dname_t *target = qry->sname;
- const knot_dname_t *cut_name = qry->zone_cut.name;
- if (!target || !cut_name)
- return;
-
- struct kr_cache_entry *entry = NULL;
- /* @note: The non-terminal must be direct child of zone cut (e.g. label distance <= 2),
- * otherwise this would risk leaking information to parent if the NODATA TTD > zone cut TTD. */
- int labels = knot_dname_labels(target, NULL) - knot_dname_labels(cut_name, NULL);
- while (target[0] && labels > 2) {
- target = knot_wire_next_label(target, NULL);
- --labels;
- }
- for (int i = 0; i < labels; ++i) {
- int ret = kr_cache_peek(cache, KR_CACHE_PKT, target, KNOT_RRTYPE_NS, &entry, &timestamp);
- if (ret == 0) { /* Either NXDOMAIN or NODATA, start here. */
- /* @todo We could stop resolution here for NXDOMAIN, but we can't because of broken CDNs */
- qry->flags.NO_MINIMIZE = true;
- kr_make_query(qry, pkt);
- break;
- }
- kr_assert(target[0]);
- target = knot_wire_next_label(target, NULL);
- }
- kr_cache_commit(cache);
-#endif
-}
-
-static int ns_fetch_cut(struct kr_query *qry, const knot_dname_t *requested_name,
- struct kr_request *req, knot_pkt_t *pkt)
-{
- /* It can occur that here parent query already have
- * provably insecure zonecut which not in the cache yet. */
- struct kr_qflags pflags;
- if (qry->parent) {
- pflags = qry->parent->flags;
- }
- const bool is_insecure = qry->parent != NULL
- && !(pflags.AWAIT_IPV4 || pflags.AWAIT_IPV6)
- && (pflags.DNSSEC_INSECURE || pflags.DNSSEC_NODS);
-
- /* Want DNSSEC if it's possible to secure this name
- * (e.g. is covered by any TA) */
- if (is_insecure) {
- /* If parent is insecure we don't want DNSSEC
- * even if cut name is covered by TA. */
- qry->flags.DNSSEC_WANT = false;
- qry->flags.DNSSEC_INSECURE = true;
- VERBOSE_MSG(qry, "=> going insecure because parent query is insecure\n");
- } else if (kr_ta_closest(req->ctx, qry->zone_cut.name, KNOT_RRTYPE_NS)) {
- qry->flags.DNSSEC_WANT = true;
- } else {
- qry->flags.DNSSEC_WANT = false;
- VERBOSE_MSG(qry, "=> going insecure because there's no covering TA\n");
- }
-
- struct kr_zonecut cut_found;
- kr_zonecut_init(&cut_found, requested_name, req->rplan.pool);
- /* Cut that has been found can differs from cut that has been requested.
- * So if not already insecure,
- * try to fetch ta & keys even if initial cut name not covered by TA */
- bool secure = !is_insecure;
- int ret = kr_zonecut_find_cached(req->ctx, &cut_found, requested_name,
- qry, &secure);
- if (ret == kr_error(ENOENT)) {
- /* No cached cut found, start from SBELT
- * and issue priming query. */
- kr_zonecut_deinit(&cut_found);
- ret = kr_zonecut_set_sbelt(req->ctx, &qry->zone_cut);
- if (ret != 0) {
- return KR_STATE_FAIL;
- }
- VERBOSE_MSG(qry, "=> using root hints\n");
- qry->flags.AWAIT_CUT = false;
- return KR_STATE_DONE;
- } else if (ret != kr_ok()) {
- kr_zonecut_deinit(&cut_found);
- return KR_STATE_FAIL;
- }
-
- /* Find out security status.
- * Go insecure if the zone cut is provably insecure */
- if ((qry->flags.DNSSEC_WANT) && !secure) {
- VERBOSE_MSG(qry, "=> NS is provably without DS, going insecure\n");
- qry->flags.DNSSEC_WANT = false;
- qry->flags.DNSSEC_INSECURE = true;
- }
- /* Zonecut name can change, check it again
- * to prevent unnecessary DS & DNSKEY queries */
- if (!(qry->flags.DNSSEC_INSECURE) &&
- kr_ta_closest(req->ctx, cut_found.name, KNOT_RRTYPE_NS)) {
- qry->flags.DNSSEC_WANT = true;
- } else {
- qry->flags.DNSSEC_WANT = false;
- }
- /* Check if any DNSKEY found for cached cut */
- if (qry->flags.DNSSEC_WANT && cut_found.key == NULL &&
- kr_zonecut_is_empty(&cut_found)) {
- /* Cut found and there are no proofs of zone insecurity.
- * But no DNSKEY found and no glue fetched.
- * We have got circular dependency - must fetch A\AAAA
- * from authoritative, but we have no key to verify it. */
- kr_zonecut_deinit(&cut_found);
- if (requested_name[0] != '\0' ) {
- /* If not root - try next label */
- return KR_STATE_CONSUME;
- }
- /* No cached cut & keys found, start from SBELT */
- ret = kr_zonecut_set_sbelt(req->ctx, &qry->zone_cut);
- if (ret != 0) {
- return KR_STATE_FAIL;
- }
- VERBOSE_MSG(qry, "=> using root hints\n");
- qry->flags.AWAIT_CUT = false;
- return KR_STATE_DONE;
- }
- /* Use the found zone cut. */
- kr_zonecut_move(&qry->zone_cut, &cut_found);
- /* Check if there's a non-terminal between target and current cut. */
- struct kr_cache *cache = &req->ctx->cache;
- check_empty_nonterms(qry, pkt, cache, qry->timestamp.tv_sec);
- /* Cut found */
- return KR_STATE_PRODUCE;
-}
static int edns_put(knot_pkt_t *pkt, bool reclaim)
{
@@ -629,27 +426,42 @@ static void answer_finalize(struct kr_request *request)
static int query_finalize(struct kr_request *request, struct kr_query *qry, knot_pkt_t *pkt)
{
knot_pkt_begin(pkt, KNOT_ADDITIONAL);
+ const bool is_iter = !(qry->flags.STUB || qry->flags.FORWARD);
+ if (!is_iter)
+ knot_wire_set_rd(pkt->wire);
+ // The rest of this function is all about EDNS.
if (qry->flags.NO_EDNS)
return kr_ok();
- /* Remove any EDNS records from any previous iteration. */
+ // Replace any EDNS records from any previous iteration.
int ret = edns_erase_and_reserve(pkt);
+ if (ret == 0) ret = edns_create(pkt, request);
if (ret) return ret;
- ret = edns_create(pkt, request);
- if (ret) return ret;
- if (qry->flags.STUB) {
- /* Stub resolution */
- knot_wire_set_rd(pkt->wire);
- if (knot_wire_get_cd(request->qsource.packet->wire)) {
- knot_wire_set_cd(pkt->wire);
- }
- } else {
- /* Full resolution (ask for +cd and +do) */
+
+ if (!qry->flags.STUB)
knot_edns_set_do(pkt->opt_rr);
+
+ // CD flag is a bit controversial for .FORWARD:
+ // The original DNSSEC RFCs assume that if someone is validating,
+ // they will use CD=1 in requests to upstream. The intention was that
+ // this way both sides could use independent sets of trust anchors.
+ //
+ // However, in practice the trust anchor differences seem rather rare/small.
+ // And some of the normal use cases get harmed. With CD=1, the upstream
+ // (e.g. 1.1.1.1) can keep returning a cached bogus answer, even though they could
+ // instead retry with a different authoritative server and get a good one.
+ //
+ // Therefore if we want validation (CD from client, negative trust anchors),
+ // we send CD=0 and then propagate returned SERVFAIL (but some retry logic remains).
+ //
+ // Theoretically it might be best to use both CD=0 and CD=1, with either of them
+ // in some kind of DNSSEC fallback, but I see a bad complexity/improvement ratio.
+ if (is_iter) {
knot_wire_set_cd(pkt->wire);
- if (qry->flags.FORWARD) {
- knot_wire_set_rd(pkt->wire);
- }
+ } else {
+ if (knot_wire_get_cd(request->qsource.packet->wire) || !qry->flags.DNSSEC_WANT)
+ knot_wire_set_cd(pkt->wire);
}
+
return kr_ok();
}
@@ -955,548 +767,6 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo
return kr_rplan_empty(&request->rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
}
-/** @internal Spawn subrequest in current zone cut (no minimization or lookup). */
-static struct kr_query *zone_cut_subreq(struct kr_rplan *rplan, struct kr_query *parent,
- const knot_dname_t *qname, uint16_t qtype)
-{
- struct kr_query *next = kr_rplan_push(rplan, parent, qname, parent->sclass, qtype);
- if (!next) {
- return NULL;
- }
- kr_zonecut_set(&next->zone_cut, parent->zone_cut.name);
- if (kr_zonecut_copy(&next->zone_cut, &parent->zone_cut) != 0 ||
- kr_zonecut_copy_trust(&next->zone_cut, &parent->zone_cut) != 0) {
- return NULL;
- }
- next->flags.NO_MINIMIZE = true;
- if (parent->flags.DNSSEC_WANT) {
- next->flags.DNSSEC_WANT = true;
- }
- return next;
-}
-
-static int forward_trust_chain_check(struct kr_request *request, struct kr_query *qry, bool resume)
-{
- struct kr_rplan *rplan = &request->rplan;
- trie_t *trust_anchors = request->ctx->trust_anchors;
- trie_t *negative_anchors = request->ctx->negative_anchors;
-
- if (qry->parent != NULL &&
- !(qry->forward_flags.CNAME) &&
- !(qry->flags.DNS64_MARK) &&
- knot_dname_in_bailiwick(qry->zone_cut.name, qry->parent->zone_cut.name) >= 0) {
- return KR_STATE_PRODUCE;
- }
-
- if (kr_fails_assert(qry->flags.FORWARD))
- return KR_STATE_FAIL;
-
- if (!trust_anchors) {
- qry->flags.AWAIT_CUT = false;
- return KR_STATE_PRODUCE;
- }
-
- if (qry->flags.DNSSEC_INSECURE) {
- qry->flags.AWAIT_CUT = false;
- return KR_STATE_PRODUCE;
- }
-
- if (qry->forward_flags.NO_MINIMIZE) {
- qry->flags.AWAIT_CUT = false;
- return KR_STATE_PRODUCE;
- }
-
- const knot_dname_t *start_name = qry->sname;
- if ((qry->flags.AWAIT_CUT) && !resume) {
- qry->flags.AWAIT_CUT = false;
- const knot_dname_t *longest_ta = kr_ta_closest(request->ctx, qry->sname, qry->stype);
- if (longest_ta) {
- start_name = longest_ta;
- qry->zone_cut.name = knot_dname_copy(start_name, qry->zone_cut.pool);
- qry->flags.DNSSEC_WANT = true;
- } else {
- qry->flags.DNSSEC_WANT = false;
- return KR_STATE_PRODUCE;
- }
- }
-
- bool has_ta = (qry->zone_cut.trust_anchor != NULL);
- knot_dname_t *ta_name = (has_ta ? qry->zone_cut.trust_anchor->owner : NULL);
- bool refetch_ta = (!has_ta || !knot_dname_is_equal(qry->zone_cut.name, ta_name));
- bool is_dnskey_subreq = kr_rplan_satisfies(qry, ta_name, KNOT_CLASS_IN, KNOT_RRTYPE_DNSKEY);
- bool refetch_key = has_ta && (!qry->zone_cut.key || !knot_dname_is_equal(ta_name, qry->zone_cut.key->owner));
- if (refetch_key && !is_dnskey_subreq) {
- struct kr_query *next = zone_cut_subreq(rplan, qry, ta_name, KNOT_RRTYPE_DNSKEY);
- if (!next) {
- return KR_STATE_FAIL;
- }
- return KR_STATE_DONE;
- }
-
- int name_offset = 1;
- const knot_dname_t *wanted_name;
- bool nods, ds_req, ns_req, minimized, ns_exist;
- do {
- wanted_name = start_name;
- ds_req = false;
- ns_req = false;
- ns_exist = true;
-
- int cut_labels = knot_dname_labels(qry->zone_cut.name, NULL);
- int wanted_name_labels = knot_dname_labels(wanted_name, NULL);
- while (wanted_name[0] && wanted_name_labels > cut_labels + name_offset) {
- wanted_name = knot_wire_next_label(wanted_name, NULL);
- wanted_name_labels -= 1;
- }
- minimized = (wanted_name != qry->sname);
-
- for (int i = 0; i < request->rplan.resolved.len; ++i) {
- struct kr_query *q = request->rplan.resolved.at[i];
- if (q->parent == qry &&
- q->sclass == qry->sclass &&
- (q->stype == KNOT_RRTYPE_DS || q->stype == KNOT_RRTYPE_NS) &&
- knot_dname_is_equal(q->sname, wanted_name)) {
- if (q->stype == KNOT_RRTYPE_DS) {
- ds_req = true;
- if (q->flags.CNAME) {
- ns_exist = false;
- } else if (!(q->flags.DNSSEC_OPTOUT)) {
- int ret = kr_dnssec_matches_name_and_type(&request->auth_selected, q->uid,
- wanted_name, KNOT_RRTYPE_NS);
- ns_exist = (ret == kr_ok());
- }
- } else {
- if (q->flags.CNAME) {
- ns_exist = false;
- }
- ns_req = true;
- }
- }
- }
-
- if (ds_req && ns_exist && !ns_req && (minimized || resume)) {
- struct kr_query *next = zone_cut_subreq(rplan, qry, wanted_name,
- KNOT_RRTYPE_NS);
- if (!next) {
- return KR_STATE_FAIL;
- }
- return KR_STATE_DONE;
- }
-
- if (qry->parent == NULL && (qry->flags.CNAME) &&
- ds_req && ns_req) {
- return KR_STATE_PRODUCE;
- }
-
- /* set `nods` */
- if ((qry->stype == KNOT_RRTYPE_DS) &&
- knot_dname_is_equal(wanted_name, qry->sname)) {
- nods = true;
- } else if (resume && !ds_req) {
- nods = false;
- } else if (!minimized && qry->stype != KNOT_RRTYPE_DNSKEY) {
- nods = true;
- } else {
- nods = ds_req;
- }
- name_offset += 1;
- } while (ds_req && (ns_req || !ns_exist) && minimized);
-
- /* Disable DNSSEC if it enters NTA. */
- if (kr_ta_get(negative_anchors, wanted_name)){
- VERBOSE_MSG(qry, ">< negative TA, going insecure\n");
- qry->flags.DNSSEC_WANT = false;
- }
-
- /* Enable DNSSEC if enters a new island of trust. */
- bool want_secure = (qry->flags.DNSSEC_WANT) &&
- !knot_wire_get_cd(request->qsource.packet->wire);
- if (!(qry->flags.DNSSEC_WANT) &&
- !knot_wire_get_cd(request->qsource.packet->wire) &&
- kr_ta_get(trust_anchors, wanted_name)) {
- qry->flags.DNSSEC_WANT = true;
- want_secure = true;
- if (kr_log_is_debug_qry(RESOLVER, qry)) {
- KR_DNAME_GET_STR(qname_str, wanted_name);
- VERBOSE_MSG(qry, ">< TA: '%s'\n", qname_str);
- }
- }
-
- if (want_secure && !qry->zone_cut.trust_anchor) {
- knot_rrset_t *ta_rr = kr_ta_get(trust_anchors, wanted_name);
- if (!ta_rr) {
- char name[] = "\0";
- ta_rr = kr_ta_get(trust_anchors, (knot_dname_t*)name);
- }
- if (ta_rr) {
- qry->zone_cut.trust_anchor = knot_rrset_copy(ta_rr, qry->zone_cut.pool);
- }
- }
-
- has_ta = (qry->zone_cut.trust_anchor != NULL);
- ta_name = (has_ta ? qry->zone_cut.trust_anchor->owner : NULL);
- refetch_ta = (!has_ta || !knot_dname_is_equal(wanted_name, ta_name));
- if (!nods && want_secure && refetch_ta) {
- struct kr_query *next = zone_cut_subreq(rplan, qry, wanted_name,
- KNOT_RRTYPE_DS);
- if (!next) {
- return KR_STATE_FAIL;
- }
- return KR_STATE_DONE;
- }
-
- /* Try to fetch missing DNSKEY.
- * Do not fetch if this is a DNSKEY subrequest to avoid circular dependency. */
- is_dnskey_subreq = kr_rplan_satisfies(qry, ta_name, KNOT_CLASS_IN, KNOT_RRTYPE_DNSKEY);
- refetch_key = has_ta && (!qry->zone_cut.key || !knot_dname_is_equal(ta_name, qry->zone_cut.key->owner));
- if (want_secure && refetch_key && !is_dnskey_subreq) {
- struct kr_query *next = zone_cut_subreq(rplan, qry, ta_name, KNOT_RRTYPE_DNSKEY);
- if (!next) {
- return KR_STATE_FAIL;
- }
- return KR_STATE_DONE;
- }
-
- return KR_STATE_PRODUCE;
-}
-
-/* @todo: Validator refactoring, keep this in driver for now. */
-static int trust_chain_check(struct kr_request *request, struct kr_query *qry)
-{
- struct kr_rplan *rplan = &request->rplan;
- trie_t *trust_anchors = request->ctx->trust_anchors;
- trie_t *negative_anchors = request->ctx->negative_anchors;
-
- /* Disable DNSSEC if it enters NTA. */
- if (kr_ta_get(negative_anchors, qry->zone_cut.name)){
- VERBOSE_MSG(qry, ">< negative TA, going insecure\n");
- qry->flags.DNSSEC_WANT = false;
- qry->flags.DNSSEC_INSECURE = true;
- }
- if (qry->flags.DNSSEC_NODS) {
- /* This is the next query iteration with a minimized qname.
- * At the previous iteration DS non-existence was proven. */
- VERBOSE_MSG(qry, "<= DS doesn't exist, going insecure\n");
- qry->flags.DNSSEC_NODS = false;
- qry->flags.DNSSEC_WANT = false;
- qry->flags.DNSSEC_INSECURE = true;
- }
- /* Enable DNSSEC if entering a new (or different) island of trust,
- * and update the TA RRset if required. */
- const bool has_cd = knot_wire_get_cd(request->qsource.packet->wire);
- knot_rrset_t *ta_rr = kr_ta_get(trust_anchors, qry->zone_cut.name);
- if (!has_cd && ta_rr) {
- qry->flags.DNSSEC_WANT = true;
- if (qry->zone_cut.trust_anchor == NULL
- || !knot_dname_is_equal(qry->zone_cut.trust_anchor->owner, qry->zone_cut.name)) {
- mm_free(qry->zone_cut.pool, qry->zone_cut.trust_anchor);
- qry->zone_cut.trust_anchor = knot_rrset_copy(ta_rr, qry->zone_cut.pool);
-
- if (kr_log_is_debug_qry(RESOLVER, qry)) {
- KR_DNAME_GET_STR(qname_str, ta_rr->owner);
- VERBOSE_MSG(qry, ">< TA: '%s'\n", qname_str);
- }
- }
- }
-
- /* Try to fetch missing DS (from above the cut). */
- const bool has_ta = (qry->zone_cut.trust_anchor != NULL);
- const knot_dname_t *ta_name = (has_ta ? qry->zone_cut.trust_anchor->owner : NULL);
- const bool refetch_ta = !has_ta || !knot_dname_is_equal(qry->zone_cut.name, ta_name);
- const bool want_secure = qry->flags.DNSSEC_WANT && !has_cd;
- if (want_secure && refetch_ta) {
- /* @todo we could fetch the information from the parent cut, but we don't remember that now */
- struct kr_query *next = kr_rplan_push(rplan, qry, qry->zone_cut.name, qry->sclass, KNOT_RRTYPE_DS);
- if (!next) {
- return KR_STATE_FAIL;
- }
- next->flags.AWAIT_CUT = true;
- next->flags.DNSSEC_WANT = true;
- return KR_STATE_DONE;
- }
- /* Try to fetch missing DNSKEY (either missing or above current cut).
- * Do not fetch if this is a DNSKEY subrequest to avoid circular dependency. */
- const bool is_dnskey_subreq = kr_rplan_satisfies(qry, ta_name, KNOT_CLASS_IN, KNOT_RRTYPE_DNSKEY);
- const bool refetch_key = has_ta && (!qry->zone_cut.key || !knot_dname_is_equal(ta_name, qry->zone_cut.key->owner));
- if (want_secure && refetch_key && !is_dnskey_subreq) {
- struct kr_query *next = zone_cut_subreq(rplan, qry, ta_name, KNOT_RRTYPE_DNSKEY);
- if (!next) {
- return KR_STATE_FAIL;
- }
- return KR_STATE_DONE;
- }
-
- return KR_STATE_PRODUCE;
-}
-
-/** @internal Check current zone cut status and credibility, spawn subrequests if needed. */
-static int zone_cut_check(struct kr_request *request, struct kr_query *qry, knot_pkt_t *packet)
-/* TODO: using the cache at this point in this way just isn't nice; remove in time */
-{
- /* Stub mode, just forward and do not solve cut. */
- if (qry->flags.STUB) {
- return KR_STATE_PRODUCE;
- }
-
- /* Forwarding to upstream resolver mode.
- * Since forwarding targets are already in qry->ns,
- * cut fetching is not needed. */
- if (qry->flags.FORWARD) {
- return forward_trust_chain_check(request, qry, false);
- }
- if (!(qry->flags.AWAIT_CUT)) {
- /* The query was resolved from cache.
- * Spawn DS / DNSKEY requests if needed and exit. */
- return trust_chain_check(request, qry);
- }
-
- /* The query wasn't resolved from cache,
- * now it's time to look up the closest zone cut in the cache. */
- struct kr_cache *cache = &request->ctx->cache;
- if (!kr_cache_is_open(cache)) {
- int ret = kr_zonecut_set_sbelt(request->ctx, &qry->zone_cut);
- if (ret != 0) {
- return KR_STATE_FAIL;
- }
- VERBOSE_MSG(qry, "=> no cache open, using root hints\n");
- qry->flags.AWAIT_CUT = false;
- return KR_STATE_DONE;
- }
-
- const knot_dname_t *requested_name = qry->sname;
- /* If at/subdomain of parent zone cut, start from its encloser.
- * This is for the case when we get to a dead end
- * (and need glue from the parent), or for a DS refetch. */
- if (qry->parent) {
- const knot_dname_t *parent = qry->parent->zone_cut.name;
- if (parent[0] != '\0'
- && knot_dname_in_bailiwick(qry->sname, parent) >= 0) {
- requested_name = knot_wire_next_label(parent, NULL);
- }
- } else if ((qry->stype == KNOT_RRTYPE_DS) && (qry->sname[0] != '\0')) {
- /* If this is explicit DS query, start from encloser too. */
- requested_name = knot_wire_next_label(requested_name, NULL);
- }
-
- int state = KR_STATE_FAIL;
- do {
- state = ns_fetch_cut(qry, requested_name, request, packet);
- if (state == KR_STATE_DONE || (state & KR_STATE_FAIL)) {
- return state;
- } else if (state == KR_STATE_CONSUME) {
- requested_name = knot_wire_next_label(requested_name, NULL);
- }
- } while (state == KR_STATE_CONSUME);
-
- /* Update minimized QNAME if zone cut changed */
- if (qry->zone_cut.name && qry->zone_cut.name[0] != '\0' && !(qry->flags.NO_MINIMIZE)) {
- if (kr_make_query(qry, packet) != 0) {
- return KR_STATE_FAIL;
- }
- }
- qry->flags.AWAIT_CUT = false;
-
- /* Check trust chain */
- return trust_chain_check(request, qry);
-}
-
-
-static int ns_resolve_addr(struct kr_query *qry, struct kr_request *param, struct kr_transport *transport, uint16_t next_type)
-{
- struct kr_rplan *rplan = &param->rplan;
- struct kr_context *ctx = param->ctx;
-
-
- /* Start NS queries from the root, to avoid certain cases
- * where an NS drops out of cache and the rest is unavailable;
- * that would lead to a dependency loop in the current zone cut.
- */
-
- /* Bail out if the query is already pending or dependency loop. */
- if (!next_type || kr_rplan_satisfies(qry->parent, transport->ns_name, KNOT_CLASS_IN, next_type)) {
- /* Fall back to SBELT if root server query fails. */
- if (!next_type && qry->zone_cut.name[0] == '\0') {
- VERBOSE_MSG(qry, "=> fallback to root hints\n");
- kr_zonecut_set_sbelt(ctx, &qry->zone_cut);
- return kr_error(EAGAIN);
- }
- /* No IPv4 nor IPv6, flag server as unusable. */
- VERBOSE_MSG(qry, "=> unresolvable NS address, bailing out\n");
- kr_zonecut_del_all(&qry->zone_cut, transport->ns_name);
- return kr_error(EHOSTUNREACH);
- }
- /* Push new query to the resolution plan */
- struct kr_query *next =
- kr_rplan_push(rplan, qry, transport->ns_name, KNOT_CLASS_IN, next_type);
- if (!next) {
- return kr_error(ENOMEM);
- }
- next->flags.NONAUTH = true;
-
- /* At the root level with no NS addresses, add SBELT subrequest. */
- int ret = 0;
- if (qry->zone_cut.name[0] == '\0') {
- ret = kr_zonecut_set_sbelt(ctx, &next->zone_cut);
- if (ret == 0) { /* Copy TA and key since it's the same cut to avoid lookup. */
- kr_zonecut_copy_trust(&next->zone_cut, &qry->zone_cut);
- kr_zonecut_set_sbelt(ctx, &qry->zone_cut); /* Add SBELT to parent in case query fails. */
- }
- } else {
- next->flags.AWAIT_CUT = true;
- }
-
- if (ret == 0) {
- if (next_type == KNOT_RRTYPE_AAAA) {
- qry->flags.AWAIT_IPV6 = true;
- } else {
- qry->flags.AWAIT_IPV4 = true;
- }
- }
-
- return ret;
-}
-
-int kr_resolve_produce(struct kr_request *request, struct kr_transport **transport, knot_pkt_t *packet)
-{
- struct kr_rplan *rplan = &request->rplan;
-
- /* No query left for resolution */
- if (kr_rplan_empty(rplan)) {
- return KR_STATE_FAIL;
- }
-
- struct kr_query *qry = array_tail(rplan->pending);
-
- /* Initialize server selection */
- if (!qry->server_selection.initialized) {
- kr_server_selection_init(qry);
- }
-
- /* If we have deferred answers, resume them. */
- if (qry->deferred != NULL) {
- /* @todo: Refactoring validator, check trust chain before resuming. */
- int state = 0;
- if (((qry->flags.FORWARD) == 0) ||
- ((qry->stype == KNOT_RRTYPE_DS) && (qry->flags.CNAME))) {
- state = trust_chain_check(request, qry);
- } else {
- state = forward_trust_chain_check(request, qry, true);
- }
-
- switch(state) {
- case KR_STATE_FAIL: return KR_STATE_FAIL;
- case KR_STATE_DONE: return KR_STATE_PRODUCE;
- default: break;
- }
- VERBOSE_MSG(qry, "=> resuming yielded answer\n");
- struct kr_layer_pickle *pickle = qry->deferred;
- request->state = KR_STATE_YIELD;
- set_yield(&request->answ_selected, qry->uid, false);
- set_yield(&request->auth_selected, qry->uid, false);
- RESUME_LAYERS(layer_id(request, pickle->api), request, qry, consume, pickle->pkt);
- if (request->state != KR_STATE_YIELD) {
- /* No new deferred answers, take the next */
- qry->deferred = pickle->next;
- }
- } else {
- /* The caller is interested in always tracking a zone cut, even if the answer is cached;
- * this is normally not required and incurs additional cache lookups for cached answers. */
- if (qry->flags.ALWAYS_CUT) {
- if (!(qry->flags.STUB)) {
- switch(zone_cut_check(request, qry, packet)) {
- case KR_STATE_FAIL: return KR_STATE_FAIL;
- case KR_STATE_DONE: return KR_STATE_PRODUCE;
- default: break;
- }
- }
- }
- /* Resolve current query and produce dependent or finish */
- request->state = KR_STATE_PRODUCE;
- ITERATE_LAYERS(request, qry, produce, packet);
- if (!(request->state & KR_STATE_FAIL) && knot_wire_get_qr(packet->wire)) {
- /* Produced an answer from cache, consume it. */
- qry->secret = 0;
- request->state = KR_STATE_CONSUME;
- ITERATE_LAYERS(request, qry, consume, packet);
- }
- }
- switch(request->state) {
- case KR_STATE_FAIL: return request->state;
- case KR_STATE_CONSUME: break;
- case KR_STATE_DONE:
- default: /* Current query is done */
- if (qry->flags.RESOLVED && request->state != KR_STATE_YIELD) {
- kr_rplan_pop(rplan, qry);
- }
- ITERATE_LAYERS(request, qry, reset);
- return kr_rplan_empty(rplan) ? KR_STATE_DONE : KR_STATE_PRODUCE;
- }
-
-
- /* This query has RD=0 or is ANY, stop here. */
- if (qry->stype == KNOT_RRTYPE_ANY ||
- !knot_wire_get_rd(request->qsource.packet->wire)) {
- VERBOSE_MSG(qry, "=> qtype is ANY or RD=0, bail out\n");
- return KR_STATE_FAIL;
- }
-
- /* Update zone cut, spawn new subrequests. */
- if (!(qry->flags.STUB)) {
- int state = zone_cut_check(request, qry, packet);
- switch(state) {
- case KR_STATE_FAIL: return KR_STATE_FAIL;
- case KR_STATE_DONE: return KR_STATE_PRODUCE;
- default: break;
- }
- }
-
-
- const struct kr_qflags qflg = qry->flags;
- const bool retry = qflg.TCP || qflg.BADCOOKIE_AGAIN;
- if (!qflg.FORWARD && !qflg.STUB && !retry) { /* Keep NS when requerying/stub/badcookie. */
- /* Root DNSKEY must be fetched from the hints to avoid chicken and egg problem. */
- if (qry->sname[0] == '\0' && qry->stype == KNOT_RRTYPE_DNSKEY) {
- kr_zonecut_set_sbelt(request->ctx, &qry->zone_cut);
- }
- }
-
- qry->server_selection.choose_transport(qry, transport);
-
- if (*transport == NULL) {
- /* Properly signal to serve_stale module. */
- if (qry->flags.NO_NS_FOUND) {
- ITERATE_LAYERS(request, qry, reset);
- kr_rplan_pop(rplan, qry);
- return KR_STATE_FAIL;
- } else {
- /* FIXME: This is probably quite inefficient:
- * we go through the whole qr_task_step loop just because of the serve_stale
- * module which might not even be loaded. */
- qry->flags.NO_NS_FOUND = true;
- return KR_STATE_PRODUCE;
- }
- }
-
- if ((*transport)->protocol == KR_TRANSPORT_RESOLVE_A || (*transport)->protocol == KR_TRANSPORT_RESOLVE_AAAA) {
- uint16_t type = (*transport)->protocol == KR_TRANSPORT_RESOLVE_A ? KNOT_RRTYPE_A : KNOT_RRTYPE_AAAA;
- ns_resolve_addr(qry, qry->request, *transport, type);
- ITERATE_LAYERS(request, qry, reset);
- return KR_STATE_PRODUCE;
- }
-
- /* Randomize query case (unless turned off). */
- qry->secret = qry->flags.NO_0X20 ? 0 : kr_rand_bytes(sizeof(qry->secret));
- knot_dname_t *qname_raw = kr_pkt_qname_raw(packet);
- randomized_qname_case(qname_raw, qry->secret);
-
- /*
- * Additional query is going to be finalized when calling
- * kr_resolve_checkout().
- */
- qry->timestamp_mono = kr_now();
- return request->state;
-}
-
#if ENABLE_COOKIES
/** Update DNS cookie data in packet. */
static bool outbound_request_update_cookies(struct kr_request *req,
diff --git a/lib/resolve.h b/lib/resolve.h
index d2ecada7..2ead6e26 100644
--- a/lib/resolve.h
+++ b/lib/resolve.h
@@ -16,6 +16,7 @@
#include "lib/rplan.h"
#include "lib/module.h"
#include "lib/cache/api.h"
+#include "lib/rules/api.h"
/**
* @file resolve.h
@@ -281,6 +282,7 @@ struct kr_request {
unsigned int count_no_nsaddr;
unsigned int count_fail_row;
alloc_wire_f alloc_wire_cb; /**< CB to allocate answer wire (can be NULL). */
+ kr_rule_tags_t rule_tags; /**< TagSet applying to this request. */
struct kr_extended_error extended_error; /**< EDE info; don't modify directly, use kr_request_set_extended_error() */
};
@@ -339,10 +341,13 @@ int kr_resolve_consume(struct kr_request *request, struct kr_transport **transpo
/**
* Produce either next additional query or finish.
*
- * If the CONSUME is returned then dst, type and packet will be filled with
+ * If the CONSUME state is returned, then *transport and *packet will be filled with
* appropriate values and caller is responsible to send them and receive answer.
* If it returns any other state, then content of the variables is undefined.
*
+ * Implemented in its own file, ./resolve-produce.c.
+ * FIXME: more issues in this doc-comment
+ *
* @param request request state (in PRODUCE state)
* @param dst [out] possible address of the next nameserver
* @param type [out] possible used socket type (SOCK_STREAM, SOCK_DGRAM)
@@ -359,8 +364,7 @@ int kr_resolve_produce(struct kr_request *request, struct kr_transport **transpo
*
* @param request request state (in PRODUCE state)
* @param src address from which the query is going to be sent
- * @param dst address of the name server
- * @param type used socket type (SOCK_STREAM, SOCK_DGRAM)
+ * @param transport destination server's address and other properties
* @param packet [in,out] query packet to be finalised
* @return kr_ok() or error code
*/
diff --git a/lib/rplan.h b/lib/rplan.h
index 891781fc..4998bf05 100644
--- a/lib/rplan.h
+++ b/lib/rplan.h
@@ -8,6 +8,7 @@
#include <libknot/dname.h>
#include <libknot/codes.h>
+#include "lib/rules/api.h"
#include "lib/selection.h"
#include "lib/zonecut.h"
@@ -26,7 +27,7 @@ struct kr_qflags {
bool AWAIT_IPV6 : 1; /**< Query is waiting for AAAA address. */
bool AWAIT_CUT : 1; /**< Query is waiting for zone cut lookup */
bool NO_EDNS : 1; /**< Don't use EDNS. */
- bool CACHED : 1; /**< Query response is cached. */
+ bool CACHED : 1; /**< Query response is cached (or generated locally). */
bool NO_CACHE : 1; /**< No cache for lookup; exception: finding NSs and subqueries. */
bool EXPIRING : 1; /**< Query response is cached but expiring. See is_expiring(). */
bool ALLOW_LOCAL : 1; /**< Allow queries to local or private address ranges. */
@@ -36,7 +37,8 @@ struct kr_qflags {
bool DNSSEC_INSECURE : 1;/**< Query response is DNSSEC insecure. */
bool DNSSEC_CD : 1; /**< Instruction to set CD bit in request. */
bool STUB : 1; /**< Stub resolution, accept received answer as solved. */
- bool ALWAYS_CUT : 1; /**< Always recover zone cut (even if cached). */
+ bool ALWAYS_CUT : 1; /**< Always recover zone cut (even if cached).
+ * This flag might be broken and/or not useful anymore. */
bool DNSSEC_WEXPAND : 1; /**< Query response has wildcard expansion. */
bool PERMISSIVE : 1; /**< Permissive resolver mode. */
bool STRICT : 1; /**< Strict resolver mode. */
@@ -56,6 +58,7 @@ struct kr_qflags {
bool PKT_IS_SANE : 1; /**< Set by iterator in consume phase to indicate whether
* some basic aspects of the packet are OK, e.g. QNAME. */
bool DNS64_DISABLE : 1; /**< Don't do any DNS64 stuff (meant for view:addr). */
+ bool PASSTHRU_LEGACY : 1;/**< Ignore local-data overrides/blocks for this kr_request. */
};
/** Combine flags together. This means set union for simple flags. */
@@ -73,8 +76,12 @@ void kr_qflags_clear(struct kr_qflags *fl1, struct kr_qflags fl2);
typedef int32_t (*kr_stale_cb)(int32_t ttl, const knot_dname_t *owner, uint16_t type,
const struct kr_query *qry);
-/**
- * Single query representation.
+/** A part of kr_request's resolution when sname and stype don't change.
+ *
+ * A kr_request can contain multiple kr_query parts. A new one is needed when:
+ * - CNAME jump occurs (or DNAME and similar)
+ * - some other records are needed to proceed, e.g. DS/DNSKEY for validation or NS addresses
+ * - generally, see kr_rplan_push() calls
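+ *
+ * Example (illustrative): resolving a name whose answer involves a CNAME
+ * typically yields one kr_query for the original sname, another for the
+ * CNAME target, plus DS/DNSKEY or NS-address subqueries as needed.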
*/
struct kr_query {
struct kr_query *parent;
@@ -95,6 +102,13 @@ struct kr_query {
struct timeval timestamp; /**< Real time for TTL+DNSSEC checks (.tv_sec only). */
struct kr_zonecut zone_cut;
struct kr_layer_pickle *deferred;
+ struct kr_query_data_src { // named struct to work around a bug in doc generator
+ bool initialized; /// !initialized -> all meaningless and zeroed
+ bool all_set;
+ uint8_t rule_depth; /// the number of labels for the apex
+ kr_rule_fwd_flags_t flags;
+ knot_db_val_t targets_ptr; /// pointer to targets inside rule DB
+ } data_src; /// information about "data source" for this sname+stype (+tags?)
/** Current xNAME depth, set by iterator. 0 = uninitialized, 1 = no CNAME, ...
* See also KR_CNAME_CHAIN_LIMIT. */
diff --git a/lib/rules/api.c b/lib/rules/api.c
new file mode 100644
index 00000000..325ed77d
--- /dev/null
+++ b/lib/rules/api.c
@@ -0,0 +1,949 @@
+/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ */
+
+#include "lib/rules/api.h"
+#include "lib/rules/impl.h"
+
+#include "lib/cache/cdb_lmdb.h"
+
+#include <stdlib.h>
+
+struct kr_rules *the_rules = NULL;
+
+/* DB key-space summary
+
+ - "\0" starts special keys like "\0rulesets" or "\0stamp"
+ - "\0tagBits" -> kr_rule_tags_t denoting the set of tags that have a name in DB
+ - "\0tag_" + tag name -> one byte with the tag's number
+ - some future additions?
+ - otherwise it's rulesets - each has a prefix, e.g. RULESET_DEFAULT,
+ its length is bounded by KEY_RULESET_MAXLEN - 1; after that prefix:
+ - KEY_EXACT_MATCH + dname_lf ended by double '\0' + KNOT_RRTYPE_FOO
+ -> exact-match rule (for the given name)
+ - KEY_ZONELIKE_A + dname_lf (no '\0' at end)
+ -> zone-like apex (on the given name)
+ - KEY_VIEW_SRC4 or KEY_VIEW_SRC6 + subnet_encode()
+ -> action-rule string; see kr_view_insert_action()
+ */
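+/* Illustrative sketch (not byte-exact): an exact-match rule for
+ * "www.example." with KNOT_RRTYPE_A in the default ruleset would use a key like
+ * "d" | "e" | "example\0www" | "\0\0" | <2 bytes of the A type>,
+ * where "example\0www" is the dname in lookup format (labels reversed). */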
+
+/*const*/ char RULESET_DEFAULT[] = "d";
+
+static const uint8_t KEY_EXACT_MATCH[1] = "e";
+static const uint8_t KEY_ZONELIKE_A [1] = "a";
+
+static const uint8_t KEY_VIEW_SRC4[1] = "4";
+static const uint8_t KEY_VIEW_SRC6[1] = "6";
+
+static int answer_exact_match(struct kr_query *qry, knot_pkt_t *pkt, uint16_t type,
+ const uint8_t *data, const uint8_t *data_bound);
+static int answer_zla_empty(val_zla_type_t type, struct kr_query *qry, knot_pkt_t *pkt,
+ knot_db_val_t zla_lf, uint32_t ttl);
+static int answer_zla_redirect(struct kr_query *qry, knot_pkt_t *pkt, const char *ruleset_name,
+ knot_db_val_t zla_lf, uint32_t ttl);
+
+// LATER: doing tag_names_default() and kr_rule_tag_add() inside a RW transaction would be better.
+static int tag_names_default(void)
+{
+ uint8_t key_tb_str[] = "\0tagBits";
+ knot_db_val_t key = { .data = key_tb_str, .len = sizeof(key_tb_str) };
+ knot_db_val_t val;
+ // Check what's in there.
+ int ret = ruledb_op(read, &key, &val, 1);
+ if (ret == 0 && !kr_fails_assert(val.data && val.len == sizeof(kr_rule_tags_t)))
+ return kr_ok(); // it's probably OK
+ if (ret != kr_error(ENOENT))
+ return kr_error(ret);
+ kr_rule_tags_t empty = 0;
+ val.data = &empty;
+ val.len = sizeof(empty);
+ return ruledb_op(write, &key, &val, 1);
+}
+
+int kr_rule_tag_add(const char *tag, kr_rule_tags_t *tagset)
+{
+ ENSURE_the_rules;
+ // Construct the DB key.
+ const uint8_t key_prefix[] = "\0tag_";
+ knot_db_val_t key;
+ knot_db_val_t val;
+ const size_t tag_len = strlen(tag);
+ key.len = sizeof(key_prefix) + tag_len;
+ uint8_t key_buf[key.len];
+ key.data = key_buf;
+ memcpy(key_buf, key_prefix, sizeof(key_prefix));
+ memcpy(key_buf + sizeof(key_prefix), tag, tag_len);
+
+ int ret = ruledb_op(read, &key, &val, 1);
+ if (ret == 0) { // tag exists already
+ uint8_t *tindex_p = val.data;
+ static_assert(KR_RULE_TAGS_CAP < (1 << 8 * sizeof(*tindex_p)),
+ "bad combination of constants");
+ if (kr_fails_assert(val.data && val.len == 1
+ && *tindex_p < KR_RULE_TAGS_CAP)) {
+ kr_log_error(RULES, "ERROR: invalid length: %d\n", (int)val.len);
+ return kr_error(EILSEQ);
+ }
+ *tagset |= ((kr_rule_tags_t)1 << *tindex_p); // cast before shifting: the bit may not fit into int
+ return kr_ok();
+ } else if (ret != kr_error(ENOENT)) {
+ return ret;
+ }
+
+ // We need to add it as a new tag. First find the bitmap of named tags.
+ uint8_t key_tb_str[] = "\0tagBits";
+ knot_db_val_t key_tb = { .data = key_tb_str, .len = sizeof(key_tb_str) };
+ ret = ruledb_op(read, &key_tb, &val, 1);
+ if (ret != 0)
+ return kr_error(ret);
+ if (kr_fails_assert(val.data && val.len == sizeof(kr_rule_tags_t))) {
+ kr_log_error(RULES, "ERROR: invalid length: %d\n", (int)val.len);
+ return kr_error(EILSEQ);
+ }
+ kr_rule_tags_t bmp;
+ memcpy(&bmp, val.data, sizeof(bmp));
+ // Find a free index.
+ static_assert(sizeof(long long) >= sizeof(bmp), "bad combination of constants");
+ int ix = ffsll(~bmp) - 1;
+ if (ix < 0 || ix >= 8 * sizeof(bmp))
+ return kr_error(E2BIG);
+ const kr_rule_tags_t tag_new = (kr_rule_tags_t)1 << ix; // cast: ix may exceed the width of int
+ kr_require((tag_new & bmp) == 0);
+
+ // Update the mappings
+ bmp |= tag_new;
+ val.data = &bmp;
+ val.len = sizeof(bmp);
+ ret = ruledb_op(write, &key_tb, &val, 1);
+ if (ret != 0)
+ return kr_error(ret);
+ uint8_t ix_8t = ix;
+ val.data = &ix_8t;
+ val.len = sizeof(ix_8t);
+ ret = ruledb_op(write, &key, &val, 1); // key remained correct
+ if (ret != 0)
+ return kr_error(ret);
+ *tagset |= tag_new;
+ return kr_ok();
+}
+
+
+int kr_rules_init_ensure(void)
+{
+ if (the_rules)
+ return kr_ok();
+ return kr_rules_init(NULL, 0);
+}
+int kr_rules_init(const char *path, size_t maxsize)
+{
+ if (the_rules)
+ return kr_error(EINVAL);
+ the_rules = calloc(1, sizeof(*the_rules));
+ kr_require(the_rules);
+ the_rules->api = kr_cdb_lmdb();
+
+ struct kr_cdb_opts opts = {
+ .is_cache = false,
+ .path = path ? path : "ruledb", // under current workdir
+ // FIXME: the file will be sparse, but we still need to choose its size somehow.
+ // Later we might improve it to auto-resize in case of running out of space.
+ // Caveat: mdb_env_set_mapsize() can only be called without transactions open.
+ .maxsize = maxsize ? maxsize : 100 * 1024*(size_t)1024,
+ };
+ int ret = the_rules->api->open(&the_rules->db, &the_rules->stats, &opts, NULL);
+ /* No persistence - we always refill from config for now.
+ * LATER:
+ * - Make it include versioning?
+ * - "\0stamp" key when loading config(s)?
+ * - Don't clear ruleset data that doesn't come directly from config;
+ * and add marks for that, etc.
+ * (after there actually are any kinds of rules like that)
+ */
+ if (ret == 0) ret = ruledb_op(clear);
+ if (ret != 0) goto failure;
+ kr_require(the_rules->db);
+
+ ret = tag_names_default();
+ if (ret != 0) goto failure;
+
+ ret = rules_defaults_insert();
+ if (ret != 0) goto failure;
+
+ /* Activate one default ruleset. */
+ uint8_t key_rs[] = "\0rulesets";
+ knot_db_val_t key = { .data = key_rs, .len = sizeof(key_rs) };
+ knot_db_val_t rulesets = { .data = &RULESET_DEFAULT, .len = strlen(RULESET_DEFAULT) + 1 };
+ ret = ruledb_op(write, &key, &rulesets, 1);
+ if (ret == 0) return kr_ok();
+failure:
+ free(the_rules);
+ the_rules = NULL;
+ auto_free const char *path_abs = kr_absolutize_path(".", opts.path);
+ kr_log_error(RULES, "failed while opening or initializing rule DB %s/\n", path_abs);
+ return ret;
+}
+
+void kr_rules_deinit(void)
+{
+ if (!the_rules) return;
+ ruledb_op(close);
+ free(the_rules);
+ the_rules = NULL;
+}
+
+int kr_rules_commit(bool accept)
+{
+ if (!the_rules) return kr_error(EINVAL);
+ return ruledb_op(commit, accept);
+}
+
+static bool kr_rule_consume_tags(knot_db_val_t *val, const struct kr_request *req)
+{
+ kr_rule_tags_t tags;
+ if (deserialize_fails_assert(val, &tags)) {
+ val->len = 0;
+ /* We may not fail immediately, but further processing
+ * will fail anyway due to zero remaining length. */
+ return false;
+ }
+ return tags == KR_RULE_TAGS_ALL || (tags & req->rule_tags);
+}
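+/* Note: tags == KR_RULE_TAGS_ALL (i.e. 0) makes a rule apply to every request;
+ * otherwise the rule matches iff it shares at least one tag with req->rule_tags. */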
+
+
+
+
+
+
+/** Write the name in lookup format at the fixed end-position inside key_data.
+ *
+ * Note: key_data[KEY_DNAME_END_OFFSET] = '\0' even though it's
+ * not always used as a part of the key. */
+static inline uint8_t * key_dname_lf(const knot_dname_t *name, uint8_t key_data[KEY_MAXLEN])
+{
+ return knot_dname_lf(name, key_data + KEY_RULESET_MAXLEN + 1)
+ // FIXME: recheck
+ + (name[0] == '\0' ? 0 : 1);
+}
+
+/** Return length of the common prefix of two strings (knot_db_val_t). */
+static size_t key_common_prefix(knot_db_val_t k1, knot_db_val_t k2)
+{
+ const size_t len = MIN(k1.len, k2.len);
+ const uint8_t *data1 = k1.data, *data2 = k2.data;
+ kr_require(len == 0 || (data1 && data2));
+ for (ssize_t i = 0; i < len; ++i) {
+ if (data1[i] != data2[i])
+ return i;
+ }
+ return len;
+}
+
+/** Find common "subtree" of two strings that both end in a dname_lf ('\0' terminator excluded).
+ *
+ * \return index pointing at the '\0' ending the last matching label
+ * (possibly the virtual '\0' just past the end of either string),
+ * or, if no LF label matches, the index of the first differing character.
+ * Function reviewed thoroughly, including the dependency.
+ */
+static size_t key_common_subtree(knot_db_val_t k1, knot_db_val_t k2, size_t lf_start_i)
+{
+ ssize_t i = key_common_prefix(k1, k2);
+ const char *data1 = k1.data, *data2 = k2.data;
+ // beware: '\0' at the end is excluded, so we need to handle ends separately
+ if (i <= lf_start_i
+ || (i == k1.len && i == k2.len)
+ || (i == k1.len && data2[i] == '\0')
+ || (i == k2.len && data1[i] == '\0')) {
+ return i;
+ }
+ do {
+ --i;
+ if (i <= lf_start_i)
+ return i;
+ if (data2[i] == '\0')
+ return i;
+ } while (true);
+}
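+/* Illustrative example (with lf_start_i = 0): for
+ * k1 = "com\0example\0www" and k2 = "com\0example\0mail"
+ * the first difference is at index 12; stepping back finds the '\0' at
+ * index 11 that ends the last common label "example", so 11 is returned. */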
+
+int rule_local_data_answer(struct kr_query *qry, knot_pkt_t *pkt)
+{
+ // return shorthands; see doc-comment for kr_rule_local_data_answer()
+ static const int RET_CONT_CACHE = 0;
+ static const int RET_ANSWERED = 1;
+
+ kr_require(the_rules);
+ // TODO: implement EDE codes somehow
+
+ //if (kr_fails_assert(!qry->data_src.initialized)) // low-severity assertion
+ if (qry->data_src.initialized) // TODO: why does it happen?
+ memset(&qry->data_src, 0, sizeof(qry->data_src));
+
+ const uint16_t rrtype = qry->stype;
+
+ // Init the SNAME-based part of key; it's pretty static.
+ uint8_t key_data[KEY_MAXLEN];
+ knot_db_val_t key;
+ key.data = key_dname_lf(qry->sname, key_data);
+ key_data[KEY_DNAME_END_OFFSET + 1] = '\0'; // double zero
+ key.data -= sizeof(KEY_EXACT_MATCH);
+
+ int ret;
+
+ // Init code for managing the ruleset part of the key.
+ // LATER(optim.): we might cache the ruleset list a bit
+ uint8_t * const key_data_ruleset_end = key.data;
+ knot_db_val_t rulesets = { NULL, 0 };
+ {
+ uint8_t key_rs[] = "\0rulesets";
+ knot_db_val_t key_rsk = { .data = key_rs, .len = sizeof(key_rs) };
+ ret = ruledb_op(read, &key_rsk, &rulesets, 1);
+ }
+ if (ret == kr_error(ENOENT)) return RET_CONT_CACHE; // no rulesets -> no rule used
+ if (ret != 0) return kr_error(ret);
+ const char *rulesets_str = rulesets.data;
+
+ // Iterate over all rulesets.
+ while (rulesets.len > 0) {
+ const char * const ruleset_name = rulesets_str;
+ { // Write ruleset-specific prefix of the key.
+ const size_t rsp_len = strnlen(rulesets_str, rulesets.len);
+ kr_require(rsp_len <= KEY_RULESET_MAXLEN - 1);
+ key.data = key_data_ruleset_end - rsp_len;
+ memcpy(key.data, rulesets_str, rsp_len);
+ rulesets_str += rsp_len + 1;
+ rulesets.len -= rsp_len + 1;
+ }
+
+ // Probe for exact and CNAME rule.
+ memcpy(key_data_ruleset_end, &KEY_EXACT_MATCH, sizeof(KEY_EXACT_MATCH));
+ key.len = key_data + KEY_DNAME_END_OFFSET + 2 + sizeof(rrtype)
+ - (uint8_t *)key.data;
+ const uint16_t types[] = { rrtype, KNOT_RRTYPE_CNAME };
+ const bool want_CNAME = rrtype != KNOT_RRTYPE_CNAME
+ && rrtype != KNOT_RRTYPE_DS;
+ for (int i = 0; i < 1 + want_CNAME; ++i) {
+ memcpy(key_data + KEY_DNAME_END_OFFSET + 2, &types[i], sizeof(rrtype));
+ knot_db_val_t val;
+ // LATER: use cursor to iterate over multiple rules on the same key,
+ // testing tags on each
+ ret = ruledb_op(read, &key, &val, 1);
+ switch (ret) {
+ case -ENOENT: continue;
+ case 0: break;
+ default: return kr_error(ret);
+ }
+ if (!kr_rule_consume_tags(&val, qry->request)) continue;
+
+ // We found a rule that applies to the dname+rrtype+req.
+ ret = answer_exact_match(qry, pkt, types[i],
+ val.data, val.data + val.len);
+ return ret ? kr_error(ret) : RET_ANSWERED;
+ }
+
+ /* Find the closest zone-like apex that applies.
+ * Now the key needs one byte change and a little truncation
+ * (we may truncate repeatedly). */
+ static_assert(sizeof(KEY_ZONELIKE_A) == sizeof(KEY_EXACT_MATCH),
+ "bad combination of constants");
+ memcpy(key_data_ruleset_end, &KEY_ZONELIKE_A, sizeof(KEY_ZONELIKE_A));
+ key.len = key_data + KEY_DNAME_END_OFFSET - (uint8_t *)key.data;
+ const size_t lf_start_i = key_data_ruleset_end + sizeof(KEY_ZONELIKE_A)
+ - (const uint8_t *)key.data;
+ kr_require(lf_start_i < KEY_MAXLEN);
+ knot_db_val_t key_leq = key;
+ knot_db_val_t val;
+ if (rrtype == KNOT_RRTYPE_DS)
+ goto shorten; // parent-side type, belongs to a zone closer to the root
+ // LATER: again, use cursor to iterate over multiple rules on the same key.
+ do {
+ ret = ruledb_op(read_leq, &key_leq, &val);
+ if (ret == -ENOENT) break;
+ if (ret < 0) return kr_error(ret);
+ if (ret > 0) { // found a previous key
+ size_t cs_len = key_common_subtree(key, key_leq, lf_start_i);
+ if (cs_len < lf_start_i) // no suitable key can exist in DB
+ break;
+ if (cs_len < key_leq.len) { // retry at the common subtree
+ key_leq.len = cs_len;
+ continue;
+ }
+ kr_assert(cs_len == key_leq.len);
+ }
+ const knot_db_val_t zla_lf = {
+ .data = key_leq.data + lf_start_i,
+ .len = key_leq.len - lf_start_i,
+ };
+ // Found some good key, now check tags.
+ if (!kr_rule_consume_tags(&val, qry->request)) {
+ kr_assert(key_leq.len >= lf_start_i);
+ shorten:
+ // Shorten key_leq by one label and retry.
+ if (key_leq.len <= lf_start_i) // nowhere to shorten
+ break;
+ const char *data = key_leq.data;
+ while (key_leq.len > lf_start_i && data[--key_leq.len] != '\0') ;
+ continue;
+ }
+ // Tags OK; get ZLA type and deal with special _FORWARD case
+ val_zla_type_t ztype;
+ if (deserialize_fails_assert(&val, &ztype))
+ return kr_error(EILSEQ);
+ if (ztype == VAL_ZLAT_FORWARD) {
+ knot_dname_t apex_name[KNOT_DNAME_MAXLEN];
+ ret = knot_dname_lf2wire(apex_name, zla_lf.len, zla_lf.data);
+ if (kr_fails_assert(ret > 0)) return kr_error(ret);
+ if (val.len > 0 // zero len -> default flags
+ && deserialize_fails_assert(&val, &qry->data_src.flags)) {
+ return kr_error(EILSEQ);
+ }
+
+ qry->data_src.initialized = true;
+ qry->data_src.targets_ptr = val;
+ qry->data_src.rule_depth = knot_dname_labels(apex_name, NULL);
+ return RET_CONT_CACHE;
+ }
+ // The other types optionally specify TTL.
+ uint32_t ttl = RULE_TTL_DEFAULT;
+ if (val.len >= sizeof(ttl)) // allow omitting -> can't kr_assert
+ deserialize_fails_assert(&val, &ttl);
+ if (kr_fails_assert(val.len == 0)) {
+ kr_log_error(RULES, "ERROR: unused bytes: %zu\n", val.len);
+ return kr_error(EILSEQ);
+ }
+ // Finally execute the rule.
+ switch (ztype) {
+ case VAL_ZLAT_EMPTY:
+ case VAL_ZLAT_NXDOMAIN:
+ case VAL_ZLAT_NODATA:
+ ret = answer_zla_empty(ztype, qry, pkt, zla_lf, ttl);
+ if (ret == kr_error(EAGAIN))
+ goto shorten;
+ return ret;
+ case VAL_ZLAT_REDIRECT:
+ ret = answer_zla_redirect(qry, pkt, ruleset_name, zla_lf, ttl);
+ return ret ? kr_error(ret) : RET_ANSWERED;
+ default:
+ return kr_error(EILSEQ);
+ }
+ } while (true);
+ }
+
+ return RET_CONT_CACHE;
+}
+
+/** SOA RDATA content, used as default in negative answers.
+ *
+ * It's as recommended except for using a fixed mname (for simplicity):
+ https://tools.ietf.org/html/rfc6303#section-3
+ */
+static const uint8_t soa_rdata[] = "\x09localhost\0\6nobody\7invalid\0"
+ "\0\0\0\1\0\0\x0e\x10\0\0\4\xb0\0\x09\x3a\x80\0\0\x2a\x30";
+
+#define CHECK_RET(ret) do { \
+ if ((ret) < 0) { kr_assert(false); return kr_error((ret)); } \
+} while (false)
+
+static int answer_exact_match(struct kr_query *qry, knot_pkt_t *pkt, uint16_t type,
+ const uint8_t *data, const uint8_t *data_bound)
+{
+ /* Extract ttl from data. */
+ uint32_t ttl;
+ if (kr_fails_assert(data + sizeof(ttl) <= data_bound))
+ return kr_error(EILSEQ);
+ memcpy(&ttl, data, sizeof(ttl));
+ data += sizeof(ttl);
+
+ /* Start constructing the (pseudo-)packet. */
+ int ret = pkt_renew(pkt, qry->sname, qry->stype);
+ CHECK_RET(ret);
+ struct answer_rrset arrset;
+ memset(&arrset, 0, sizeof(arrset));
+
+ /* Materialize the base RRset.
+ * Error handling: we assume it's OK to leak a bit of memory from pkt->mm. */
+ arrset.set.rr = knot_rrset_new(qry->sname, type, KNOT_CLASS_IN, ttl, &pkt->mm);
+ if (kr_fails_assert(arrset.set.rr))
+ return kr_error(ENOMEM);
+ ret = rdataset_materialize(&arrset.set.rr->rrs, data, data_bound, &pkt->mm);
+ CHECK_RET(ret);
+ data += ret;
+ arrset.set.rank = KR_RANK_SECURE | KR_RANK_AUTH; // local data has high trust
+ arrset.set.expiring = false;
+ /* Materialize the RRSIG RRset for the answer in (pseudo-)packet.
+ * (There will almost never be any RRSIG.) */
+ ret = rdataset_materialize(&arrset.sig_rds, data, data_bound, &pkt->mm);
+ CHECK_RET(ret);
+ data += ret;
+
+ /* Sanity check: we consumed exactly all data. */
+ const int unused_bytes = data_bound - data;
+ if (kr_fails_assert(unused_bytes == 0)) {
+ kr_log_error(RULES, "ERROR: unused bytes: %d\n", unused_bytes);
+ return kr_error(EILSEQ);
+ }
+
+ /* Special NODATA sub-case. */
+ knot_rrset_t *rr = arrset.set.rr;
+ const int is_nodata = rr->rrs.count == 0;
+ if (is_nodata) {
+ if (kr_fails_assert(type == KNOT_RRTYPE_CNAME && arrset.sig_rds.count == 0))
+ return kr_error(EILSEQ);
+ rr->type = KNOT_RRTYPE_SOA;
+ ret = knot_rrset_add_rdata(rr, soa_rdata, sizeof(soa_rdata) - 1, &pkt->mm);
+ CHECK_RET(ret);
+ ret = knot_pkt_begin(pkt, KNOT_AUTHORITY);
+ CHECK_RET(ret);
+ }
+
+ /* Put links to the materialized data into the pkt. */
+ knot_wire_set_rcode(pkt->wire, KNOT_RCODE_NOERROR);
+ ret = pkt_append(pkt, &arrset);
+ CHECK_RET(ret);
+
+ /* Finishing touches. */
+ qry->flags.EXPIRING = false;
+ qry->flags.CACHED = true;
+ qry->flags.NO_MINIMIZE = true;
+
+ VERBOSE_MSG(qry, "=> satisfied by local data (%s)\n",
+ is_nodata ? "no data" : "positive");
+ return kr_ok();
+}
+
+knot_db_val_t local_data_key(const knot_rrset_t *rrs, uint8_t key_data[KEY_MAXLEN],
+ const char *ruleset_name)
+{
+ knot_db_val_t key;
+ key.data = key_dname_lf(rrs->owner, key_data);
+ key_data[KEY_DNAME_END_OFFSET + 1] = '\0'; // double zero
+
+ key.data -= sizeof(KEY_EXACT_MATCH);
+ memcpy(key.data, &KEY_EXACT_MATCH, sizeof(KEY_EXACT_MATCH));
+
+ const size_t rsp_len = strlen(ruleset_name);
+ key.data -= rsp_len;
+ memcpy(key.data, ruleset_name, rsp_len);
+
+ memcpy(key_data + KEY_DNAME_END_OFFSET + 2, &rrs->type, sizeof(rrs->type));
+ key.len = key_data + KEY_DNAME_END_OFFSET + 2 + sizeof(rrs->type)
+ - (uint8_t *)key.data;
+ return key;
+}
+int kr_rule_local_data_ins(const knot_rrset_t *rrs, const knot_rdataset_t *sig_rds,
+ kr_rule_tags_t tags)
+{
+ ENSURE_the_rules;
+ // Construct the DB key.
+ uint8_t key_data[KEY_MAXLEN];
+ knot_db_val_t key = local_data_key(rrs, key_data, RULESET_DEFAULT);
+ return local_data_ins(key, rrs, sig_rds, tags);
+}
+int local_data_ins(knot_db_val_t key, const knot_rrset_t *rrs,
+ const knot_rdataset_t *sig_rds, kr_rule_tags_t tags)
+{
+ // Allocate the data in DB.
+ const int rr_ssize = rdataset_dematerialize_size(&rrs->rrs);
+ const int to_alloc = sizeof(tags) + sizeof(rrs->ttl) + rr_ssize
+ + rdataset_dematerialize_size(sig_rds);
+ knot_db_val_t val = { .data = NULL, .len = to_alloc };
+ int ret = ruledb_op(write, &key, &val, 1);
+ if (ret) {
+ // ENOSPC seems to be the only expectable error.
+ kr_assert(ret == kr_error(ENOSPC));
+ return kr_error(ret);
+ }
+
+ // Write all the data.
+ memcpy(val.data, &tags, sizeof(tags));
+ val.data += sizeof(tags);
+ memcpy(val.data, &rrs->ttl, sizeof(rrs->ttl));
+ val.data += sizeof(rrs->ttl);
+ rdataset_dematerialize(&rrs->rrs, val.data);
+ val.data += rr_ssize;
+ rdataset_dematerialize(sig_rds, val.data);
+ return kr_ok();
+}
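+/* For reference, the value layout written above is:
+ * kr_rule_tags_t | uint32_t ttl | dematerialized RRs | dematerialized RRSIGs;
+ * kr_rule_consume_tags() strips the tags and answer_exact_match()
+ * consumes the rest in the same order. */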
+int kr_rule_local_data_del(const knot_rrset_t *rrs, kr_rule_tags_t tags)
+{
+ ENSURE_the_rules;
+ uint8_t key_data[KEY_MAXLEN];
+ knot_db_val_t key = local_data_key(rrs, key_data, RULESET_DEFAULT);
+ return ruledb_op(remove, &key, 1);
+}
+
+/** Empty or NXDOMAIN or NODATA. Returning kr_error(EAGAIN) means the rule didn't match. */
+static int answer_zla_empty(val_zla_type_t type, struct kr_query *qry, knot_pkt_t *pkt,
+ const knot_db_val_t zla_lf, uint32_t ttl)
+{
+ if (kr_fails_assert(type == VAL_ZLAT_EMPTY || type == VAL_ZLAT_NXDOMAIN
+ || type == VAL_ZLAT_NODATA))
+ return kr_error(EINVAL);
+
+ knot_dname_t apex_name[KNOT_DNAME_MAXLEN];
+ int ret = knot_dname_lf2wire(apex_name, zla_lf.len, zla_lf.data);
+ CHECK_RET(ret);
+
+ const bool hit_apex = knot_dname_is_equal(qry->sname, apex_name);
+ if (hit_apex && type == VAL_ZLAT_NODATA)
+ return kr_error(EAGAIN);
+
+ /* Start constructing the (pseudo-)packet. */
+ ret = pkt_renew(pkt, qry->sname, qry->stype);
+ CHECK_RET(ret);
+ struct answer_rrset arrset;
+ memset(&arrset, 0, sizeof(arrset));
+
+ /* Construct SOA or NS data (hardcoded content). _EMPTY has a proper zone apex. */
+ const bool want_NS = hit_apex && type == VAL_ZLAT_EMPTY && qry->stype == KNOT_RRTYPE_NS;
+ arrset.set.rr = knot_rrset_new(apex_name, want_NS ? KNOT_RRTYPE_NS : KNOT_RRTYPE_SOA,
+ KNOT_CLASS_IN, ttl, &pkt->mm);
+ if (kr_fails_assert(arrset.set.rr))
+ return kr_error(ENOMEM);
+ if (want_NS) {
+ kr_require(zla_lf.len + 2 == knot_dname_size(apex_name));
+ // TODO: maybe it's weird to use this NS name, but what else?
+ ret = knot_rrset_add_rdata(arrset.set.rr, apex_name, zla_lf.len + 2, &pkt->mm);
+ } else {
+ ret = knot_rrset_add_rdata(arrset.set.rr, soa_rdata,
+ sizeof(soa_rdata) - 1, &pkt->mm);
+ }
+ CHECK_RET(ret);
+ arrset.set.rank = KR_RANK_SECURE | KR_RANK_AUTH; // local data has high trust
+ arrset.set.expiring = false;
+
+ /* Small differences if we exactly hit the name or even type. */
+ if (type == VAL_ZLAT_NODATA || (type == VAL_ZLAT_EMPTY && hit_apex)) {
+ knot_wire_set_rcode(pkt->wire, KNOT_RCODE_NOERROR);
+ } else {
+ knot_wire_set_rcode(pkt->wire, KNOT_RCODE_NXDOMAIN);
+ }
+ if (type == VAL_ZLAT_EMPTY && hit_apex
+ && (qry->stype == KNOT_RRTYPE_SOA || qry->stype == KNOT_RRTYPE_NS)) {
+ ret = knot_pkt_begin(pkt, KNOT_ANSWER);
+ } else {
+ ret = knot_pkt_begin(pkt, KNOT_AUTHORITY);
+ }
+ CHECK_RET(ret);
+
+ /* Put links to the RR into the pkt. */
+ ret = pkt_append(pkt, &arrset);
+ CHECK_RET(ret);
+
+ /* Finishing touches. */
+ qry->flags.EXPIRING = false;
+ qry->flags.CACHED = true;
+ qry->flags.NO_MINIMIZE = true;
+
+ VERBOSE_MSG(qry, "=> satisfied by local data (%s zone)\n",
+ type == VAL_ZLAT_EMPTY ? "empty" : "nxdomain");
+ return kr_ok();
+}
+
+static int answer_zla_redirect(struct kr_query *qry, knot_pkt_t *pkt, const char *ruleset_name,
+ const knot_db_val_t zla_lf, uint32_t ttl)
+{
+ VERBOSE_MSG(qry, "=> redirecting by local data\n"); // lazy to get the zone name
+
+ knot_dname_t apex_name[KNOT_DNAME_MAXLEN];
+ int ret = knot_dname_lf2wire(apex_name, zla_lf.len, zla_lf.data);
+ CHECK_RET(ret);
+ const bool name_matches = knot_dname_is_equal(qry->sname, apex_name);
+ if (name_matches || qry->stype == KNOT_RRTYPE_NS || qry->stype == KNOT_RRTYPE_SOA)
+ goto nodata;
+
+ // Reconstruct the DB key from scratch.
+ knot_rrset_t rrs;
+ knot_rrset_init(&rrs, apex_name, qry->stype, 0, 0); // 0 are unused
+ uint8_t key_data[KEY_MAXLEN];
+ knot_db_val_t key = local_data_key(&rrs, key_data, ruleset_name);
+
+ knot_db_val_t val;
+ ret = ruledb_op(read, &key, &val, 1);
+ switch (ret) {
+ case -ENOENT: goto nodata;
+ case 0: break;
+ default: return ret;
+ }
+ if (kr_rule_consume_tags(&val, qry->request)) // found a match
+ return answer_exact_match(qry, pkt, qry->stype,
+ val.data, val.data + val.len);
+
+nodata: // Want NODATA answer (or NOERROR if it hits apex SOA).
+ // Start constructing the (pseudo-)packet.
+ ret = pkt_renew(pkt, qry->sname, qry->stype);
+ CHECK_RET(ret);
+ struct answer_rrset arrset;
+ memset(&arrset, 0, sizeof(arrset));
+ arrset.set.rr = knot_rrset_new(apex_name, KNOT_RRTYPE_SOA,
+ KNOT_CLASS_IN, ttl, &pkt->mm);
+ if (kr_fails_assert(arrset.set.rr))
+ return kr_error(ENOMEM);
+ ret = knot_rrset_add_rdata(arrset.set.rr, soa_rdata,
+ sizeof(soa_rdata) - 1, &pkt->mm);
+ CHECK_RET(ret);
+ arrset.set.rank = KR_RANK_SECURE | KR_RANK_AUTH; // local data has high trust
+ arrset.set.expiring = false;
+
+ knot_wire_set_rcode(pkt->wire, KNOT_RCODE_NOERROR);
+ knot_section_t sec = name_matches && qry->stype == KNOT_RRTYPE_SOA
+ ? KNOT_ANSWER : KNOT_AUTHORITY;
+ ret = knot_pkt_begin(pkt, sec);
+ CHECK_RET(ret);
+
+ // Put links to the RR into the pkt.
+ ret = pkt_append(pkt, &arrset);
+ CHECK_RET(ret);
+
+ // Finishing touches.
+ qry->flags.EXPIRING = false;
+ qry->flags.CACHED = true;
+ qry->flags.NO_MINIMIZE = true;
+
+ VERBOSE_MSG(qry, "=> satisfied by local data (no data)\n");
+ return kr_ok();
+}
+
+knot_db_val_t zla_key(const knot_dname_t *apex, uint8_t key_data[KEY_MAXLEN])
+{
+ kr_require(the_rules);
+ knot_db_val_t key;
+ key.data = key_dname_lf(apex, key_data);
+
+ key.data -= sizeof(KEY_ZONELIKE_A);
+ memcpy(key.data, &KEY_ZONELIKE_A, sizeof(KEY_ZONELIKE_A));
+
+ const size_t rsp_len = strlen(RULESET_DEFAULT);
+ key.data -= rsp_len;
+ memcpy(key.data, RULESET_DEFAULT, rsp_len);
+ key.len = key_data + KEY_DNAME_END_OFFSET - (uint8_t *)key.data;
+ return key;
+}
+int insert_trivial_zone(val_zla_type_t ztype, uint32_t ttl,
+ const knot_dname_t *apex, kr_rule_tags_t tags)
+{
+ ENSURE_the_rules;
+ uint8_t key_data[KEY_MAXLEN];
+ knot_db_val_t key = zla_key(apex, key_data);
+
+ knot_db_val_t val = {
+ .data = NULL,
+ .len = sizeof(tags) + sizeof(ztype),
+ };
+ const bool has_ttl = ttl != RULE_TTL_DEFAULT;
+ if (has_ttl)
+ val.len += sizeof(ttl);
+ int ret = ruledb_op(write, &key, &val, 1);
+ if (ret) {
+ // ENOSPC seems to be the only expectable error.
+ kr_assert(ret == kr_error(ENOSPC));
+ return kr_error(ret);
+ }
+ memcpy(val.data, &tags, sizeof(tags));
+ val.data += sizeof(tags);
+ memcpy(val.data, &ztype, sizeof(ztype));
+ val.data += sizeof(ztype);
+ if (has_ttl) {
+ memcpy(val.data, &ttl, sizeof(ttl));
+ val.data += sizeof(ttl);
+ }
+ return kr_ok();
+}
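+/* For reference, the zone-like-apex value layout written above is:
+ * kr_rule_tags_t | val_zla_type_t | optional uint32_t ttl (only when non-default). */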
+
+int kr_rule_local_data_emptyzone(const knot_dname_t *apex, kr_rule_tags_t tags)
+{
+ return insert_trivial_zone(VAL_ZLAT_EMPTY, RULE_TTL_DEFAULT, apex, tags);
+}
+int kr_rule_local_data_nxdomain(const knot_dname_t *apex, kr_rule_tags_t tags)
+{
+ return insert_trivial_zone(VAL_ZLAT_NXDOMAIN, RULE_TTL_DEFAULT, apex, tags);
+}
+int kr_rule_local_data_nodata(const knot_dname_t *apex, kr_rule_tags_t tags)
+{
+ return insert_trivial_zone(VAL_ZLAT_NODATA, RULE_TTL_DEFAULT, apex, tags);
+}
+int kr_rule_local_data_redirect(const knot_dname_t *apex, kr_rule_tags_t tags)
+{
+ return insert_trivial_zone(VAL_ZLAT_REDIRECT, RULE_TTL_DEFAULT, apex, tags);
+}
+
+
+/** Encode a subnet into a (longer) string.
+ *
+ * The point is to have different encodings for different subnets,
+ * while using strings of whole bytes (e.g. to distinguish ::/1 from ::/2).
+ * And we need to preserve order: FIXME description
+ * - natural partial order on subnets: one included in another
+ * - partial order on strings: one being a prefix of another
+ * - this implies lexicographical order on the encoded strings
+ *
+ * Consequently, given a set of subnets, the most specific one containing
+ * an address can be found by a lexicographic less-or-equal search
+ * over the encoded strings (see kr_view_select_action() below).
+ */
+static int subnet_encode(const struct sockaddr *addr, int sub_len, uint8_t buf[32])
+{
+ const int len = kr_inaddr_len(addr);
+ if (kr_fails_assert(len > 0))
+ return kr_error(len);
+ if (kr_fails_assert(sub_len >= 0 && sub_len <= 8 * len))
+ return kr_error(EINVAL);
+ const uint8_t *a = (const uint8_t *)/*sign*/kr_inaddr(addr);
+
+ // Algo: interleave bits of the address. Bit pairs:
+ // - 00 -> beyond the subnet's prefix
+ // - 10 -> zero bit within the subnet's prefix
+ // - 11 -> one bit within the subnet's prefix
+ // Multiplying one uint8_t by 01010101 (in binary) will do interleaving.
+ int i;
+ // Let's hope that compiler optimizes this into something reasonable.
+ for (i = 0; sub_len > 0; ++i, sub_len -= 8) {
+ uint16_t x = a[i] * 85; // interleave by zero bits
+ uint8_t sub_mask = 255 >> (8 - MIN(sub_len, 8));
+ uint16_t r = x | (sub_mask * 85 * 2);
+ buf[2*i] = r / 256;
+ buf[2*i + 1] = r % 256;
+ }
+ return i * 2;
+}
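+/* Worked example per the pair scheme above (a sketch, assuming MSB-first
+ * pairing): a /3 prefix with bits 101 encodes as pairs 11 10 11 followed by
+ * 00 pairs, i.e. the two bytes 0xEC 0x00. A /16 IPv4 subnet encodes to four
+ * bytes that are a byte-wise prefix of the eight-byte /32 encoding of any
+ * address inside it, which is what makes the less-or-equal search work. */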
+
+// Is `a` a subnet-prefix of `b`? (both bytes in the format of subnet_encode())
+bool subnet_is_prefix(uint8_t a, uint8_t b)
+{
+ while (true) {
+ if (a >> 6 == 0)
+ return true;
+ if (a >> 6 != b >> 6) {
+ kr_assert(b >> 6 != 0);
+ return false;
+ }
+ a = (a << 2) & 0xff;
+ b = (b << 2) & 0xff;
+ }
+}
+
+#define KEY_PREPEND(key, arr) do { \
+ key.data -= sizeof(arr); \
+ key.len += sizeof(arr); \
+ memcpy(key.data, arr, sizeof(arr)); \
+ } while (false)
+
+int kr_view_insert_action(const char *subnet, const char *action)
+{
+ ENSURE_the_rules;
+ // Parse the subnet string.
+ union kr_sockaddr saddr;
+ saddr.ip.sa_family = kr_straddr_family(subnet);
+ int bitlen = kr_straddr_subnet((char *)/*const-cast*/kr_inaddr(&saddr.ip), subnet);
+ if (bitlen < 0) return kr_error(bitlen);
+
+ // Init the addr-based part of key.
+ uint8_t key_data[KEY_MAXLEN];
+ knot_db_val_t key;
+ key.data = &key_data[KEY_RULESET_MAXLEN];
+ key.len = subnet_encode(&saddr.ip, bitlen, key.data);
+ switch (saddr.ip.sa_family) {
+ case AF_INET: KEY_PREPEND(key, KEY_VIEW_SRC4); break;
+ case AF_INET6: KEY_PREPEND(key, KEY_VIEW_SRC6); break;
+ default: kr_assert(false); return kr_error(EINVAL);
+ }
+
+ { // Write ruleset-specific prefix of the key.
+ const size_t rsp_len = strlen(RULESET_DEFAULT);
+ key.data -= rsp_len;
+ memcpy(key.data, RULESET_DEFAULT, rsp_len);
+ }
+
+ // Insert & commit.
+ knot_db_val_t val = {
+ .data = (void *)/*const-cast*/action,
+ .len = strlen(action),
+ };
+ return ruledb_op(write, &key, &val, 1);
+}
+
+int kr_view_select_action(const struct kr_request *req, knot_db_val_t *selected)
+{
+ kr_require(the_rules);
+ const struct sockaddr * const addr = req->qsource.addr;
+ if (!addr) return kr_error(ENOENT); // internal request; LATER: act somehow?
+
+ // Init the addr-based part of key; it's pretty static.
+ uint8_t key_data[KEY_MAXLEN];
+ knot_db_val_t key;
+ key.data = &key_data[KEY_RULESET_MAXLEN];
+ key.len = subnet_encode(addr, kr_inaddr_len(addr) * 8, key.data);
+ switch (kr_inaddr_family(addr)) {
+ case AF_INET: KEY_PREPEND(key, KEY_VIEW_SRC4); break;
+ case AF_INET6: KEY_PREPEND(key, KEY_VIEW_SRC6); break;
+ default: kr_assert(false); return kr_error(EINVAL);
+ }
+
+ int ret;
+
+ // Init code for managing the ruleset part of the key.
+ // LATER(optim.): we might cache the ruleset list a bit
+ uint8_t * const key_data_ruleset_end = key.data;
+ knot_db_val_t rulesets = { NULL, 0 };
+ {
+ uint8_t key_rs[] = "\0rulesets";
+ knot_db_val_t key_rsk = { .data = key_rs, .len = sizeof(key_rs) };
+ ret = ruledb_op(read, &key_rsk, &rulesets, 1);
+ }
+ if (ret != 0) return ret; // including ENOENT: no rulesets -> no rule used
+ const char *rulesets_str = rulesets.data;
+
+ // Iterate over all rulesets.
+ while (rulesets.len > 0) {
+ { // Write ruleset-specific prefix of the key.
+ const size_t rsp_len = strnlen(rulesets_str, rulesets.len);
+ kr_require(rsp_len <= KEY_RULESET_MAXLEN - 1);
+ key.data = key_data_ruleset_end - rsp_len;
+ memcpy(key.data, rulesets_str, rsp_len);
+ rulesets_str += rsp_len + 1;
+ rulesets.len -= rsp_len + 1;
+ }
+
+ static_assert(sizeof(KEY_VIEW_SRC4) == sizeof(KEY_VIEW_SRC6),
+ "bad combination of constants");
+ const size_t addr_start_i = key_data_ruleset_end + sizeof(KEY_VIEW_SRC4)
+ - (const uint8_t *)key.data;
+
+ knot_db_val_t key_leq = {
+ .data = key.data,
+ .len = key.len + (key_data_ruleset_end - (uint8_t *)key.data),
+ };
+ knot_db_val_t val;
+ ret = ruledb_op(read_leq, &key_leq, &val);
+ for (; true; ret = ruledb_op(read_less, &key_leq, &val)) {
+ if (ret == -ENOENT) break;
+ if (ret < 0) return kr_error(ret);
+ if (ret > 0) { // found a previous key
+ ssize_t i = key_common_prefix(key, key_leq);
+ if (i < addr_start_i) // no suitable key can exist in DB
+ break;
+ if (i != key_leq.len) {
+ if (kr_fails_assert(i < key.len && i < key_leq.len))
+ break;
+ if (!subnet_is_prefix(((uint8_t *)key_leq.data)[i],
+ ((uint8_t *)key.data)[i])) {
+ // the key doesn't match
+ // We can shorten the key to potentially
+ // speed up by skipping over whole subtrees.
+ key_leq.len = i + 1;
+ continue;
+ }
+ }
+ }
+ // We certainly have a matching key (join of various sub-cases).
+ if (kr_log_is_debug(RULES, NULL)) {
+ // it's complex to get a zero-terminated string for the action
+ char act_0t[val.len + 1];
+ memcpy(act_0t, val.data, val.len);
+ act_0t[val.len] = 0;
+ VERBOSE_MSG(req->rplan.initial, "=> view selected action: %s\n",
+ act_0t);
+ }
+ *selected = val;
+ return kr_ok();
+ }
+ }
+ return kr_error(ENOENT);
+}
diff --git a/lib/rules/api.h b/lib/rules/api.h
new file mode 100644
index 00000000..2ba45f20
--- /dev/null
+++ b/lib/rules/api.h
@@ -0,0 +1,159 @@
+/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ */
+#pragma once
+
+#include "lib/defines.h"
+struct kr_query;
+struct kr_request;
+struct knot_pkt;
+struct sockaddr;
+#include <libknot/db/db.h>
+
+typedef uint64_t kr_rule_tags_t;
+#define KR_RULE_TAGS_ALL ((kr_rule_tags_t)0)
+/// Tags "capacity", i.e. numbered from 0 to _CAP - 1.
+#define KR_RULE_TAGS_CAP (sizeof(kr_rule_tags_t) * 8)
+
+/** Open the rule DB.
+ *
+ * You can call this to override the path or size (NULL/0 -> default).
+ * Not allowed if already open (EINVAL), so this optional call has to come
+ * before writing anything into the DB. */
+KR_EXPORT
+int kr_rules_init(const char *path, size_t maxsize);
+/** kr_rules_init() but OK if already open, and not allowing to override defaults. */
+KR_EXPORT
+int kr_rules_init_ensure(void);
+
+KR_EXPORT
+void kr_rules_deinit(void);
+
+/** Commit or abort changes done to the rule DB so far. */
+KR_EXPORT
+int kr_rules_commit(bool accept);
+
+/** Try answering the query from local data; WIP: otherwise determine data source overrides.
+ *
+ * \return kr_error() on errors, >0 if answered, 0 otherwise (also when forwarding)
+ *
+ * FIXME: we probably want to ensure AA flags in answer as appropriate.
+ * Perhaps approach it like AD? Tweak flags in ranked_rr_array_entry
+ * and at the end decide whether to set AA=1?
+ */
+int kr_rule_local_data_answer(struct kr_query *qry, struct knot_pkt *pkt);
+
+/** Set up nameserver+cut if overridden by policy. \return kr_error() */
+int kr_rule_data_src_check(struct kr_query *qry, struct knot_pkt *pkt);
+
+/** Select the view action to perform.
+ *
+ * \param selected The action string from kr_view_insert_action()
+ * \return 0 or negative error code, in particular kr_error(ENOENT)
+ */
+KR_EXPORT
+int kr_view_select_action(const struct kr_request *req, knot_db_val_t *selected);
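+/* Usage sketch (execute_action() is hypothetical, not part of this API):
+ *
+ *   knot_db_val_t action;
+ *   if (kr_view_select_action(req, &action) == kr_ok())
+ *       execute_action(req, action.data, action.len); // not zero-terminated
+ */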
+
+
+
+/* APIs to modify the rule DB.
+ *
+ * FIXME:
+ * - a way to read/modify a rule?
+ */
+
+/** Insert/overwrite a local data rule.
+ *
+ * Into the default rule-set ATM.
+ * Special NODATA case: use a CNAME type with zero records (TTL matters). */
+KR_EXPORT
+int kr_rule_local_data_ins(const knot_rrset_t *rrs, const knot_rdataset_t *sig_rds,
+ kr_rule_tags_t tags);
+
+/** Remove a local data rule.
+ *
+ * \return the number of deleted rules or error < 0
+ *
+ * TODO: some other matching than name+type? Currently `tags` is unused; match all types?
+ * (would be useful in del_pair)
+ */
+KR_EXPORT
+int kr_rule_local_data_del(const knot_rrset_t *rrs, kr_rule_tags_t tags);
+
+// TODO: perhaps expose an enum to unify these simple subtree rules?
+
+/** Insert an empty zone.
+ *
+ * - into the default rule-set
+ * - SOA and NS for generated answers aren't overridable.
+ * - TTL is RULE_TTL_DEFAULT
+ */
+KR_EXPORT
+int kr_rule_local_data_emptyzone(const knot_dname_t *apex, kr_rule_tags_t tags);
+
+/** Insert an "NXDOMAIN zone". TODO: SOA owner is hard. */
+KR_EXPORT
+int kr_rule_local_data_nxdomain(const knot_dname_t *apex, kr_rule_tags_t tags);
+/** Insert a "NODATA zone". These functions are all similar. */
+KR_EXPORT
+int kr_rule_local_data_nodata(const knot_dname_t *apex, kr_rule_tags_t tags);
+
+/** Insert a redirect zone.
+ * Into the default rule-set ATM. SOA for generated NODATA answers isn't overridable. */
+KR_EXPORT
+int kr_rule_local_data_redirect(const knot_dname_t *apex, kr_rule_tags_t tags);
+
+/** Insert a view action into the default ruleset.
+ *
+ * \param subnet String specifying a subnet, e.g. "192.168.0.0/16".
+ * \param action Currently a string to execute, like in old policies, e.g. `policy.REFUSE`
+ *
+ * The concept of chain actions isn't respected; the most prioritized rule wins.
+ * If exactly the same subnet is specified repeatedly, that rule gets overwritten silently.
+ * TODO: improve? (return code, warning, ...)
+ * TODO: some way to do multiple actions? Will be useful e.g. with option-setting actions.
+ * On implementation side this would probably be multi-value LMDB, cf. local_data rules.
+ */
+KR_EXPORT
+int kr_view_insert_action(const char *subnet, const char *action);
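+/* Usage sketch, e.g. when loading configuration:
+ *
+ *   int ret = kr_view_insert_action("192.168.0.0/16", "policy.REFUSE");
+ *   if (ret == kr_ok()) ret = kr_rules_commit(true);
+ */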
+
+/** Add a tag by name into a tag-set variable.
+ *
+ * It also ensures allocation of tag names in the DB, etc.
+ */
+KR_EXPORT
+int kr_rule_tag_add(const char *tag, kr_rule_tags_t *tagset);
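+/* Usage sketch ("malware" and rrs are just example inputs):
+ *
+ *   kr_rule_tags_t ts = 0;
+ *   int ret = kr_rule_tag_add("malware", &ts);
+ *   if (ret == kr_ok()) ret = kr_rule_local_data_ins(rrs, NULL, ts);
+ */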
+
+
+struct kr_rule_zonefile_config {
+ const char *filename; /// NULL if specifying input_str instead
+ const char *input_str; /// NULL if specifying filename instead
+ size_t input_len; /// 0 for strlen(input_str)
+
+ bool is_rpz; /// true: interpret as RPZ; false: as plain RRsets
+ bool nodata; /// TODO: implement
+ kr_rule_tags_t tags; /// tag-set for the generated rule
+ const char *origin; /// NULL or zone origin if known
+ uint32_t ttl; /// default TTL
+};
+/** Load rules from some zonefile format, e.g. RPZ. Code in ./zonefile.c */
+KR_EXPORT
+int kr_rule_zonefile(const struct kr_rule_zonefile_config *c);
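+/* Usage sketch (the filename is hypothetical):
+ *
+ *   struct kr_rule_zonefile_config c = {
+ *       .filename = "blocklist.rpz", .is_rpz = true,
+ *       .tags = KR_RULE_TAGS_ALL, .ttl = 10800,
+ *   };
+ *   int ret = kr_rule_zonefile(&c);
+ */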
+
+
+struct kr_rule_fwd_flags {
+ /// Beware of ABI: this struct is memcpy'd to/from rule DB.
+ bool
+ is_auth : 1,
+ is_tcp : 1, /// forced TCP; unused, not needed for DoT
+ is_nods : 1; /// disable local DNSSEC validation
+};
+typedef struct kr_rule_fwd_flags kr_rule_fwd_flags_t;
+/** Insert/overwrite a forwarding rule.
+ *
+ * Into the default rule-set ATM.
+ * \param targets NULL-terminated array. */
+KR_EXPORT
+int kr_rule_forward(const knot_dname_t *apex, kr_rule_fwd_flags_t flags,
+ const struct sockaddr * targets[]);
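+/* Usage sketch (apex and ss are assumed to be prepared by the caller):
+ *
+ *   const struct sockaddr *targets[] = { (const struct sockaddr *)&ss, NULL };
+ *   int ret = kr_rule_forward(apex,
+ *                             (kr_rule_fwd_flags_t){ .is_auth = false },
+ *                             targets);
+ */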
+
diff --git a/lib/rules/defaults.c b/lib/rules/defaults.c
new file mode 100644
index 00000000..a518a98c
--- /dev/null
+++ b/lib/rules/defaults.c
@@ -0,0 +1,210 @@
+/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ */
+
+#include "lib/rules/impl.h"
+#include "lib/rules/api.h"
+#include "lib/utils.h"
+
+#define CHECK_RET(ret) do { \
+ if ((ret) < 0) { kr_assert(false); return kr_error((ret)); } \
+} while (false)
+
+int rules_defaults_insert(void)
+{
+ static const char * names[] = {
+ /* RFC1918 Private, local, broadcast, test and special zones
+ Considerations: RFC6761, sec 6.1.
+ https://www.iana.org/assignments/locally-served-dns-zones
+ */
+ /* RFC6303 */
+ "10.in-addr.arpa.",
+ "16.172.in-addr.arpa.",
+ "17.172.in-addr.arpa.",
+ "18.172.in-addr.arpa.",
+ "19.172.in-addr.arpa.",
+ "20.172.in-addr.arpa.",
+ "21.172.in-addr.arpa.",
+ "22.172.in-addr.arpa.",
+ "23.172.in-addr.arpa.",
+ "24.172.in-addr.arpa.",
+ "25.172.in-addr.arpa.",
+ "26.172.in-addr.arpa.",
+ "27.172.in-addr.arpa.",
+ "28.172.in-addr.arpa.",
+ "29.172.in-addr.arpa.",
+ "30.172.in-addr.arpa.",
+ "31.172.in-addr.arpa.",
+ "168.192.in-addr.arpa.",
+ "0.in-addr.arpa.",
+ "127.in-addr.arpa.",
+ "254.169.in-addr.arpa.",
+ "2.0.192.in-addr.arpa.",
+ "100.51.198.in-addr.arpa.",
+ "113.0.203.in-addr.arpa.",
+ "255.255.255.255.in-addr.arpa.",
+ /* RFC7793 */
+ "64.100.in-addr.arpa.",
+ "65.100.in-addr.arpa.",
+ "66.100.in-addr.arpa.",
+ "67.100.in-addr.arpa.",
+ "68.100.in-addr.arpa.",
+ "69.100.in-addr.arpa.",
+ "70.100.in-addr.arpa.",
+ "71.100.in-addr.arpa.",
+ "72.100.in-addr.arpa.",
+ "73.100.in-addr.arpa.",
+ "74.100.in-addr.arpa.",
+ "75.100.in-addr.arpa.",
+ "76.100.in-addr.arpa.",
+ "77.100.in-addr.arpa.",
+ "78.100.in-addr.arpa.",
+ "79.100.in-addr.arpa.",
+ "80.100.in-addr.arpa.",
+ "81.100.in-addr.arpa.",
+ "82.100.in-addr.arpa.",
+ "83.100.in-addr.arpa.",
+ "84.100.in-addr.arpa.",
+ "85.100.in-addr.arpa.",
+ "86.100.in-addr.arpa.",
+ "87.100.in-addr.arpa.",
+ "88.100.in-addr.arpa.",
+ "89.100.in-addr.arpa.",
+ "90.100.in-addr.arpa.",
+ "91.100.in-addr.arpa.",
+ "92.100.in-addr.arpa.",
+ "93.100.in-addr.arpa.",
+ "94.100.in-addr.arpa.",
+ "95.100.in-addr.arpa.",
+ "96.100.in-addr.arpa.",
+ "97.100.in-addr.arpa.",
+ "98.100.in-addr.arpa.",
+ "99.100.in-addr.arpa.",
+ "100.100.in-addr.arpa.",
+ "101.100.in-addr.arpa.",
+ "102.100.in-addr.arpa.",
+ "103.100.in-addr.arpa.",
+ "104.100.in-addr.arpa.",
+ "105.100.in-addr.arpa.",
+ "106.100.in-addr.arpa.",
+ "107.100.in-addr.arpa.",
+ "108.100.in-addr.arpa.",
+ "109.100.in-addr.arpa.",
+ "110.100.in-addr.arpa.",
+ "111.100.in-addr.arpa.",
+ "112.100.in-addr.arpa.",
+ "113.100.in-addr.arpa.",
+ "114.100.in-addr.arpa.",
+ "115.100.in-addr.arpa.",
+ "116.100.in-addr.arpa.",
+ "117.100.in-addr.arpa.",
+ "118.100.in-addr.arpa.",
+ "119.100.in-addr.arpa.",
+ "120.100.in-addr.arpa.",
+ "121.100.in-addr.arpa.",
+ "122.100.in-addr.arpa.",
+ "123.100.in-addr.arpa.",
+ "124.100.in-addr.arpa.",
+ "125.100.in-addr.arpa.",
+ "126.100.in-addr.arpa.",
+ "127.100.in-addr.arpa.",
+ /* RFC6303 */
+ "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.",
+ "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.",
+ /* ^ below we inject exact-match PTR over this empty zone */
+ "d.f.ip6.arpa.",
+ "8.e.f.ip6.arpa.",
+ "9.e.f.ip6.arpa.",
+ "a.e.f.ip6.arpa.",
+ "b.e.f.ip6.arpa.",
+ "8.b.d.0.1.0.0.2.ip6.arpa.",
+ /* RFC8375 */
+ "home.arpa.",
+
+ /* More zones - empty-zone subset from:
+ https://www.iana.org/assignments/special-use-domain-names
+ TODO: perhaps review the list again.
+ */
+ "test.",
+ "onion.",
+ "invalid.",
+ "local.", // RFC 8375.4
+ };
+
+ const int names_count = sizeof(names) / sizeof(names[0]);
+ for (int i = 0; i < names_count; ++i) {
+ knot_dname_t name_buf[KNOT_DNAME_MAXLEN];
+ const knot_dname_t *dname =
+ knot_dname_from_str(name_buf, names[i], sizeof(name_buf));
+ int ret = kr_rule_local_data_emptyzone(dname, KR_RULE_TAGS_ALL);
+ CHECK_RET(ret);
+ /* The double conversion is perhaps a bit wasteful, but it should be rare. */
+ /* LATER: add extra info with explanation? policy module had an ADDITIONAL
+ * record with explanation, but perhaps extended errors are more suitable?
+ * Differentiating the message - perhaps splitting VAL_ZLAT_EMPTY into a few?
+ */
+ }
+
+ knot_dname_t localhost_dname[] = "\x09localhost\0";
+ { // forward localhost
+ int ret = kr_rule_local_data_redirect(localhost_dname, KR_RULE_TAGS_ALL);
+ CHECK_RET(ret);
+
+ knot_rrset_t rr = {
+ .owner = localhost_dname,
+ .ttl = RULE_TTL_DEFAULT,
+ .rclass = KNOT_CLASS_IN,
+ .rrs = { 0 },
+ .additional = NULL,
+ };
+ rr.type = KNOT_RRTYPE_A;
+ ret = knot_rrset_add_rdata(&rr, (const uint8_t *)"\x7f\0\0\1", 4, NULL);
+ if (!ret) ret = kr_rule_local_data_ins(&rr, NULL, KR_RULE_TAGS_ALL);
+ knot_rdataset_clear(&rr.rrs, NULL);
+ CHECK_RET(ret);
+
+ rr.type = KNOT_RRTYPE_AAAA;
+ ret = knot_rrset_add_rdata(&rr,
+ (const uint8_t *)"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1",
+ 16, NULL);
+ if (!ret) ret = kr_rule_local_data_ins(&rr, NULL, KR_RULE_TAGS_ALL);
+ knot_rdataset_clear(&rr.rrs, NULL);
+ CHECK_RET(ret);
+
+ rr.type = KNOT_RRTYPE_NS;
+ ret = knot_rrset_add_rdata(&rr, localhost_dname, 1+9+1, NULL);
+ if (!ret) ret = kr_rule_local_data_ins(&rr, NULL, KR_RULE_TAGS_ALL);
+ knot_rdataset_clear(&rr.rrs, NULL);
+ CHECK_RET(ret);
+ }
+
+ { // reverse localhost; LATER: the situation isn't ideal with NXDOMAIN + some exact matches
+ knot_rrset_t rr = {
+ .owner = localhost_dname,
+ .ttl = RULE_TTL_DEFAULT,
+ .type = KNOT_RRTYPE_PTR,
+ .rclass = KNOT_CLASS_IN,
+ .rrs = { 0 },
+ .additional = NULL,
+ };
+ int ret = knot_rrset_add_rdata(&rr, localhost_dname, 1+9+1, NULL);
+ if (!ret) ret = kr_rule_local_data_ins(&rr, NULL, KR_RULE_TAGS_ALL);
+
+ knot_dname_t name_buf[KNOT_DNAME_MAXLEN];
+ rr.owner = knot_dname_from_str(name_buf,
+ "1.0.0.127.in-addr.arpa.",
+ sizeof(name_buf));
+ if (!ret) ret = kr_rule_local_data_ins(&rr, NULL, KR_RULE_TAGS_ALL);
+
+ rr.owner = knot_dname_from_str(name_buf,
+ "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.",
+ sizeof(name_buf));
+ if (!ret) ret = kr_rule_local_data_ins(&rr, NULL, KR_RULE_TAGS_ALL);
+
+ knot_rdataset_clear(&rr.rrs, NULL);
+ CHECK_RET(ret);
+ }
+
+ return kr_ok();
+}
+
diff --git a/lib/rules/forward.c b/lib/rules/forward.c
new file mode 100644
index 00000000..d0d261d9
--- /dev/null
+++ b/lib/rules/forward.c
@@ -0,0 +1,168 @@
+/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ */
+
+#include "lib/rules/api.h"
+#include "lib/rules/impl.h"
+
+#include "lib/layer/iterate.h"
+#include "lib/resolve.h"
+
+static void setup_fwd_flags(struct kr_query *qry)
+{
+ if (qry->flags.FORWARD || qry->flags.STUB)
+ return; // someone else has set it unexpectedly - policy?
+ // TODO: disallow or restrict somehow?
+ //if (kr_fails_assert(!qry->flags.FORWARD && !qry->flags.STUB))
+
+ if (!qry->data_src.initialized) {
+ // no VAL_ZLAT_FORWARD -> standard iteration
+ qry->data_src.initialized = true;
+ qry->data_src.rule_depth = 0;
+ qry->data_src.flags.is_auth = true;
+ return;
+ }
+
+ const kr_rule_fwd_flags_t zf = qry->data_src.flags;
+
+ qry->flags.TCP |= zf.is_tcp;
+
+ if (!zf.is_auth && !zf.is_nods) { // mostly like policy.(TLS_)FORWARD
+ qry->flags.FORWARD = true;
+ qry->flags.NO_MINIMIZE = true;
+ // this ^^ probably won't be needed after moving iterator's produce
+ return;
+ }
+
+ if (!zf.is_auth && zf.is_nods) { // mostly like policy.STUB
+ qry->flags.STUB = true;
+ return;
+ }
+
+ if (zf.is_auth) {
+ return;
+ }
+
+ kr_require(false);
+}
+
+// Wrapper around rule_local_data_answer() to finish forwarding-related flags.
+int kr_rule_local_data_answer(struct kr_query *qry, knot_pkt_t *pkt)
+{
+ int ret = rule_local_data_answer(qry, pkt); // the main body of work
+ if (ret < 0)
+ kr_log_debug(RULES, "policy rules failed: %s\n", kr_strerror(ret));
+ // deal with setting up .FORWARD and .STUB, so that cache lookup knows
+ setup_fwd_flags(qry);
+ // unfortunately, changing flags can change this from iterator
+ if (ret == 0 && (qry->flags.FORWARD || qry->flags.STUB))
+ ret = kr_make_query(qry, pkt);
+
+ //kr_assert(qry->data_src.initialized); // TODO: broken by old policy.FORWARD, etc.
+ return ret;
+}
+
+int kr_rule_data_src_check(struct kr_query *qry, struct knot_pkt *pkt)
+{
+ if (qry->data_src.all_set)
+ return kr_ok(); // everything should be in order from before
+
+ if (/*kr_fails_assert!*/(!qry->data_src.initialized)) { // FIXME ci val_ad_qtype_ds
+ // fall back to standard iteration
+ goto fallback;
+ }
+
+ if (!qry->data_src.flags.is_auth && qry->data_src.targets_ptr.data) {
+ struct kr_request *req = qry->request;
+ // In old policy this used to be done by kr_forward_add_target()
+ // For TLS see policy.TLS_FORWARD() and net_tls_client()
+ // The mapping from address+port to parameters are in tls_client_param_t
+ kr_sockaddr_array_t *targets = &req->selection_context.forwarding_targets;
+ const size_t t_bytes = qry->data_src.targets_ptr.len;
+ kr_assert(t_bytes > 0 && t_bytes % sizeof(targets->at[0]) == 0);
+ targets->cap = targets->len = t_bytes / sizeof(targets->at[0]);
+ targets->at = mm_alloc(&req->pool, t_bytes);
+ memcpy(targets->at, qry->data_src.targets_ptr.data, t_bytes);
+ qry->data_src.all_set = true;
+
+ kr_server_selection_init(qry); // this assumes `forwarding_targets` was filled
+ return kr_ok();
+ }
+
+ if (qry->data_src.flags.is_auth) {
+ if (!qry->data_src.targets_ptr.data)
+ goto fallback; // default iteration falls here
+ const knot_dname_t *apex = qry->sname;
+ for (int labels = knot_dname_labels(apex, NULL);
+ labels > qry->data_src.rule_depth;
+ --labels, apex = knot_wire_next_label(apex, NULL));
+ kr_zonecut_set(&qry->zone_cut, apex);
+ knot_db_val_t targets = qry->data_src.targets_ptr;
+ kr_assert(targets.len > 0);
+ while (targets.len > 0) {
+ union kr_sockaddr target;
+ if (deserialize_fails_assert(&targets, &target))
+ goto fallback;
+ int ret = kr_zonecut_add(&qry->zone_cut,
+ (const knot_dname_t *)"\2ns\7invalid",
+ kr_inaddr(&target.ip), kr_inaddr_len(&target.ip));
+ if (kr_fails_assert(ret == 0))
+ goto fallback;
+ }
+ kr_assert(targets.len == 0);
+ qry->flags.AWAIT_CUT = false;
+ qry->data_src.all_set = true;
+ kr_server_selection_init(qry);
+ // unfortunately, zone cut depth might've changed
+ return kr_make_query(qry, pkt);
+ }
+
+ kr_assert(false);
+fallback:
+ qry->data_src.initialized = true;
+ qry->data_src.rule_depth = 0;
+ qry->data_src.all_set = true;
+ kr_server_selection_init(qry);
+ return kr_ok();
+}
+
+int kr_rule_forward(const knot_dname_t *apex, kr_rule_fwd_flags_t flags,
+ const struct sockaddr * targets[])
+{
+ ENSURE_the_rules;
+ const kr_rule_tags_t tags = KR_RULE_TAGS_ALL;
+ const val_zla_type_t ztype = VAL_ZLAT_FORWARD;
+
+ int count = 0;
+ if (targets) {
+ while (targets[count])
+ ++count;
+ }
+
+ uint8_t key_data[KEY_MAXLEN];
+ knot_db_val_t key = zla_key(apex, key_data);
+
+ const size_t targets_bytes = count * sizeof(union kr_sockaddr);
+ knot_db_val_t val = {
+ .data = NULL,
+ .len = sizeof(tags) + sizeof(ztype) + sizeof(flags) + targets_bytes,
+ };
+ int ret = ruledb_op(write, &key, &val, 1);
+ if (kr_fails_assert(ret >= 0))
+ return kr_error(ret);
+ memcpy(val.data, &tags, sizeof(tags));
+ val.data += sizeof(tags);
+ memcpy(val.data, &ztype, sizeof(ztype));
+ val.data += sizeof(ztype);
+ memcpy(val.data, &flags, sizeof(flags));
+ val.data += sizeof(flags);
+ for (int i = 0; i < count; ++i) {
+ // targets[i] may be shorter than union kr_sockaddr, so we zero-pad
+ // LATER: for is_auth we really drop anything but address (e.g. port!=53)
+ union kr_sockaddr a = { 0 };
+ memcpy(&a, targets[i], kr_sockaddr_len(targets[i]));
+ memcpy(val.data, &a, sizeof(a));
+ val.data += sizeof(a);
+ }
+ return kr_ok();
+}
diff --git a/lib/rules/impl.h b/lib/rules/impl.h
new file mode 100644
index 00000000..fe6c1e03
--- /dev/null
+++ b/lib/rules/impl.h
@@ -0,0 +1,101 @@
+/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ */
+#pragma once
+
+#include "lib/rules/api.h"
+#include "lib/utils.h"
+#include <libknot/packet/pkt.h>
+
+#include "lib/cache/impl.h"
+#undef VERBOSE_MSG
+#define VERBOSE_MSG(qry, ...) kr_log_q((qry), RULES, ## __VA_ARGS__)
+
+#define RULE_TTL_DEFAULT ((uint32_t)10800)
+
+/** Insert all the default rules. Code in ./defaults.c */
+int rules_defaults_insert(void);
+
+/** Singleton struct used by the code in this directory (lib/rules). */
+struct kr_rules;
+extern struct kr_rules *the_rules;
+
+#define ENSURE_the_rules \
+ if (!the_rules) { \
+ int ret = kr_rules_init(NULL, 0); \
+ if (ret) return ret; \
+ }
+
+#define KEY_RULESET_MAXLEN 16 /**< max. len of ruleset ID + 1(for kind) */
+/** When constructing a key, it's convenient that the dname_lf ends on a fixed offset.
+ * Convention: the end here is before the final '\0' byte (if any). */
+#define KEY_DNAME_END_OFFSET (KEY_RULESET_MAXLEN + KNOT_DNAME_MAXLEN)
+#define KEY_MAXLEN (KEY_DNAME_END_OFFSET + 64) //TODO: most of 64 is unused ATM
+
+/** Construct key for local_data_ins(). It's stored in `key_data`. */
+knot_db_val_t local_data_key(const knot_rrset_t *rrs, uint8_t key_data[KEY_MAXLEN],
+ const char *ruleset_name);
+/** Same as kr_rule_local_data_ins() but with precomputed `key`. */
+int local_data_ins(knot_db_val_t key, const knot_rrset_t *rrs,
+ const knot_rdataset_t *sig_rds, kr_rule_tags_t tags);
+/** Construct key for a zone-like-apex entry. It's stored in `key_data`. */
+knot_db_val_t zla_key(const knot_dname_t *apex, uint8_t key_data[KEY_MAXLEN]);
+
+/** Almost the whole kr_rule_local_data_answer() */
+int rule_local_data_answer(struct kr_query *qry, knot_pkt_t *pkt);
+
+/** The first byte of zone-like apex value is its type. */
+typedef uint8_t val_zla_type_t;
+enum {
+ /** Empty zone. No data in DB value after this byte.
+ *
+ * TODO: add
+ * - SOA rdata (maybe, optional, remainder of DB value)
+ * Same for _NXDOMAIN and _NODATA, too.
+ */
+ VAL_ZLAT_EMPTY = 1,
+ /** Forced NXDOMAIN. */
+ VAL_ZLAT_NXDOMAIN,
+ /** Forced NODATA. Does not apply on exact name (e.g. it's similar to DNAME) */
+ VAL_ZLAT_NODATA,
+ /** Redirect: anything beneath has the same data as apex (except NS+SOA). */
+ VAL_ZLAT_REDIRECT,
+ /** Forward, i.e. override upstream for this subtree (resolver or auth). */
+ VAL_ZLAT_FORWARD,
+};
+/** For now see kr_rule_local_data_emptyzone() and friends.
+ *
+ * TODO: probably make something like this the preferred API. */
+int insert_trivial_zone(val_zla_type_t ztype, uint32_t ttl,
+ const knot_dname_t *apex, kr_rule_tags_t tags);
+
+extern /*const*/ char RULESET_DEFAULT[];
+
+/// Fill *variable_ptr from a knot_db_val_t and advance it (and kr_assert it fits).
+#define deserialize_fails_assert(val_ptr, variable_ptr) \
+ deserialize_fails_assert_f_(val_ptr, (variable_ptr), sizeof(*(variable_ptr)))
+static inline bool deserialize_fails_assert_f_(knot_db_val_t *val, void *var, size_t size)
+{
+ if (kr_fails_assert(val->len >= size))
+ return true;
+ memcpy(var, val->data, size);
+ val->len -= size;
+ // avoiding void* arithmetic complicates this
+ char *tmp = val->data;
+ tmp += size;
+ val->data = tmp;
+ return false;
+}
+
+struct kr_rules {
+ /* Database for storing the rules (LMDB). */
+ kr_cdb_pt db; /**< Storage instance */
+ const struct kr_cdb_api *api; /**< Storage engine */
+ struct kr_cdb_stats stats;
+};
+#define ruledb_op(op, ...) \
+ the_rules->api->op(the_rules->db, &the_rules->stats, ## __VA_ARGS__)
+
+//TODO later, maybe. ATM it would be cumbersome to avoid void* arithmetic.
+#pragma GCC diagnostic ignored "-Wpointer-arith"
+
diff --git a/lib/rules/zonefile.c b/lib/rules/zonefile.c
new file mode 100644
index 00000000..fc5ff1f5
--- /dev/null
+++ b/lib/rules/zonefile.c
@@ -0,0 +1,272 @@
+/* Copyright (C) CZ.NIC, z.s.p.o. <knot-resolver@labs.nic.cz>
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ */
+/** @file
+ *
+ * Code for loading rules from some kinds of zonefile, e.g. RPZ.
+ */
+
+#include "lib/rules/api.h"
+#include "lib/rules/impl.h"
+
+#include "lib/log.h"
+#include "lib/utils.h"
+#include "lib/generic/trie.h"
+
+#include <libzscanner/scanner.h>
+
+/// State used in zs_scanner_t::process.data
+typedef struct {
+ const struct kr_rule_zonefile_config *c; /// owned by the caller
+ trie_t *rrs; /// map: local_data_key() -> knot_rrset_t where we only use .ttl and .rrs
+ knot_mm_t *pool; /// used for everything inside s_data_t (unless noted otherwise)
+
+ // state data for owner_relativize()
+ const knot_dname_t *origin_soa;
+ bool seen_record, warned_soa, warned_bailiwick;
+} s_data_t;
+
+//TODO: logs should better include file name and position within
+
+
+/// Process scanned RR of other types, gather RRsets in a map.
+static void rr_scan2trie(zs_scanner_t *s)
+{
+ s_data_t *s_data = s->process.data;
+ uint8_t key_data[KEY_MAXLEN];
+ knot_rrset_t rrs_for_key = {
+ .owner = s->r_owner,
+ .type = s->r_type,
+ };
+ knot_db_val_t key = local_data_key(&rrs_for_key, key_data, RULESET_DEFAULT);
+ trie_val_t *rr_p = trie_get_ins(s_data->rrs, key.data, key.len);
+ knot_rrset_t *rr;
+ if (*rr_p) {
+ rr = *rr_p;
+ if (s->r_ttl < rr->ttl)
+ rr->ttl = s->r_ttl; // we could also warn here
+ } else {
+ rr = *rr_p = mm_alloc(s_data->pool, sizeof(*rr));
+ knot_rrset_init(rr, NULL, s->r_type, KNOT_CLASS_IN, s->r_ttl);
+ // we don't ^^ need owner so save allocation
+ }
+ knot_rrset_add_rdata(rr, s->r_data, s->r_data_length, s_data->pool);
+}
+/// Process an RRset of other types into a rule
+static int rr_trie2rule(const char *key_data, uint32_t key_len, trie_val_t *rr_p, void *config)
+{
+ const knot_db_val_t key = { .data = (void *)key_data, .len = key_len };
+ const knot_rrset_t *rr = *rr_p;
+ const struct kr_rule_zonefile_config *c = config;
+ return local_data_ins(key, rr, NULL, c->tags);
+ //TODO: check error logging path here (LMDB)
+}
+
+/// Process a scanned CNAME RR into a rule
+static void cname_scan2rule(zs_scanner_t *s)
+{
+ s_data_t *s_data = s->process.data;
+ const struct kr_rule_zonefile_config *c = s_data->c;
+
+ const char *last_label = NULL; // last label of the CNAME
+ for (knot_dname_t *dn = s->r_data; *dn != '\0'; dn += 1 + *dn)
+ last_label = (const char *)dn + 1;
+ if (last_label && strncmp(last_label, "rpz-", 4) == 0) {
+ kr_log_warning(RULES, "skipping unsupported CNAME target .%s\n", last_label);
+ return;
+ }
+ int ret = 0;
+ if (s->r_data[0] == 0) { // "CNAME ." i.e. NXDOMAIN
+ const knot_dname_t *apex = s->r_owner;
+ if (knot_dname_is_wildcard(apex))
+ apex += 2;
+ // RPZ_COMPAT: we NXDOMAIN the whole subtree regardless of being wildcard.
+ // Exact RPZ semantics would be hard here, it makes more sense
+ // to apply also to a subtree, and corresponding wildcard rule
+ // usually accompanies this rule anyway.
+ ret = insert_trivial_zone(VAL_ZLAT_NXDOMAIN, s->r_ttl, apex, c->tags);
+ } else if (knot_dname_is_wildcard(s->r_data) && s->r_data[2] == 0) {
+ // "CNAME *." -> NODATA
+ knot_dname_t *apex = s->r_owner;
+ if (knot_dname_is_wildcard(apex)) {
+ apex += 2;
+ ret = insert_trivial_zone(VAL_ZLAT_NODATA, s->r_ttl, apex, c->tags);
+ } else { // using special kr_rule_ semantics of empty CNAME RRset
+ knot_rrset_t rrs;
+ knot_rrset_init(&rrs, apex, KNOT_RRTYPE_CNAME,
+ KNOT_CLASS_IN, s->r_ttl);
+ ret = kr_rule_local_data_ins(&rrs, NULL, c->tags);
+ }
+ } else {
+ knot_dname_t *target = s->r_owner;
+ knot_rrset_t rrs;
+ knot_rrset_init(&rrs, target, KNOT_RRTYPE_CNAME, KNOT_CLASS_IN, s->r_ttl);
+ // TODO: implement wildcard expansion for target
+ ret = knot_rrset_add_rdata(&rrs, s->r_data, s->r_data_length, NULL);
+ if (!ret) ret = kr_rule_local_data_ins(&rrs, NULL, c->tags);
+ knot_rdataset_clear(&rrs.rrs, NULL);
+ }
+ if (ret)
+ kr_log_warning(RULES, "failure code %d\n", ret);
+}
+
+/// Relativize s->r_owner if suitable. (Also react to SOA.) Return false to skip RR.
+static bool owner_relativize(zs_scanner_t *s)
+{
+ s_data_t *d = s->process.data;
+ if (!d->c->is_rpz)
+ return true;
+
+ // SOA determines the zone apex, but lots of error/warn cases
+ if (s->r_type == KNOT_RRTYPE_SOA) {
+ if (d->seen_record && !knot_dname_is_equal(s->zone_origin, s->r_owner)) {
+ // We most likely inserted some rules wrong already, so abort.
+ kr_log_error(RULES,
+ "SOA encountered late, with unexpected owner; aborting\n");
+ s->state = ZS_STATE_STOP;
+ return false;
+ }
+ if (!d->warned_soa && (d->seen_record || d->origin_soa)) {
+ d->warned_soa = true;
+ kr_log_warning(RULES,
+ "SOA should come as the first record in a RPZ\n");
+ }
+ if (!d->origin_soa) // sticking with the first encountered SOA
+ d->origin_soa = knot_dname_copy(s->r_owner, d->pool);
+ }
+ d->seen_record = true;
+
+ // $ORIGIN as fallback if SOA is missing
+ const knot_dname_t *apex = d->origin_soa;
+ if (!apex)
+ apex = s->zone_origin;
+
+ const int labels = knot_dname_in_bailiwick(s->r_owner, apex);
+ if (labels < 0) {
+ if (!d->warned_bailiwick) {
+ d->warned_bailiwick = true;
+ KR_DNAME_GET_STR(owner_str, s->r_owner);
+ kr_log_warning(RULES,
+ "skipping out-of-zone record(s); first name %s\n",
+ owner_str);
+ }
+ return false;
+ }
+ const int len = knot_dname_prefixlen(s->r_owner, labels, NULL);
+ s->r_owner[len] = '\0'; // not very nice but safe at this point
+ return true;
+}
+
+/// Process a single scanned RR
+static void process_record(zs_scanner_t *s)
+{
+ s_data_t *s_data = s->process.data;
+ if (s->r_class != KNOT_CLASS_IN) {
+ kr_log_warning(RULES, "skipping unsupported RR class\n");
+ return;
+ }
+
+ // inspect the owner name
+ const bool ok = knot_dname_size(s->r_owner) == strlen((const char *)s->r_owner) + 1;
+ if (!ok) {
+ kr_log_warning(RULES, "skipping zero-containing RR owner name\n");
+ return;
+ }
+ // .rpz-* owner; sounds OK to warn and skip even for non-RPZ input
+ // TODO: support "rpz-client-ip"
+ const char *last_label = NULL;
+ for (knot_dname_t *dn = s->r_owner; *dn != '\0'; dn += 1 + *dn)
+ last_label = (const char *)dn + 1;
+ if (last_label && strncmp(last_label, "rpz-", 4) == 0) {
+ kr_log_warning(RULES, "skipping unsupported RR owner .%s\n", last_label);
+ return;
+ }
+ if (!owner_relativize(s))
+ return;
+
+ // RR type: mainly deal with various unsupported cases
+ switch (s->r_type) {
+ case KNOT_RRTYPE_RRSIG:
+ case KNOT_RRTYPE_NSEC:
+ case KNOT_RRTYPE_NSEC3:
+ case KNOT_RRTYPE_DNSKEY:
+ case KNOT_RRTYPE_DS:
+ unsupported_type:
+ (void)0; // C can't have a variable definition following a label
+ KR_RRTYPE_GET_STR(type_str, s->r_type);
+ kr_log_warning(RULES, "skipping unsupported RR type %s\n", type_str);
+ return;
+ }
+ if (knot_rrtype_is_metatype(s->r_type))
+ goto unsupported_type;
+ if (s_data->c->is_rpz && s->r_type == KNOT_RRTYPE_CNAME) {
+ cname_scan2rule(s);
+ return;
+ }
+ // Records in zonefile format generally aren't grouped by name and RR type,
+ // so we accumulate RRsets in a trie and push them as rules at the end.
+ rr_scan2trie(s);
+}
+
+int kr_rule_zonefile(const struct kr_rule_zonefile_config *c)
+{
+ ENSURE_the_rules;
+ zs_scanner_t s_storage, *s = &s_storage;
+ /* zs_init(), zs_set_input_file() and zs_set_processing() return -1 in case of error,
+ * so don't print the error code, as it is meaningless. */
+ uint32_t ttl = c->ttl ? c->ttl : RULE_TTL_DEFAULT; // 0 would be nonsense
+ int ret = zs_init(s, NULL, KNOT_CLASS_IN, ttl);
+ if (ret) {
+ kr_log_error(RULES, "error initializing zone scanner instance, error: %i (%s)\n",
+ s->error.code, zs_strerror(s->error.code));
+ return ret;
+ }
+
+ s_data_t s_data = { 0 };
+ s_data.c = c;
+ s_data.pool = mm_ctx_mempool2(64 * 1024);
+ s_data.rrs = trie_create(s_data.pool);
+ ret = zs_set_processing(s, process_record, NULL, &s_data);
+ if (kr_fails_assert(ret == 0))
+ goto finish;
+
+ // set the input to parse
+ if (c->filename) {
+ kr_assert(!c->input_str && !c->input_len);
+ ret = zs_set_input_file(s, c->filename);
+ if (ret) {
+ kr_log_error(RULES, "error opening zone file `%s`, error: %i (%s)\n",
+ c->filename, s->error.code, zs_strerror(s->error.code));
+ goto finish;
+ }
+ } else {
+ if (kr_fails_assert(c->input_str)) {
+ ret = kr_error(EINVAL);
+ } else {
+ size_t len = c->input_len ? c->input_len : strlen(c->input_str);
+ ret = zs_set_input_string(s, c->input_str, len);
+ }
+ if (ret) {
+ kr_log_error(RULES, "error %d when opening input with rules\n", ret);
+ goto finish;
+ }
+ }
+
+ /* TODO: disable $INCLUDE? In future RPZones could come from wherever.
+ * Automatic processing will do $INCLUDE, so perhaps use a manual loop instead?
+ */
+ ret = zs_parse_all(s);
+ if (ret != 0) {
+ kr_log_error(RULES, "error parsing zone file `%s`, error %i: %s\n",
+ c->filename, s->error.code, zs_strerror(s->error.code));
+ } else if (s->state == ZS_STATE_STOP) { // interrupted inside
+ ret = kr_error(EINVAL);
+ } else { // no fatal error so far
+ ret = trie_apply_with_key(s_data.rrs, rr_trie2rule, (void *)c);
+ }
+finish:
+ zs_deinit(s);
+ mm_ctx_delete(s_data.pool); // this also deletes whole s_data.rrs
+ return ret;
+}
+
diff --git a/lib/selection.c b/lib/selection.c
index 5aa2992c..ea3a85ae 100644
--- a/lib/selection.c
+++ b/lib/selection.c
@@ -173,7 +173,7 @@ int put_rtt_state(const uint8_t *ip, size_t len, struct rtt_state state,
.data = &state };
int ret = cache->api->write(db, stats, &key, &value, 1);
- cache->api->commit(db, stats);
+ cache->api->commit(db, stats, true);
free(key.data);
return ret;
@@ -686,6 +686,13 @@ void error(struct kr_query *qry, struct address_state *addr_state,
break;
case KR_SELECTION_REFUSED:
case KR_SELECTION_SERVFAIL:
+ if (qry->flags.FORWARD || qry->flags.STUB) {
+ /* The NS might not be broken, but this state is just for this query
+ * and it doesn't make sense to retry on the same NS immediately. */
+ addr_state->broken = true;
+ break;
+ }
+ /* For authoritative servers we try some fallback workarounds. */
if (qry->flags.NO_MINIMIZE && qry->flags.NO_0X20 && qry->flags.TCP) {
addr_state->broken = true;
} else if (qry->flags.NO_MINIMIZE) {
@@ -765,6 +772,15 @@ void kr_server_selection_init(struct kr_query *qry)
mempool, &qry->server_selection.local_state->private);
}
}
+void kr_server_selection_cached(struct kr_query *qry)
+{
+ qry->server_selection = (struct kr_server_selection){
+ .initialized = false,
+ // we reuse iter_error, as it's no-op if (!initialized)
+ .error = iter_error,
+ // everything else is NULL
+ };
+}
int kr_forward_add_target(struct kr_request *req, const struct sockaddr *sock)
{
diff --git a/lib/selection.h b/lib/selection.h
index 34cc69c4..0938e380 100644
--- a/lib/selection.h
+++ b/lib/selection.h
@@ -131,6 +131,12 @@ struct kr_server_selection {
*/
KR_EXPORT
void kr_server_selection_init(struct kr_query *qry);
+/**
+ * @brief Ensure server selection state suitable for processing "reply from cache".
+ *
+ * In particular, qry->server_selection.error() calls shouldn't crash.
+ */
+void kr_server_selection_cached(struct kr_query *qry);
/**
* @brief Add forwarding target to request.
diff --git a/lib/selection_forward.c b/lib/selection_forward.c
index 54f9a122..588fcb36 100644
--- a/lib/selection_forward.c
+++ b/lib/selection_forward.c
@@ -15,7 +15,7 @@ static_assert(FORWARDING_TIMEOUT >= KR_NS_TIMEOUT_MIN_DEAD_TIMEOUT,
"Bad combination of NS selection limits.");
struct forward_local_state {
- kr_sockaddr_array_t *targets;
+ const kr_sockaddr_array_t *targets; /// data owned by kr_request
struct address_state *addr_states;
/** Index of last choice in the targets array, used for error reporting. */
size_t last_choice_index;
@@ -66,7 +66,7 @@ void forward_choose_transport(struct kr_query *qry,
update_address_state(addr_state, address, addr_len, qry);
- if (addr_state->generation == -1) {
+ if (addr_state->generation == -1 || addr_state->broken) {
continue;
}
addr_state->choice_array_index = i;
diff --git a/manager/.dockerignore b/manager/.dockerignore
new file mode 100644
index 00000000..f67cf10c
--- /dev/null
+++ b/manager/.dockerignore
@@ -0,0 +1,8 @@
+node_modules/
+.mypy_cache/
+.pytest_cache/
+.tox/
+.git/
+.vscode/
+
+containers/ \ No newline at end of file
diff --git a/manager/.flake8 b/manager/.flake8
new file mode 100644
index 00000000..3a7c8e74
--- /dev/null
+++ b/manager/.flake8
@@ -0,0 +1,3 @@
+[flake8]
+max-line-length = 200
+extend-ignore = E203 \ No newline at end of file
diff --git a/manager/.gitignore b/manager/.gitignore
new file mode 100644
index 00000000..4652def8
--- /dev/null
+++ b/manager/.gitignore
@@ -0,0 +1,18 @@
+__pycache__/
+.coverage
+.mypy_cache/
+.pytest_cache/
+node_modules/
+yarn.lock
+package-lock.json
+.pytype
+dist/
+.tox/
+.vscode/
+/pkg
+.podman-cache/
+docs/_build/*
+*junit.xml
+.build_kresd/
+.install_kresd/
+build/ \ No newline at end of file
diff --git a/manager/.gitlab-ci.yml b/manager/.gitlab-ci.yml
new file mode 100644
index 00000000..60d8b361
--- /dev/null
+++ b/manager/.gitlab-ci.yml
@@ -0,0 +1,65 @@
+stages:
+ - check
+
+default:
+ image: $CI_REGISTRY/knot/knot-resolver/ci/manager:knot-$KNOT_VERSION
+ before_script:
+ - cd manager
+ - poetry --version
+ - poetry env use $PYTHON_INTERPRETER
+ tags:
+ - docker
+ - linux
+ - amd64
+
+lint:py3.11:
+ stage: check
+ script:
+ - poetry install --only main,dev,lint
+ - poe check
+ variables:
+ PYTHON_INTERPRETER: python3.11
+
+
+.unit: &unit
+ stage: check
+ script:
+ - poetry install --only main,dev,test
+ # create required directories that are in the default config; otherwise unit tests fail
+ - mkdir -p /var/cache/knot-resolver
+ - poe test
+ # the following command makes sure that the source root of the coverage file is at $gitroot
+ - poetry run bash -c "cd ..; coverage combine manager/.coverage; coverage xml"
+ artifacts:
+ reports:
+ coverage_report:
+ coverage_format: cobertura
+ path: coverage.xml
+ junit: manager/unit.junit.xml
+ paths:
+ - manager/unit.junit.xml
+
+unit:py3.7:
+ <<: *unit
+ variables:
+ PYTHON_INTERPRETER: python3.7
+
+unit:py3.8:
+ <<: *unit
+ variables:
+ PYTHON_INTERPRETER: python3.8
+
+unit:py3.9:
+ <<: *unit
+ variables:
+ PYTHON_INTERPRETER: python3.9
+
+unit:py3.10:
+ <<: *unit
+ variables:
+ PYTHON_INTERPRETER: python3.10
+
+unit:py3.11:
+ <<: *unit
+ variables:
+ PYTHON_INTERPRETER: python3.11 \ No newline at end of file
diff --git a/manager/.python-version b/manager/.python-version
new file mode 100644
index 00000000..22c263a4
--- /dev/null
+++ b/manager/.python-version
@@ -0,0 +1,4 @@
+3.6.12
+3.7.9
+3.8.7
+3.9.1
diff --git a/manager/ARCHITECTURE.md b/manager/ARCHITECTURE.md
new file mode 100644
index 00000000..18df7885
--- /dev/null
+++ b/manager/ARCHITECTURE.md
@@ -0,0 +1,41 @@
+# Inner architecture of the manager
+
+![architecture diagram](docs/img/manager_architecture_diagram.svg)
+
+## API
+
+The API server is implemented using [`aiohttp`](https://docs.aiohttp.org/en/stable/). This framework provides the application skeleton and manages the application runtime. The manager is actually a normal web application, with the slight difference that we don't save data in a database but rather modify the system's state.
+
+## Data processing
+
+From the web framework, we receive data as plain strings. After the processing steps described below, we end up with a fully typed object holding a valid configuration (or an exception describing the error).
+
+### Parsing
+
+We currently support YAML and JSON, deciding between them based on the `Content-Type` header (JSON being the default if no `Content-Type` header is provided). We use Python's [built-in JSON parser](https://docs.python.org/3/library/json.html) and [`PyYAML`](https://pyyaml.org/).
+
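+A simplified sketch of this dispatch (not the actual manager code; the media-type names are illustrative):
+
+```python
+import json
+
+import yaml  # PyYAML
+
+
+def parse_body(body: str, content_type: str = "application/json"):
+    """Parse a request body as JSON (the default) or YAML, based on Content-Type."""
+    if content_type == "application/json":
+        return json.loads(body)
+    if content_type in ("application/yaml", "text/vnd.yaml"):
+        return yaml.safe_load(body)
+    raise ValueError(f"unsupported Content-Type: {content_type}")
+```
+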
+### Schema and type validation
+
+The parsing step returns a dict-like object which does not provide any guarantees about its content. We map the values from this object onto a proper class object based on Python's native type annotations. The code to do this is custom-made; no external libraries are needed.
+
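+A minimal sketch of the idea (simple scalar types only; this is not the actual manager code):
+
+```python
+from typing import get_type_hints
+
+
+class SchemaNode:
+    """Map a parsed dict onto typed attributes declared via annotations."""
+
+    def __init__(self, data: dict) -> None:
+        for name, typ in get_type_hints(type(self)).items():
+            if name not in data:
+                raise ValueError(f"missing field: {name}")
+            if not isinstance(data[name], typ):
+                raise ValueError(f"field {name}: expected {typ.__name__}")
+            setattr(self, name, data[name])
+
+
+class CacheConfig(SchemaNode):
+    storage: str
+
+
+cache = CacheConfig({"storage": "/var/cache/knot-resolver"})
+```
+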
+### Normalization
+
+After we move the configuration into the typed objects, we need to normalize its values for further use. For example, all `auto` values should be replaced by the real inferred values. The result of this step is yet another typed object, but of a different type than the input one, so that we can statically distinguish between normalized and non-normalized config data.
+
+## Actual manager
+
+The actual core of the whole application is simply called the manager. It keeps a high-level view of the system's state and performs all operations necessary to change that state to the desired one. It does not interact with the system directly; the majority of interactions are hidden behind abstract backends.
+
+Every other part of the processing pipeline is fully concurrent; the manager is the place where synchronization happens.
+
+## Backends
+
+The Knot Resolver Manager supports several backends, more specifically several service managers that can run our workers. The main one, `systemd`, has several variants so that it can run even without privileges. The other currently supported option is `supervisord`.
+
+The backend is chosen automatically on startup, based on the available privileges and other running software. This decision can be overridden manually using a command-line option.
+
+# Partial config updates
+
+The pipeline described above works well when the user provides the full configuration through the API. However, some users might want to make only partial changes, as that allows several independent client applications to change different parts of the config without explicit synchronization on their part.
+
+When a user submits a partial config, we parse it and change the last used config accordingly. The change happens before the normalization step, as that is the first step that modifies the provided data.
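+
+As an illustration (a sketch assuming the management HTTP API listens on 127.0.0.1:5000, as in the development config), a client could change a single subtree of the configuration:
+
+```python
+import json
+from urllib.request import Request, urlopen
+
+# PUT a partial change at a JSON-pointer path under /v1/config
+req = Request(
+    "http://127.0.0.1:5000/v1/config/logging/level",
+    data=json.dumps("debug").encode(),
+    method="PUT",
+)
+urlopen(req)
+```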
diff --git a/manager/ERROR_HANDLING.md b/manager/ERROR_HANDLING.md
new file mode 100644
index 00000000..770227b8
--- /dev/null
+++ b/manager/ERROR_HANDLING.md
@@ -0,0 +1,60 @@
+# Assumptions
+
+Our main design goal is that **the manager MUST NOT be a required component.** Domains must be resolvable even in the absence of the manager. We want this for backwards compatibility with the way `kresd` has worked before, but another good reason is that `kresd` has been battle-tested and is reasonably reliable. We can't say the same about the manager, as we have no practical experience with it at the time of writing.
+
+This goal leads us to use external service managers like systemd. The manager is therefore "just" a tool for configuring service managers. If we crash, the `kresd` processes keep running.
+
+# When can we expect errors
+
+The majority of errors can meaningfully happen only when changing configuration, which we do at different stages of the manager's lifecycle. We change the service managers' configuration on the manager's startup and shutdown, and whenever a configuration change is requested (by a signal or an HTTP request). Each of these situations can have a different error-handling mechanism to match the user's expectations.
+
+In addition to the errors mentioned above, we can sometimes detect that future configuration changes would fail. The manager has a periodic watchdog monitoring the health of the system, detecting failures before they actually happen.
+
+To sum it up, errors can be raised:
+* on configuration changes
+ * during startup
+ * in response to a config change request
+ * on shutdown
+* proactively from our periodic watchdog
+
+
+# How should we handle errors
+
+## Errors on startup
+
+**All errors should be fatal.** If something goes wrong, it's better to stop immediately before we make anything worse. Also, if we fail to start, the user is more likely to notice.
+
+## Error handling after config change requests
+
+**All errors that stem from the configuration change should be reported, and the manager should keep running.** Before the actual change, though, the watchdog should be invoked manually.
+
+## Error handling during shutdown
+
+**All errors should be fatal.** It does not make sense to try to correct any problems at that point.
+
+## Error handling from watchdog
+
+```
+error_counter = 0
+
+on error:
+ if error_counter > ERROR_COUNTER_THRESHOLD:
+ raise a fatal error
+
+ error_counter += 1
+ try to fix the situation
+ if unsuccessful, raise a fatal error
+
+
+every ERROR_COUNTER_DECREASE_INTERVAL:
+ if error_counter > 0:
+ error_counter -= 1
+```
+
+Reasonable constants are probably:
+```
+ERROR_COUNTER_THRESHOLD = 2
+ERROR_COUNTER_DECREASE_INTERVAL = 30min
+```
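+
+A runnable Python sketch of this logic (the names are illustrative; this is not the actual manager code):
+
+```python
+ERROR_COUNTER_THRESHOLD = 2
+ERROR_COUNTER_DECREASE_INTERVAL_SEC = 30 * 60
+
+
+class Watchdog:
+    def __init__(self) -> None:
+        self.error_counter = 0
+
+    def on_error(self) -> None:
+        if self.error_counter > ERROR_COUNTER_THRESHOLD:
+            raise RuntimeError("watchdog: too many errors, giving up")
+        self.error_counter += 1
+        if not self.try_to_fix():
+            raise RuntimeError("watchdog: could not fix the failure")
+
+    def on_timer_tick(self) -> None:
+        # called every ERROR_COUNTER_DECREASE_INTERVAL_SEC
+        if self.error_counter > 0:
+            self.error_counter -= 1
+
+    def try_to_fix(self) -> bool:
+        # backend-specific recovery would go here
+        return True
+```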
+
+
diff --git a/manager/README.md b/manager/README.md
new file mode 100644
index 00000000..dce1eb29
--- /dev/null
+++ b/manager/README.md
@@ -0,0 +1,85 @@
+# Knot Resolver Manager
+
+Knot Resolver Manager is a configuration tool for [Knot Resolver](https://gitlab.nic.cz/knot/knot-resolver). The Manager hides the complexity of running several independent resolver processes, providing declarative YAML/JSON configuration, zero-downtime reconfiguration and an optional HTTP API for dynamic changes.
+
+## Development environment
+
+### Reproducible development environment
+
+Because we want to support multiple versions of Python with one codebase, we develop against the oldest supported version and then check in our CI that it works with newer Python versions. Your distro's Python runtime may be of a different version than the one we target, so we attempt to isolate everything from the system we are running on.
+
+Install these tools:
+* [pyenv](https://github.com/pyenv/pyenv#installation) - a tool for switching between Python versions without affecting the system (can be installed using distro's package manager)
+* [Poetry](https://python-poetry.org/docs/#installation) - dependency management (note: do not install the package via pip, follow instructions in Poetry's official documentation)
+
+Note that you need a recent version of Poetry; the setup was tested with Poetry version 1.1.7. Due to its ability to switch between Python versions, it has to be installed separately to work correctly. Make sure to follow [the latest setup guide](https://python-poetry.org/docs/#installation).
+
+After installing the tools above, the actual fully-featured development environment can be set up using these commands:
+
+```sh
+pyenv install 3.6.12
+pyenv install 3.7.9
+pyenv install 3.8.7
+pyenv install 3.9.1
+poetry env use $(pyenv which python)
+poetry install
+```
+
+With this environment, **everything else should just work**. You can run the same checks the CI runs; all commands listed below should pass. If something fails even though you followed all the steps above, please [open a new issue](https://gitlab.nic.cz/knot/knot-resolver-manager/-/issues/new).
+
+### Minimal development environment
+
+The only global tools that are strictly required are `Python` and `pip` (or another way to install PyPI packages). You can have a look at the `pyproject.toml` file, manually install the other dependencies you need and be done with it. All `poe` commands (see below) can be run manually too; see their definitions in `pyproject.toml`. We can't, however, guarantee that there won't be any errors.
+
+### Common tasks and interactions with the project
+
+After setting up the environment, you should be able to interact with the project using the `./poe` script. Common actions are:
+
+* `poe run` - runs the manager from the source
+* `poe docs` - creates HTML documentation
+* `poe test` - unit tests
+* `poe tox` - unit tests in all supported Python versions (must be run inside the virtualenv, otherwise it fails to find the multiple Python versions)
+* `poe check` - static code analysis
+* `poe format` - runs the code formatter
+* `poe fixdeps` - update installed dependencies according to the project's configuration
+* `poe clean` - clean up the repository, removing unwanted files
+* `poe integration` - run the integration tests
+
+All possible commands can be listed by running the `poe` command without arguments. The definition of these commands can be found in the `pyproject.toml` file.
+
+If you don't want to type the `./` prefix, you can install the [PoeThePoet](https://github.com/nat-n/poethepoet) Python package globally and call `poe` directly. I would also recommend setting up its tab completion; instructions can be found on [their GitHub page](https://github.com/nat-n/poethepoet#enable-tab-completion-for-your-shell).
+
+### Contributing
+
+Before committing, please ensure that both `poe check` and `poe test` pass. Both commands are run in CI, and if they don't pass, the CI fails.
+
+### Packaging
+
+This project uses [`apkg`](https://gitlab.nic.cz/packaging/apkg) for packaging. See [`distro/README.md`](distro/README.md) for packaging specific instructions.
+
+## FAQ
+
+### What are all those dev dependencies for?
+
+Short answer - mainly for managing other dependencies. By using dependency management systems within the project, anyone can start developing after installing just a few core tools. Everything else will be handled automagically. The main concept behind it is that there should be nothing that can be run only in CI.
+
+* core dependencies which you have to install manually
+ * pyenv
+ * A tool which allows you to install any version of Python regardless of your system's default. The version used by default in the project is configured in the file `.python-version`.
+ * We should all be developing on the same version, because otherwise we might not be able to reproduce each other's bugs.
+ * Written in pure shell, no dependencies on Python. Should therefore work on any Unix-like system.
+ * Poetry
+ * A dependency management system for Python libraries. Normally, all libraries in Python are installed system-wide and depend on the system's Python version. By using virtual environments managed by Poetry, configured through pyenv to use the correct Python version, we can specify the versions of the dependencies in any way we like.
+ * Follows PEP 518 and uses the `pyproject.toml` file for all of its configuration.
+ * Written in Python, therefore it's problematic if installed system-wide as an ordinary Python package (because it would be unavailable in its own virtual environment).
+* automatically managed dependencies
+ * PoeThePoet - A task management system, or in other words a glorified switch statement calling other tools. Used for simplifying interactions with the project.
+ * pytest, pytest-cov - unit testing
+ * pylint, flake8 - linting
+ * black - autoformatter (might be removed in the future if not used in practice)
+ * tox - testing automation
+ * tox-pyenv - plugin for tox that makes use of pyenv provided Python binaries
+
+### Why Poetry? Why should I learn a new tool?
+
+This blog post explains it nicely - https://muttdata.ai/blog/2020/08/21/a-poetic-apology.html.
diff --git a/manager/build.py b/manager/build.py
new file mode 100644
index 00000000..5406433b
--- /dev/null
+++ b/manager/build.py
@@ -0,0 +1,16 @@
+from typing import Any, Dict
+
+from setuptools import Extension
+
+
+def build(setup_kwargs: Dict[Any, Any]) -> None:
+ setup_kwargs.update(
+ {
+ "ext_modules": [
+ Extension(
+ name="knot_resolver_manager.kresd_controller.supervisord.plugin.notify",
+ sources=["knot_resolver_manager/kresd_controller/supervisord/plugin/notifymodule.c"],
+ ),
+ ]
+ }
+ )
diff --git a/manager/etc/knot-resolver/.gitignore b/manager/etc/knot-resolver/.gitignore
new file mode 100644
index 00000000..fb64123a
--- /dev/null
+++ b/manager/etc/knot-resolver/.gitignore
@@ -0,0 +1,2 @@
+runtime/
+cache/ \ No newline at end of file
diff --git a/manager/etc/knot-resolver/config.dev.yml b/manager/etc/knot-resolver/config.dev.yml
new file mode 100644
index 00000000..859de8f5
--- /dev/null
+++ b/manager/etc/knot-resolver/config.dev.yml
@@ -0,0 +1,63 @@
+rundir: ./runtime
+workers: 1
+management:
+ interface: 127.0.0.1@5000
+cache:
+ storage: ./cache
+logging:
+ level: notice
+ groups:
+ - manager
+ - supervisord
+network:
+ listen:
+ - interface: 127.0.0.1@5353
+views:
+ - subnets: [127.0.0.0/24]
+ tags: [t01, t02, t03]
+ options:
+ dns64: false
+ - subnets: [0.0.0.0/0, "::/0"]
+ answer: refused
+ - subnets: [10.0.10.0/24]
+ answer: allow
+local-data:
+ ttl: 60m
+ nodata: false
+ root-fallback-addresses:
+ j.root-servers.net.: ["2001:503:c27::2:30", "192.58.128.30"]
+ l.root-servers.net.: '199.7.83.42'
+ m.root-servers.net.: '202.12.27.33'
+ # root-fallback-addresses-files: root.custom
+ addresses:
+ foo.bar: 127.0.0.1
+ # addresses-files: hosts.custom
+ records: |
+ example.net. TXT "foo bar"
+ A 192.168.2.3
+ A 192.168.2.4
+ local.example.org AAAA ::1
+ subtrees:
+ - type: empty
+ tags: [ t2 ]
+ roots: [ example1.org ]
+ - type: nxdomain
+ roots: [ sub4.example.org ]
+ rpz:
+ - file: runtime/blocklist.rpz
+ tags: [t01, t02]
+forward:
+ - subtree: '.'
+ options:
+ dnssec: true
+ authoritative: false
+ servers:
+ - address: [2001:148f:fffe::1, 185.43.135.1]
+ transport: tls
+ hostname: odvr.nic.cz
+ - address: [ 192.0.2.1, 192.0.2.2 ]
+ pin-sha256: ['YQ==', 'Wg==']
+ - subtree: 1.168.192.in-addr.arpa
+ options:
+ dnssec: false
+ servers: [ 192.0.2.1@5353 ]
diff --git a/manager/etc/knot-resolver/config.docker.yml b/manager/etc/knot-resolver/config.docker.yml
new file mode 100644
index 00000000..5c10d666
--- /dev/null
+++ b/manager/etc/knot-resolver/config.docker.yml
@@ -0,0 +1,12 @@
+workers: 2
+logging:
+ level: info
+network:
+ listen:
+ - interface: lo@53
+ - interface: lo@853
+ kind: dot
+ - interface: lo@443
+ kind: doh2
+management:
+ interface: 127.0.0.1@5000
diff --git a/manager/etc/knot-resolver/config.yml b/manager/etc/knot-resolver/config.yml
new file mode 100644
index 00000000..cf4da929
--- /dev/null
+++ b/manager/etc/knot-resolver/config.yml
@@ -0,0 +1,11 @@
+rundir: /run/knot-resolver
+workers: 2
+cache:
+ storage: /var/cache/knot-resolver
+logging:
+ level: info
+network:
+ listen:
+ - interface: 127.0.0.1@53
+management:
+ unix-socket: /run/knot-resolver/manager.sock
diff --git a/manager/knot_resolver_manager/__init__.py b/manager/knot_resolver_manager/__init__.py
new file mode 100644
index 00000000..3dc1f76b
--- /dev/null
+++ b/manager/knot_resolver_manager/__init__.py
@@ -0,0 +1 @@
+__version__ = "0.1.0"
diff --git a/manager/knot_resolver_manager/__main__.py b/manager/knot_resolver_manager/__main__.py
new file mode 100644
index 00000000..89eabd56
--- /dev/null
+++ b/manager/knot_resolver_manager/__main__.py
@@ -0,0 +1,15 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+def run():
+ # throws nice syntax error on old Python versions:
+ 0_0 # Python >= 3.7 required
+
+ from knot_resolver_manager import main
+
+ main.main()
+
+
+if __name__ == "__main__":
+ run()
diff --git a/manager/knot_resolver_manager/cli/__init__.py b/manager/knot_resolver_manager/cli/__init__.py
new file mode 100644
index 00000000..80c75ae1
--- /dev/null
+++ b/manager/knot_resolver_manager/cli/__init__.py
@@ -0,0 +1,5 @@
+from pathlib import Path
+
+from knot_resolver_manager.datamodel.globals import Context, set_global_validation_context
+
+set_global_validation_context(Context(Path(".")))
diff --git a/manager/knot_resolver_manager/cli/__main__.py b/manager/knot_resolver_manager/cli/__main__.py
new file mode 100644
index 00000000..88a83a67
--- /dev/null
+++ b/manager/knot_resolver_manager/cli/__main__.py
@@ -0,0 +1,4 @@
+from knot_resolver_manager.cli.main import main
+
+if __name__ == "__main__":
+ main()
diff --git a/manager/knot_resolver_manager/cli/cmd/completion.py b/manager/knot_resolver_manager/cli/cmd/completion.py
new file mode 100644
index 00000000..87a91838
--- /dev/null
+++ b/manager/knot_resolver_manager/cli/cmd/completion.py
@@ -0,0 +1,95 @@
+import argparse
+from enum import Enum
+from typing import List, Tuple, Type
+
+from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
+
+
+class Shells(Enum):
+ BASH = 0
+ FISH = 1
+
+
+@register_command
+class CompletionCommand(Command):
+ def __init__(self, namespace: argparse.Namespace) -> None:
+ super().__init__(namespace)
+ self.shell: Shells = namespace.shell
+ self.space = namespace.space
+ self.comp_args: List[str] = namespace.comp_args
+
+ if self.space:
+ self.comp_args.append("")
+
+ @staticmethod
+ def register_args_subparser(
+ subparser: "argparse._SubParsersAction[argparse.ArgumentParser]",
+ ) -> Tuple[argparse.ArgumentParser, "Type[Command]"]:
+ completion = subparser.add_parser("completion", help="commands auto-completion")
+ completion.add_argument(
+ "--space",
+ help="space after last word, returns all possible folowing options",
+ dest="space",
+ action="store_true",
+ default=False,
+ )
+ completion.add_argument(
+ "comp_args",
+ type=str,
+ help="arguments to complete",
+ nargs="*",
+ )
+
+ shells_dest = "shell"
+ shells = completion.add_mutually_exclusive_group()
+ shells.add_argument("--bash", action="store_const", dest=shells_dest, const=Shells.BASH, default=Shells.BASH)
+ shells.add_argument("--fish", action="store_const", dest=shells_dest, const=Shells.FISH)
+
+ return completion, CompletionCommand
+
+ @staticmethod
+ def completion(args: List[str], parser: argparse.ArgumentParser) -> CompWords:
+ words: CompWords = {}
+ # for action in parser._actions:
+ # for opt in action.option_strings:
+ # words[opt] = action.help
+ # return words
+ return words
+
+ def run(self, args: CommandArgs) -> None:
+ pass
+ # subparsers = args.parser._subparsers
+ # words: CompWords = {}
+
+ # if subparsers:
+ # words = parser_words(subparsers._actions)
+
+ # uargs = iter(self.comp_args)
+ # for uarg in uargs:
+ # subparser = subparser_by_name(uarg, subparsers._actions) # pylint: disable=W0212
+
+ # if subparser:
+ # cmd: Command = subparser_command(subparser)
+ # subparser_args = self.comp_args[self.comp_args.index(uarg) + 1 :]
+ # if subparser_args:
+ # words = cmd.completion(subparser_args, subparser)
+ # break
+ # elif uarg in ["-s", "--socket"]:
+ # # if arg is socket config, skip next arg
+ # next(uargs)
+ # continue
+ # elif uarg in words:
+ # # uarg is a valid arg, continue
+ # continue
+ # else:
+ # raise ValueError(f"unknown argument: {uarg}")
+
+ # # print completion words
+ # # based on required bash/fish shell format
+ # if self.shell == Shells.BASH:
+ # print(" ".join(words))
+ # elif self.shell == Shells.FISH:
+ # # TODO: FISH completion implementation
+ # pass
+ # else:
+ # raise ValueError(f"unexpected value of {Shells}: {self.shell}")
diff --git a/manager/knot_resolver_manager/cli/cmd/config.py b/manager/knot_resolver_manager/cli/cmd/config.py
new file mode 100644
index 00000000..28994d8b
--- /dev/null
+++ b/manager/knot_resolver_manager/cli/cmd/config.py
@@ -0,0 +1,255 @@
+import argparse
+import json
+import sys
+from enum import Enum
+from typing import List, Optional, Tuple, Type
+
+import yaml
+from typing_extensions import Literal
+
+from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver_manager.utils.modeling import try_to_parse
+from knot_resolver_manager.utils.requests import request
+
+
+class Operations(Enum):
+ SET = 0
+ DELETE = 1
+ GET = 2
+
+
+class Formats(Enum):
+ JSON = 0
+ YAML = 1
+
+
+def operation_to_method(operation: Operations) -> Literal["PUT", "GET", "DELETE"]:
+ if operation == Operations.SET:
+ return "PUT"
+ elif operation == Operations.DELETE:
+ return "DELETE"
+ return "GET"
+
+
+def reformat(json_str: str, req_format: Formats) -> str:
+ d = json.loads(json_str)
+ if req_format == Formats.YAML:
+ return yaml.dump(d, indent=4)
+ return json.dumps(d, indent=4)
+
+
+def json_dump(yaml_or_json_str: str) -> str:
+ return json.dumps(try_to_parse(yaml_or_json_str))
+
+
+# def _properties_words(props: Dict[str, Any]) -> CompWords:
+# words: CompWords = {}
+# for name, prop in props.items():
+# words[name] = prop["description"] if "description" in prop else None
+# return words
+
+
+# def _path_comp_words(node: str, nodes: List[str], props: Dict[str, Any]) -> CompWords:
+# i = nodes.index(node)
+# ln = len(nodes[i:])
+
+# # if node is last in path, return all possible words on this level
+# if ln == 1:
+# return _properties_words(props)
+# # if node is valid
+# elif node in props:
+# node_schema = props[node]
+
+# if "anyOf" in node_schema:
+# for item in node_schema["anyOf"]:
+# print(item)
+
+# elif "type" not in node_schema:
+# pass
+
+# elif node_schema["type"] == "array":
+# if ln > 2:
+# # skip index for item in array
+# return _path_comp_words(nodes[i + 2], nodes, node_schema["items"]["properties"])
+# if "enum" in node_schema["items"]:
+# print(node_schema["items"]["enum"])
+# return {"0": "first array item", "-": "last array item"}
+# elif node_schema["type"] == "object":
+# if "additionalProperties" in node_schema:
+# print(node_schema)
+# return _path_comp_words(nodes[i + 1], nodes, node_schema["properties"])
+# return {}
+
+# # arrays/lists must be handled separately
+# if node_schema["type"] == "array":
+# if ln > 2:
+# # skip index for item in array
+# return _path_comp_words(nodes[i + 2], nodes, node_schema["items"]["properties"])
+# return {"0": "first array item", "-": "last array item"}
+# return _path_comp_words(nodes[i + 1], nodes, node_schema["properties"])
+# else:
+# # if node is not last or valid, value error
+# raise ValueError(f"unknown config path node: {node}")
+
+
+@register_command
+class ConfigCommand(Command):
+ def __init__(self, namespace: argparse.Namespace) -> None:
+ super().__init__(namespace)
+ self.path: str = str(namespace.path) if hasattr(namespace, "path") else ""
+ self.format: Formats = namespace.format if hasattr(namespace, "format") else Formats.JSON
+ self.operation: Optional[Operations] = namespace.operation if hasattr(namespace, "operation") else None
+ self.file: Optional[str] = namespace.file if hasattr(namespace, "file") else None
+
+ @staticmethod
+ def register_args_subparser(
+ subparser: "argparse._SubParsersAction[argparse.ArgumentParser]",
+ ) -> Tuple[argparse.ArgumentParser, "Type[Command]"]:
+ config = subparser.add_parser("config", help="Performs operations on the running resolver's configuration.")
+ path_help = "Optional, path (JSON pointer, RFC6901) to the configuration resources. By default, the entire configuration is selected."
+
+ config_subparsers = config.add_subparsers(help="operation type")
+
+ # GET operation
+ get = config_subparsers.add_parser("get", help="Get current configuration from the resolver.")
+ get.set_defaults(operation=Operations.GET)
+
+ get.add_argument(
+ "-p",
+ "--path",
+ help=path_help,
+ action="store",
+ type=str,
+ default="",
+ )
+ get.add_argument(
+ "file",
+ help="Optional, path to the file where to save exported configuration data. If not specified, data will be printed.",
+ type=str,
+ nargs="?",
+ )
+
+ get_formats = get.add_mutually_exclusive_group()
+ get_formats.add_argument(
+ "--json",
+ help="Get configuration data in JSON format, default.",
+ const=Formats.JSON,
+ action="store_const",
+ dest="format",
+ )
+ get_formats.add_argument(
+ "--yaml",
+ help="Get configuration data in YAML format.",
+ const=Formats.YAML,
+ action="store_const",
+ dest="format",
+ )
+
+ # SET operation
+ set = config_subparsers.add_parser("set", help="Set new configuration for the resolver.")
+ set.set_defaults(operation=Operations.SET)
+
+ set.add_argument(
+ "-p",
+ "--path",
+ help=path_help,
+ action="store",
+ type=str,
+ default="",
+ )
+
+ value_or_file = set.add_mutually_exclusive_group()
+ value_or_file.add_argument(
+ "file",
+ help="Optional, path to file with new configuraion.",
+ type=str,
+ nargs="?",
+ )
+ value_or_file.add_argument(
+ "value",
+ help="Optional, new configuration value.",
+ type=str,
+ nargs="?",
+ )
+
+ set_formats = set.add_mutually_exclusive_group()
+ set_formats.add_argument(
+ "--json",
+ help="Set configuration data in JSON format, default.",
+ const=Formats.JSON,
+ action="store_const",
+ dest="format",
+ )
+ set_formats.add_argument(
+ "--yaml",
+ help="Set configuration data in YAML format.",
+ const=Formats.YAML,
+ action="store_const",
+ dest="format",
+ )
+
+ # DELETE operation
+ delete = config_subparsers.add_parser(
+ "delete", help="Delete given configuration property or list item at the given index."
+ )
+ delete.set_defaults(operation=Operations.DELETE)
+ delete.add_argument(
+ "-p",
+ "--path",
+ help=path_help,
+ action="store",
+ type=str,
+ default="",
+ )
+
+ return config, ConfigCommand
+
+ @staticmethod
+ def completion(args: List[str], parser: argparse.ArgumentParser) -> CompWords:
+ # words = parser_words(parser._actions) # pylint: disable=W0212
+
+ # for arg in args:
+ # if arg in words:
+ # continue
+ # elif arg.startswith("-"):
+ # return words
+ # elif arg == args[-1]:
+ # config_path = arg[1:].split("/") if arg.startswith("/") else arg.split("/")
+ # schema_props: Dict[str, Any] = KresConfig.json_schema()["properties"]
+ # return _path_comp_words(config_path[0], config_path, schema_props)
+ # else:
+ # break
+ return {}
+
+ def run(self, args: CommandArgs) -> None:
+ if not self.operation:
+ args.subparser.print_help()
+ sys.exit()
+
+ new_config = None
+ url = f"{args.socket}/v1/config{self.path}"
+ method = operation_to_method(self.operation)
+
+ if self.operation == Operations.SET:
+ if self.file:
+ try:
+ with open(self.file, "r") as f:
+ new_config = f.read()
+ except FileNotFoundError:
+ new_config = self.file
+ else:
+ # use STDIN also when file is not specified
+ new_config = input("Type new configuration: ")
+
+ response = request(method, url, json_dump(new_config) if new_config else None)
+
+ if response.status != 200:
+ print(response)
+ sys.exit(1)
+
+ if self.operation == Operations.GET and self.file:
+ with open(self.file, "w") as f:
+ f.write(reformat(response.body, self.format))
+ print(f"saved to: {self.file}")
+ elif response.body:
+ print(reformat(response.body, self.format))
diff --git a/manager/knot_resolver_manager/cli/cmd/convert.py b/manager/knot_resolver_manager/cli/cmd/convert.py
new file mode 100644
index 00000000..9c7e5d2a
--- /dev/null
+++ b/manager/knot_resolver_manager/cli/cmd/convert.py
@@ -0,0 +1,74 @@
+import argparse
+import sys
+from pathlib import Path
+from typing import List, Optional, Tuple, Type
+
+from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver_manager.datamodel import KresConfig
+from knot_resolver_manager.datamodel.globals import (
+ Context,
+ reset_global_validation_context,
+ set_global_validation_context,
+)
+from knot_resolver_manager.utils.modeling import try_to_parse
+from knot_resolver_manager.utils.modeling.exceptions import DataParsingError, DataValidationError
+
+
+@register_command
+class ConvertCommand(Command):
+ def __init__(self, namespace: argparse.Namespace) -> None:
+ super().__init__(namespace)
+ self.input_file: str = namespace.input_file
+ self.output_file: Optional[str] = namespace.output_file
+ self.strict: bool = namespace.strict
+
+ @staticmethod
+ def register_args_subparser(
+ subparser: "argparse._SubParsersAction[argparse.ArgumentParser]",
+ ) -> Tuple[argparse.ArgumentParser, "Type[Command]"]:
+ convert = subparser.add_parser("convert", help="Converts JSON or YAML configuration to Lua script.")
+ convert.set_defaults(strict=True)
+ convert.add_argument(
+ "--no-strict",
+ help="Ignore strict rules during validation, e.g. path/file existence.",
+ action="store_false",
+ dest="strict",
+ )
+ convert.add_argument(
+ "input_file",
+ type=str,
+ help="File with configuration in YAML or JSON format.",
+ )
+
+ convert.add_argument(
+ "output_file",
+ type=str,
+ nargs="?",
+ help="Optional, output file for converted configuration in Lua script. If not specified, converted configuration is printed.",
+ default=None,
+ )
+
+ return convert, ConvertCommand
+
+ @staticmethod
+ def completion(args: List[str], parser: argparse.ArgumentParser) -> CompWords:
+ return {}
+
+ def run(self, args: CommandArgs) -> None:
+ with open(self.input_file, "r") as f:
+ data = f.read()
+
+ try:
+ parsed = try_to_parse(data)
+ set_global_validation_context(Context(Path(self.input_file).parent, self.strict))
+ lua = KresConfig(parsed).render_lua()
+ reset_global_validation_context()
+ except (DataParsingError, DataValidationError) as e:
+ print(e)
+ sys.exit(1)
+
+ if self.output_file:
+ with open(self.output_file, "w") as f:
+ f.write(lua)
+ else:
+ print(lua)
diff --git a/manager/knot_resolver_manager/cli/cmd/help.py b/manager/knot_resolver_manager/cli/cmd/help.py
new file mode 100644
index 00000000..d374005e
--- /dev/null
+++ b/manager/knot_resolver_manager/cli/cmd/help.py
@@ -0,0 +1,24 @@
+import argparse
+from typing import List, Tuple, Type
+
+from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
+
+
+@register_command
+class HelpCommand(Command):
+ def __init__(self, namespace: argparse.Namespace) -> None:
+ super().__init__(namespace)
+
+ def run(self, args: CommandArgs) -> None:
+ args.parser.print_help()
+
+ @staticmethod
+ def completion(args: List[str], parser: argparse.ArgumentParser) -> CompWords:
+ return {}
+
+ @staticmethod
+ def register_args_subparser(
+ subparser: "argparse._SubParsersAction[argparse.ArgumentParser]",
+ ) -> Tuple[argparse.ArgumentParser, "Type[Command]"]:
+ stop = subparser.add_parser("help", help="show this help message and exit")
+ return stop, HelpCommand
diff --git a/manager/knot_resolver_manager/cli/cmd/metrics.py b/manager/knot_resolver_manager/cli/cmd/metrics.py
new file mode 100644
index 00000000..8b9c8028
--- /dev/null
+++ b/manager/knot_resolver_manager/cli/cmd/metrics.py
@@ -0,0 +1,45 @@
+import argparse
+import sys
+from typing import List, Optional, Tuple, Type
+
+from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver_manager.utils.requests import request
+
+
+@register_command
+class MetricsCommand(Command):
+ def __init__(self, namespace: argparse.Namespace) -> None:
+ self.file: Optional[str] = namespace.file
+
+ super().__init__(namespace)
+
+ @staticmethod
+ def register_args_subparser(
+ subparser: "argparse._SubParsersAction[argparse.ArgumentParser]",
+ ) -> Tuple[argparse.ArgumentParser, "Type[Command]"]:
+ metrics = subparser.add_parser("metrics", help="get prometheus metrics data")
+ metrics.add_argument(
+ "file",
+ help="Optional, file where to export Prometheus metrics. If not specified, the metrics are printed.",
+ nargs="?",
+ default=None,
+ )
+ return metrics, MetricsCommand
+
+ @staticmethod
+ def completion(args: List[str], parser: argparse.ArgumentParser) -> CompWords:
+ return {}
+
+ def run(self, args: CommandArgs) -> None:
+ url = f"{args.socket}/metrics"
+ response = request("GET", url)
+
+ if response.status == 200:
+ if self.file:
+ with open(self.file, "w") as f:
+ f.write(response.body)
+ else:
+ print(response.body)
+ else:
+ print(response)
+ sys.exit(1)
diff --git a/manager/knot_resolver_manager/cli/cmd/reload.py b/manager/knot_resolver_manager/cli/cmd/reload.py
new file mode 100644
index 00000000..5818a54c
--- /dev/null
+++ b/manager/knot_resolver_manager/cli/cmd/reload.py
@@ -0,0 +1,36 @@
+import argparse
+import sys
+from typing import List, Tuple, Type
+
+from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver_manager.utils.requests import request
+
+
+@register_command
+class ReloadCommand(Command):
+ def __init__(self, namespace: argparse.Namespace) -> None:
+ super().__init__(namespace)
+
+ @staticmethod
+ def register_args_subparser(
+ subparser: "argparse._SubParsersAction[argparse.ArgumentParser]",
+ ) -> Tuple[argparse.ArgumentParser, "Type[Command]"]:
+ reload = subparser.add_parser(
+ "reload",
+ help="Tells the resolver to reload YAML configuration file."
+ " Old processes are replaced by new ones (with updated configuration) using rolling restarts."
+ " So there will be no DNS service unavailability during reload operation.",
+ )
+
+ return reload, ReloadCommand
+
+ @staticmethod
+ def completion(args: List[str], parser: argparse.ArgumentParser) -> CompWords:
+ return {}
+
+ def run(self, args: CommandArgs) -> None:
+ response = request("POST", f"{args.socket}/reload")
+
+ if response.status != 200:
+ print(response)
+ sys.exit(1)
diff --git a/manager/knot_resolver_manager/cli/cmd/schema.py b/manager/knot_resolver_manager/cli/cmd/schema.py
new file mode 100644
index 00000000..0d8a0324
--- /dev/null
+++ b/manager/knot_resolver_manager/cli/cmd/schema.py
@@ -0,0 +1,55 @@
+import argparse
+import json
+import sys
+from typing import List, Optional, Tuple, Type
+
+from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver_manager.datamodel.config_schema import KresConfig
+from knot_resolver_manager.utils.requests import request
+
+
+@register_command
+class SchemaCommand(Command):
+ def __init__(self, namespace: argparse.Namespace) -> None:
+ super().__init__(namespace)
+ self.live: bool = namespace.live
+ self.file: Optional[str] = namespace.file
+
+ @staticmethod
+ def register_args_subparser(
+ subparser: "argparse._SubParsersAction[argparse.ArgumentParser]",
+ ) -> Tuple[argparse.ArgumentParser, "Type[Command]"]:
+ schema = subparser.add_parser(
+ "schema", help="Shows JSON-schema repersentation of the Knot Resolver's configuration."
+ )
+ schema.add_argument(
+ "-l",
+ "--live",
+ help="Get configuration JSON-schema from the running resolver. Requires connection to the management API.",
+ action="store_true",
+ default=False,
+ )
+ schema.add_argument("file", help="Optional, file where to export JSON-schema.", nargs="?", default=None)
+
+ return schema, SchemaCommand
+
+ @staticmethod
+ def completion(args: List[str], parser: argparse.ArgumentParser) -> CompWords:
+ return {}
+ # return parser_words(parser._actions) # pylint: disable=W0212
+
+ def run(self, args: CommandArgs) -> None:
+ if self.live:
+ response = request("GET", f"{args.socket}/schema")
+ if response.status != 200:
+ print(response)
+ sys.exit(1)
+ schema = response.body
+ else:
+ schema = json.dumps(KresConfig.json_schema(), indent=4)
+
+ if self.file:
+ with open(self.file, "w") as f:
+ f.write(schema)
+ else:
+ print(schema)
diff --git a/manager/knot_resolver_manager/cli/cmd/stop.py b/manager/knot_resolver_manager/cli/cmd/stop.py
new file mode 100644
index 00000000..e792dec3
--- /dev/null
+++ b/manager/knot_resolver_manager/cli/cmd/stop.py
@@ -0,0 +1,32 @@
+import argparse
+import sys
+from typing import List, Tuple, Type
+
+from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver_manager.utils.requests import request
+
+
+@register_command
+class StopCommand(Command):
+ def __init__(self, namespace: argparse.Namespace) -> None:
+ super().__init__(namespace)
+
+ @staticmethod
+ def register_args_subparser(
+ subparser: "argparse._SubParsersAction[argparse.ArgumentParser]",
+ ) -> Tuple[argparse.ArgumentParser, "Type[Command]"]:
+ stop = subparser.add_parser(
+ "stop", help="Tells the resolver to shutdown everthing. No process will run after this command."
+ )
+ return stop, StopCommand
+
+ def run(self, args: CommandArgs) -> None:
+ response = request("POST", f"{args.socket}/stop")
+
+ if response.status != 200:
+ print(response)
+ sys.exit(1)
+
+ @staticmethod
+ def completion(args: List[str], parser: argparse.ArgumentParser) -> CompWords:
+ return {}
diff --git a/manager/knot_resolver_manager/cli/cmd/validate.py b/manager/knot_resolver_manager/cli/cmd/validate.py
new file mode 100644
index 00000000..0770d757
--- /dev/null
+++ b/manager/knot_resolver_manager/cli/cmd/validate.py
@@ -0,0 +1,63 @@
+import argparse
+import sys
+from pathlib import Path
+from typing import List, Tuple, Type
+
+from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
+from knot_resolver_manager.datamodel import KresConfig
+from knot_resolver_manager.datamodel.globals import (
+ Context,
+ reset_global_validation_context,
+ set_global_validation_context,
+)
+from knot_resolver_manager.utils.modeling import try_to_parse
+from knot_resolver_manager.utils.modeling.exceptions import DataParsingError, DataValidationError
+
+
+@register_command
+class ValidateCommand(Command):
+ def __init__(self, namespace: argparse.Namespace) -> None:
+ super().__init__(namespace)
+ self.input_file: str = namespace.input_file
+ self.strict: bool = namespace.strict
+
+ @staticmethod
+ def register_args_subparser(
+ subparser: "argparse._SubParsersAction[argparse.ArgumentParser]",
+ ) -> Tuple[argparse.ArgumentParser, "Type[Command]"]:
+ validate = subparser.add_parser("validate", help="Validates configuration in JSON or YAML format.")
+ validate.set_defaults(strict=True)
+ validate.add_argument(
+ "--no-strict",
+ help="Ignore strict rules during validation, e.g. path/file existence.",
+ action="store_false",
+ dest="strict",
+ )
+ validate.add_argument(
+ "input_file",
+ type=str,
+ nargs="?",
+ help="File with configuration in YAML or JSON format.",
+ default=None,
+ )
+
+ return validate, ValidateCommand
+
+ @staticmethod
+ def completion(args: List[str], parser: argparse.ArgumentParser) -> CompWords:
+ return {}
+
+ def run(self, args: CommandArgs) -> None:
+ if self.input_file:
+ with open(self.input_file, "r") as f:
+ data = f.read()
+ else:
+ data = input("Type configuration to validate: ")
+
+ try:
+ resolve_root = Path(self.input_file).parent if self.input_file else None
+ set_global_validation_context(Context(resolve_root, self.strict))
+ KresConfig(try_to_parse(data))
+ reset_global_validation_context()
+ except (DataParsingError, DataValidationError) as e:
+ print(e)
+ sys.exit(1)
diff --git a/manager/knot_resolver_manager/cli/command.py b/manager/knot_resolver_manager/cli/command.py
new file mode 100644
index 00000000..6533de46
--- /dev/null
+++ b/manager/knot_resolver_manager/cli/command.py
@@ -0,0 +1,66 @@
+import argparse
+from abc import ABC, abstractmethod # pylint: disable=[no-name-in-module]
+from pathlib import Path
+from typing import Dict, List, Optional, Tuple, Type, TypeVar
+from urllib.parse import quote
+
+T = TypeVar("T", bound=Type["Command"])
+
+CompWords = Dict[str, Optional[str]]
+
+_registered_commands: List[Type["Command"]] = []
+
+
+def register_command(cls: T) -> T:
+ _registered_commands.append(cls)
+ return cls
+
+
+def get_help_command() -> Type["Command"]:
+ for command in _registered_commands:
+ if command.__name__ == "HelpCommand":
+ return command
+ raise ValueError("missing HelpCommand")
+
+
+def install_commands_parsers(parser: argparse.ArgumentParser) -> None:
+ subparsers = parser.add_subparsers(help="command type")
+ for command in _registered_commands:
+ subparser, typ = command.register_args_subparser(subparsers)
+ subparser.set_defaults(command=typ, subparser=subparser)
+
+
+class CommandArgs:
+ def __init__(self, namespace: argparse.Namespace, parser: argparse.ArgumentParser) -> None:
+ self.namespace = namespace
+ self.parser = parser
+ self.subparser: argparse.ArgumentParser = namespace.subparser
+ self.command: Type["Command"] = namespace.command
+
+ self.socket: str = namespace.socket[0]
+ if Path(self.socket).exists():
+ self.socket = f'http+unix://{quote(self.socket, safe="")}/'
+ if self.socket.endswith("/"):
+ self.socket = self.socket[:-1]
+
+
+class Command(ABC):
+ @staticmethod
+ @abstractmethod
+ def register_args_subparser(
+ subparser: "argparse._SubParsersAction[argparse.ArgumentParser]",
+ ) -> Tuple[argparse.ArgumentParser, "Type[Command]"]:
+ raise NotImplementedError()
+
+ @abstractmethod
+ def __init__(self, namespace: argparse.Namespace) -> None: # pylint: disable=[unused-argument]
+ super().__init__()
+
+ @abstractmethod
+ def run(self, args: CommandArgs) -> None:
+ raise NotImplementedError()
+
+ @staticmethod
+ @abstractmethod
+ def completion(args: List[str], parser: argparse.ArgumentParser) -> CompWords:
+ raise NotImplementedError()
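Taken together, the `Command` ABC and the `@register_command` decorator mean a new subcommand only has to subclass `Command` and register itself; the auto-import in `main.py` (below) then picks it up. A minimal sketch assuming the framework above; the `ping` command itself is hypothetical and not part of this patch:

```python
# Hypothetical "ping" subcommand plugging into the registry above.
import argparse
from typing import List, Tuple, Type

from knot_resolver_manager.cli.command import Command, CommandArgs, CompWords, register_command
from knot_resolver_manager.utils.requests import request


@register_command
class PingCommand(Command):
    def __init__(self, namespace: argparse.Namespace) -> None:
        super().__init__(namespace)

    @staticmethod
    def register_args_subparser(
        subparser: "argparse._SubParsersAction[argparse.ArgumentParser]",
    ) -> Tuple[argparse.ArgumentParser, "Type[Command]"]:
        ping = subparser.add_parser("ping", help="check that the management API answers")
        return ping, PingCommand

    @staticmethod
    def completion(args: List[str], parser: argparse.ArgumentParser) -> CompWords:
        return {}

    def run(self, args: CommandArgs) -> None:
        # any cheap endpoint would do; /schema is used here only for illustration
        print(request("GET", f"{args.socket}/schema").status)
```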
diff --git a/manager/knot_resolver_manager/cli/kresctl.py b/manager/knot_resolver_manager/cli/kresctl.py
new file mode 100644
index 00000000..cbcc12a3
--- /dev/null
+++ b/manager/knot_resolver_manager/cli/kresctl.py
@@ -0,0 +1,49 @@
+import argparse
+
+from knot_resolver_manager.cli.command import CommandArgs
+
+
+class Kresctl:
+ def __init__(
+ self,
+ namespace: argparse.Namespace,
+ parser: argparse.ArgumentParser,
+ prompt: str = "kresctl",
+ ) -> None:
+ self.path = None
+ self.prompt = prompt
+ self.namespace = namespace
+ self.parser = parser
+
+ def execute(self):
+ if hasattr(self.namespace, "command"):
+ args = CommandArgs(self.namespace, self.parser)
+ command = args.command(self.namespace)
+ command.run(args)
+ else:
+ self.parser.print_help()
+
+ def _prompt_format(self) -> str:
+ bolt = "\033[1m"
+ white = "\033[38;5;255m"
+ reset = "\033[0;0m"
+
+ if self.path:
+ prompt = f"{bolt}[{self.prompt} {white}{self.path}{reset}{bolt}]"
+ else:
+ prompt = f"{bolt}{self.prompt}"
+ return f"{prompt}> {reset}"
+
+ def interactive(self):
+ try:
+ while True:
+ pass
+ # TODO: not working yet
+ # cmd = input(f"{self._prompt_format()}")
+ # namespace = self.parser.parse_args(cmd.split(" "))
+ # namespace.interactive = True
+ # namespace.socket = self.namespace.socket
+ # self.namespace = namespace
+ # self.execute()
+ except KeyboardInterrupt:
+ pass
diff --git a/manager/knot_resolver_manager/cli/main.py b/manager/knot_resolver_manager/cli/main.py
new file mode 100644
index 00000000..dff41df2
--- /dev/null
+++ b/manager/knot_resolver_manager/cli/main.py
@@ -0,0 +1,56 @@
+import argparse
+import importlib
+import os
+
+from knot_resolver_manager.cli.command import install_commands_parsers
+from knot_resolver_manager.cli.kresctl import Kresctl
+
+
+def autoimport_commands() -> None:
+ prefix = "knot_resolver_manager.cli.cmd."
+ for module_name in os.listdir(os.path.dirname(__file__) + "/cmd"):
+ if module_name[-3:] != ".py":
+ continue
+ importlib.import_module(f"{prefix}{module_name[:-3]}")
+
+
+def create_main_argument_parser() -> argparse.ArgumentParser:
+ parser = argparse.ArgumentParser(
+ "kresctl",
+ description="Command-line utility that helps communicate with Knot Resolver's management API."
+ "It also provides tooling to work with declarative configuration (validate, convert).",
+ )
+ # parser.add_argument(
+ # "-i",
+ # "--interactive",
+ # action="store_true",
+ # help="Interactive mode of kresctl utility",
+ # default=False,
+ # required=False,
+ # )
+ parser.add_argument(
+ "-s",
+ "--socket",
+ action="store",
+ type=str,
+ help="Optional, path to Unix-domain socket or network interface of the management API.",
+ default=["http+unix://%2Fvar%2Frun%2Fknot-resolver%2Fmanager.sock"], # FIXME
+ nargs=1,
+ required=False,
+ )
+ return parser
+
+
+def main() -> None:
+ autoimport_commands()
+ parser = create_main_argument_parser()
+ install_commands_parsers(parser)
+
+ namespace = parser.parse_args()
+ kresctl = Kresctl(namespace, parser)
+ kresctl.execute()
+
+ # if namespace.interactive or len(vars(namespace)) == 2:
+ # kresctl.interactive()
+ # else:
+ # kresctl.execute()
diff --git a/manager/knot_resolver_manager/compat/__init__.py b/manager/knot_resolver_manager/compat/__init__.py
new file mode 100644
index 00000000..410074cd
--- /dev/null
+++ b/manager/knot_resolver_manager/compat/__init__.py
@@ -0,0 +1,3 @@
+from . import asyncio, dataclasses
+
+__all__ = ["asyncio", "dataclasses"]
diff --git a/manager/knot_resolver_manager/compat/asyncio.py b/manager/knot_resolver_manager/compat/asyncio.py
new file mode 100644
index 00000000..9362db3f
--- /dev/null
+++ b/manager/knot_resolver_manager/compat/asyncio.py
@@ -0,0 +1,137 @@
+# We disable pylint checks because it can't find methods that only exist in newer Python versions.
+#
+# pylint: disable=no-member
+
+# We disable pyright checks because it reports methods that don't exist in this Python version,
+# so the reported errors are technically correct, but due to the version-checking conditions they never happen.
+# Due to backporting, we are also using private methods and members of classes that do not exist in every version.
+#
+# pyright: reportUnknownMemberType=false
+# pyright: reportUnknownVariableType=false
+# pyright: reportGeneralTypeIssues=false
+# pyright: reportPrivateUsage=false
+
+import asyncio
+import functools
+import logging
+import sys
+from asyncio import AbstractEventLoop, coroutines, events, tasks
+from typing import Any, Awaitable, Callable, Coroutine, Optional, TypeVar
+
+logger = logging.getLogger(__name__)
+
+T = TypeVar("T")
+
+
+async def to_thread(func: Callable[..., T], *args: Any, **kwargs: Any) -> T:
+ # version 3.9 and higher, call directly
+ if sys.version_info.major >= 3 and sys.version_info.minor >= 9:
+ return await asyncio.to_thread(func, *args, **kwargs) # type: ignore[attr-defined]
+
+ # earlier versions, run with default executor
+ else:
+ loop = asyncio.get_event_loop()
+ pfunc = functools.partial(func, *args, **kwargs)
+ res = await loop.run_in_executor(None, pfunc)
+ return res
+
+
+def async_in_a_thread(func: Callable[..., T]) -> Callable[..., Coroutine[None, None, T]]:
+ async def wrapper(*args: Any, **kwargs: Any) -> T:
+ return await to_thread(func, *args, **kwargs)
+
+ return wrapper
+
+
+def create_task(coro: Awaitable[T], name: Optional[str] = None) -> "asyncio.Task[T]":
+ # version 3.8 and higher, call directly
+ if sys.version_info.major >= 3 and sys.version_info.minor >= 8:
+ # pylint: disable=unexpected-keyword-arg
+ return asyncio.create_task(coro, name=name) # type: ignore[attr-defined,arg-type,call-arg]
+
+ # version 3.7 and higher, call directly without the name argument
+ if sys.version_info.major >= 3 and sys.version_info.minor >= 7:
+ return asyncio.create_task(coro) # type: ignore[attr-defined,arg-type]
+
+ # earlier versions, use older function
+ else:
+ return asyncio.ensure_future(coro)
+
+
+def is_event_loop_running() -> bool:
+ loop = events._get_running_loop() # pylint: disable=protected-access
+ return loop is not None and loop.is_running()
+
+
+def run(coro: Awaitable[T], debug: Optional[bool] = None) -> T:
+ # Adapted version of this:
+ # https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py#L8
+
+ # version 3.7 and higher, call directly
+ if sys.version_info.major >= 3 and sys.version_info.minor >= 7:
+ return asyncio.run(coro, debug=debug) # type: ignore[attr-defined,arg-type]
+
+ # earlier versions, use backported version of the function
+ if events._get_running_loop() is not None: # pylint: disable=protected-access
+ raise RuntimeError("asyncio.run() cannot be called from a running event loop")
+
+ if not coroutines.iscoroutine(coro):
+ raise ValueError(f"a coroutine was expected, got {repr(coro)}")
+
+ loop = events.new_event_loop()
+ try:
+ events.set_event_loop(loop)
+ if debug is not None:
+ loop.set_debug(debug)
+ return loop.run_until_complete(coro)
+ finally:
+ try:
+ _cancel_all_tasks(loop)
+ loop.run_until_complete(loop.shutdown_asyncgens())
+ if hasattr(loop, "shutdown_default_executor"):
+ loop.run_until_complete(loop.shutdown_default_executor()) # type: ignore[attr-defined]
+ finally:
+ events.set_event_loop(None)
+ loop.close()
+
+
+def _cancel_all_tasks(loop: AbstractEventLoop) -> None:
+ # Backported from:
+ # https://github.com/python/cpython/blob/3.9/Lib/asyncio/runners.py#L55-L74
+ #
+ to_cancel = tasks.Task.all_tasks(loop)
+ if not to_cancel:
+ return
+
+ for task in to_cancel:
+ task.cancel()
+
+ if sys.version_info.minor >= 7:
+ # since 3.7, the loop argument is implicitly the running loop
+ # since 3.10, the loop argument is removed
+ loop.run_until_complete(tasks.gather(*to_cancel, return_exceptions=True))
+ else:
+ loop.run_until_complete(tasks.gather(*to_cancel, loop=loop, return_exceptions=True))
+
+ for task in to_cancel:
+ if task.cancelled():
+ continue
+ if task.exception() is not None:
+ loop.call_exception_handler(
+ {
+ "message": "unhandled exception during asyncio.run() shutdown",
+ "exception": task.exception(),
+ "task": task,
+ }
+ )
+
+
+def add_async_signal_handler(signal: int, callback: Callable[[], Coroutine[Any, Any, None]]) -> None:
+ loop = asyncio.get_event_loop()
+ loop.add_signal_handler(signal, lambda: create_task(callback()))
+
+
+def remove_signal_handler(signal: int) -> bool:
+ loop = asyncio.get_event_loop()
+ return loop.remove_signal_handler(signal)
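A usage sketch (assumed, not part of the patch) of the compat layer above: by calling these wrappers instead of `asyncio` directly, the same code path works on Python 3.6 through 3.10.

```python
# Calls the compat wrappers: to_thread() uses the default executor on < 3.9
# and asyncio.to_thread() on >= 3.9; run() dispatches similarly.
import time

from knot_resolver_manager.compat import asyncio as compat


def blocking_io() -> str:
    time.sleep(0.1)  # stands in for any blocking call
    return "done"


async def main() -> None:
    result = await compat.to_thread(blocking_io)
    print(result)


compat.run(main())
```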
diff --git a/manager/knot_resolver_manager/compat/dataclasses.py b/manager/knot_resolver_manager/compat/dataclasses.py
new file mode 100644
index 00000000..440b34c8
--- /dev/null
+++ b/manager/knot_resolver_manager/compat/dataclasses.py
@@ -0,0 +1,69 @@
+"""
+This module contains a rather simplistic reimplementation of dataclasses, because they are unsupported on Python 3.6.
+"""
+
+
+from typing import Any, Dict, Set, Type
+
+dataclasses_import_success = False
+try:
+ import dataclasses
+
+ dataclasses_import_success = True
+except ImportError:
+ pass
+
+
+_CUSTOM_DATACLASS_MARKER = "_CUSTOM_DATACLASS_MARKER"
+
+
+def dataclass(cls: Any) -> Any:
+ if dataclasses_import_success:
+ return dataclasses.dataclass(cls)
+
+ anot: Dict[str, Type[Any]] = cls.__dict__.get("__annotations__", {})
+
+ def ninit(slf: Any, *args: Any, **kwargs: Any) -> None:
+ nonlocal anot
+
+ ianot = iter(anot.keys())
+ used: Set[str] = set()
+
+ # set normal arguments
+ for arg in args:
+ name = next(ianot)
+ setattr(slf, name, arg)
+ used.add(name)
+
+ # set keyword arguments
+ for key, val in kwargs.items():
+ assert key in anot, (
+ f"Constructing dataclass with an argument '{key}' which is not defined with a type"
+ f" annotation in class {cls.__name__}"
+ )
+ setattr(slf, key, val)
+ used.add(key)
+
+ # set default values
+ for key in anot:
+ if key in used:
+ continue
+ assert hasattr(
+ cls, key
+ ), f"Field '{key}' does not have default value and was not defined in the constructor"
+ dfl = getattr(cls, key)
+ setattr(slf, key, dfl)
+
+ setattr(cls, "__init__", ninit)
+ setattr(cls, _CUSTOM_DATACLASS_MARKER, ...)
+ return cls
+
+
+def is_dataclass(obj: Any) -> bool:
+ if dataclasses_import_success:
+ return dataclasses.is_dataclass(obj)
+
+ return hasattr(obj, _CUSTOM_DATACLASS_MARKER)
+
+
+__all__ = ["dataclass", "is_dataclass"]
diff --git a/manager/knot_resolver_manager/config_store.py b/manager/knot_resolver_manager/config_store.py
new file mode 100644
index 00000000..03519602
--- /dev/null
+++ b/manager/knot_resolver_manager/config_store.py
@@ -0,0 +1,77 @@
+import asyncio
+from asyncio import Lock
+from typing import Any, Awaitable, Callable, List, Tuple
+
+from knot_resolver_manager.datamodel import KresConfig
+from knot_resolver_manager.exceptions import KresManagerException
+from knot_resolver_manager.utils.functional import Result
+from knot_resolver_manager.utils.modeling.exceptions import DataParsingError
+
+VerifyCallback = Callable[[KresConfig, KresConfig], Awaitable[Result[None, str]]]
+UpdateCallback = Callable[[KresConfig], Awaitable[None]]
+
+
+class ConfigStore:
+ def __init__(self, initial_config: KresConfig):
+ self._config = initial_config
+ self._verifiers: List[VerifyCallback] = []
+ self._callbacks: List[UpdateCallback] = []
+ self._update_lock: Lock = Lock()
+
+ async def update(self, config: KresConfig) -> None:
+ # invoke pre-change verifiers
+ results: Tuple[Result[None, str], ...] = tuple(
+ await asyncio.gather(*[ver(self._config, config) for ver in self._verifiers])
+ )
+ err_res = filter(lambda r: r.is_err(), results)
+ errs = list(map(lambda r: r.unwrap_err(), err_res))
+ if len(errs) > 0:
+ raise KresManagerException(
+ "Validation of the new config failed. The reasons are:\n - " + "\n - ".join(errs)
+ )
+
+ async with self._update_lock:
+ # update the stored config with the new version
+ self._config = config
+
+ # invoke change callbacks
+ for call in self._callbacks:
+ await call(config)
+
+ async def register_verifier(self, verifier: VerifyCallback) -> None:
+ self._verifiers.append(verifier)
+ res = await verifier(self.get(), self.get())
+ if res.is_err():
+ raise DataParsingError(f"Initial config verification failed with error: {res.unwrap_err()}")
+
+ async def register_on_change_callback(self, callback: UpdateCallback) -> None:
+ """
+ Registers a new callback and immediately calls it with the current config.
+ """
+
+ self._callbacks.append(callback)
+ await callback(self.get())
+
+ def get(self) -> KresConfig:
+ return self._config
+
+
+def only_on_real_changes(selector: Callable[[KresConfig], Any]) -> Callable[[UpdateCallback], UpdateCallback]:
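+ # Remembers the selector's last value and re-runs the wrapped callback only
+ # when that value changes; the very first call always goes through.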
+ def decorator(orig_func: UpdateCallback) -> UpdateCallback:
+ original_value_set: Any = False
+ original_value: Any = None
+
+ async def new_func(config: KresConfig) -> None:
+ nonlocal original_value_set
+ nonlocal original_value
+ if not original_value_set:
+ original_value_set = True
+ original_value = selector(config)
+ await orig_func(config)
+ elif original_value != selector(config):
+ original_value = selector(config)
+ await orig_func(config)
+
+ return new_func
+
+ return decorator
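A usage sketch (assumed) of `only_on_real_changes`: an expensive reconfiguration step is re-run only when the configuration subtree it depends on actually changes, while `register_on_change_callback` still invokes it once with the current config.

```python
# Illustrative callback registration; apply_cache_config is hypothetical.
from knot_resolver_manager.config_store import ConfigStore, only_on_real_changes
from knot_resolver_manager.datamodel import KresConfig


@only_on_real_changes(lambda config: config.cache)
async def apply_cache_config(config: KresConfig) -> None:
    # invoked on registration and then only when the `cache` section differs
    print("cache section changed:", config.cache)


async def setup(store: ConfigStore) -> None:
    await store.register_on_change_callback(apply_cache_config)
```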
diff --git a/manager/knot_resolver_manager/constants.py b/manager/knot_resolver_manager/constants.py
new file mode 100644
index 00000000..f9f73449
--- /dev/null
+++ b/manager/knot_resolver_manager/constants.py
@@ -0,0 +1,86 @@
+import logging
+from pathlib import Path
+from typing import TYPE_CHECKING, Optional
+
+from knot_resolver_manager.utils import which
+
+if TYPE_CHECKING:
+ from knot_resolver_manager.config_store import ConfigStore
+ from knot_resolver_manager.datamodel.config_schema import KresConfig
+ from knot_resolver_manager.kresd_controller.interface import KresID
+
+STARTUP_LOG_LEVEL = logging.DEBUG
+DEFAULT_MANAGER_CONFIG_FILE = Path("/etc/knot-resolver/config.yml")
+MANAGER_FIX_ATTEMPT_MAX_COUNTER = 2
+FIX_COUNTER_DECREASE_INTERVAL_SEC = 30 * 60
+PID_FILE_NAME = "manager.pid"
+MAX_WORKERS = 256
+
+
+def kresd_executable() -> Path:
+ return which.which("kresd")
+
+
+def kres_gc_executable() -> Path:
+ return which.which("kres-cache-gc")
+
+
+def kresd_cache_dir(config: "KresConfig") -> Path:
+ return config.cache.storage.to_path()
+
+
+def kresd_config_file(_config: "KresConfig", kres_id: "KresID") -> Path:
+ return Path(f"kresd{int(kres_id)}.conf")
+
+
+def kresd_config_file_supervisord_pattern(_config: "KresConfig") -> Path:
+ return Path("kresd%(process_num)d.conf")
+
+
+def supervisord_config_file(_config: "KresConfig") -> Path:
+ return Path("supervisord.conf")
+
+
+def supervisord_config_file_tmp(_config: "KresConfig") -> Path:
+ return Path("supervisord.conf.tmp")
+
+
+def supervisord_pid_file(_config: "KresConfig") -> Path:
+ return Path("supervisord.pid")
+
+
+def supervisord_sock_file(_config: "KresConfig") -> Path:
+ return Path("supervisord.sock")
+
+
+def supervisord_subprocess_log_dir(_config: "KresConfig") -> Path:
+ return Path("logs")
+
+
+WATCHDOG_INTERVAL: float = 5
+"""
+Used in KresdManager. The number of seconds between system health checks.
+"""
+
+
+class _UserConstants:
+ """
+ Class for accessing constants which are technically not constants, as they are user-configurable.
+ """
+
+ def __init__(self, config_store: "ConfigStore", working_directory_on_startup: str) -> None:
+ self._config_store = config_store
+ self.working_directory_on_startup = working_directory_on_startup
+
+
+_user_constants: Optional[_UserConstants] = None
+
+
+async def init_user_constants(config_store: "ConfigStore", working_directory_on_startup: str) -> None:
+ global _user_constants
+ _user_constants = _UserConstants(config_store, working_directory_on_startup)
+
+
+def user_constants() -> _UserConstants:
+ assert _user_constants is not None
+ return _user_constants
diff --git a/manager/knot_resolver_manager/datamodel/__init__.py b/manager/knot_resolver_manager/datamodel/__init__.py
new file mode 100644
index 00000000..a0174acc
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/__init__.py
@@ -0,0 +1,3 @@
+from .config_schema import KresConfig
+
+__all__ = ["KresConfig"]
diff --git a/manager/knot_resolver_manager/datamodel/cache_schema.py b/manager/knot_resolver_manager/datamodel/cache_schema.py
new file mode 100644
index 00000000..61341fc3
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/cache_schema.py
@@ -0,0 +1,82 @@
+from typing import List, Optional, Union
+
+from typing_extensions import Literal
+
+from knot_resolver_manager.datamodel.types import Dir, DomainName, File, IntNonNegative, Percent, SizeUnit, TimeUnit
+from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver_manager.utils.modeling.base_schema import lazy_default
+
+
+class PrefillSchema(ConfigSchema):
+ """
+ Prefill the cache periodically by importing zone data obtained over HTTP.
+
+ ---
+ origin: Origin for the imported data. Cache prefilling is only supported for the root zone ('.').
+ url: URL of the zone data to be imported.
+ refresh_interval: Time interval between consecutive refreshes of the imported zone data.
+ ca_file: Path to the file containing a CA certificate bundle that is used to authenticate the HTTPS connection.
+ """
+
+ origin: DomainName
+ url: str
+ refresh_interval: TimeUnit = TimeUnit("1d")
+ ca_file: Optional[File] = None
+
+ def _validate(self) -> None:
+ if str(self.origin) != ".":
+ raise ValueError("cache prefilling is not yet supported for non-root zones")
+
+
+class GarbageCollectorSchema(ConfigSchema):
+ """
+ Configuration options of the cache garbage collector (kres-cache-gc).
+
+ ---
+ interval: Time interval determining how often the garbage collector runs.
+ threshold: Cache usage in percent that triggers the garbage collector.
+ release: Percent of used cache to be freed by the garbage collector.
+ temp_keys_space: Maximum amount of temporary memory for copied keys (0 = unlimited).
+ rw_deletes: Maximum number of deleted records per read-write transaction (0 = unlimited).
+ rw_reads: Maximum number of read records per read-write transaction (0 = unlimited).
+ rw_duration: Maximum duration of read-write transaction (0 = unlimited).
+ rw_delay: Wait time between two read-write transactions.
+ dry_run: Run the garbage collector in dry-run mode.
+ """
+
+ interval: TimeUnit = TimeUnit("1s")
+ threshold: Percent = Percent(80)
+ release: Percent = Percent(10)
+ temp_keys_space: SizeUnit = SizeUnit(0)
+ rw_deletes: IntNonNegative = IntNonNegative(100)
+ rw_reads: IntNonNegative = IntNonNegative(200)
+ rw_duration: TimeUnit = TimeUnit(0)
+ rw_delay: TimeUnit = TimeUnit(0)
+ dry_run: bool = False
+
+
+class CacheSchema(ConfigSchema):
+ """
+ DNS resolver cache configuration.
+
+ ---
+ storage: Cache storage of the DNS resolver.
+ size_max: Maximum size of the cache.
+ garbage_collector: Use the garbage collector (kres-cache-gc) to periodically clear cache.
+ ttl_min: Minimum time-to-live for the cache entries.
+ ttl_max: Maximum time-to-live for the cache entries.
+ ns_timeout: Time interval for which a nameserver address will be ignored after determining that it does not return (useful) answers.
+ prefill: Prefill the cache periodically by importing zone data obtained over HTTP.
+ """
+
+ storage: Dir = lazy_default(Dir, "/var/cache/knot-resolver")
+ size_max: SizeUnit = SizeUnit("100M")
+ garbage_collector: Union[GarbageCollectorSchema, Literal[False]] = GarbageCollectorSchema()
+ ttl_min: TimeUnit = TimeUnit("5s")
+ ttl_max: TimeUnit = TimeUnit("6d")
+ ns_timeout: TimeUnit = TimeUnit("1000ms")
+ prefill: Optional[List[PrefillSchema]] = None
+
+ def _validate(self) -> None:
+ if self.ttl_min.seconds() >= self.ttl_max.seconds():
+ raise ValueError("'ttl-max' must be larger than 'ttl-min'")
diff --git a/manager/knot_resolver_manager/datamodel/config_schema.py b/manager/knot_resolver_manager/datamodel/config_schema.py
new file mode 100644
index 00000000..4fdf5801
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/config_schema.py
@@ -0,0 +1,209 @@
+import logging
+import os
+import socket
+import sys
+from typing import Any, Dict, List, Optional, Union
+
+from jinja2 import Environment, FileSystemLoader, Template
+from typing_extensions import Literal
+
+from knot_resolver_manager.constants import MAX_WORKERS
+from knot_resolver_manager.datamodel.cache_schema import CacheSchema
+from knot_resolver_manager.datamodel.dns64_schema import Dns64Schema
+from knot_resolver_manager.datamodel.dnssec_schema import DnssecSchema
+from knot_resolver_manager.datamodel.forward_schema import ForwardSchema
+from knot_resolver_manager.datamodel.local_data_schema import LocalDataSchema
+from knot_resolver_manager.datamodel.logging_schema import LoggingSchema
+from knot_resolver_manager.datamodel.lua_schema import LuaSchema
+from knot_resolver_manager.datamodel.management_schema import ManagementSchema
+from knot_resolver_manager.datamodel.monitoring_schema import MonitoringSchema
+from knot_resolver_manager.datamodel.network_schema import NetworkSchema
+from knot_resolver_manager.datamodel.options_schema import OptionsSchema
+from knot_resolver_manager.datamodel.policy_schema import PolicySchema
+from knot_resolver_manager.datamodel.slice_schema import SliceSchema
+from knot_resolver_manager.datamodel.types import Dir, IntPositive
+from knot_resolver_manager.datamodel.view_schema import ViewSchema
+from knot_resolver_manager.datamodel.webmgmt_schema import WebmgmtSchema
+from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver_manager.utils.modeling.base_schema import lazy_default
+
+logger = logging.getLogger(__name__)
+
+
+def _get_templates_dir() -> str:
+ module = sys.modules["knot_resolver_manager.datamodel"].__file__
+ if module:
+ templates_dir = os.path.join(os.path.dirname(module), "templates")
+ if os.path.isdir(templates_dir):
+ return templates_dir
+ raise NotADirectoryError(f"the templates dir '{templates_dir}' is not a directory or does not exist")
+ raise OSError("package 'knot_resolver_manager.datamodel' cannot be located or loaded")
+
+
+_TEMPLATES_DIR = _get_templates_dir()
+
+
+def template_from_str(template: str) -> Template:
+ ldr = FileSystemLoader(_TEMPLATES_DIR)
+ env = Environment(trim_blocks=True, lstrip_blocks=True, loader=ldr)
+ return env.from_string(template)
+
+
+def _import_lua_template() -> Template:
+ path = os.path.join(_TEMPLATES_DIR, "config.lua.j2")
+ with open(path, "r", encoding="UTF-8") as file:
+ template = file.read()
+ return template_from_str(template)
+
+
+_MAIN_TEMPLATE = _import_lua_template()
+
+
+def _cpu_count() -> Optional[int]:
+ try:
+ return len(os.sched_getaffinity(0))
+ except (NotImplementedError, AttributeError):
+ logger.warning("The number of usable CPUs could not be determined using 'os.sched_getaffinity()'.")
+ cpus = os.cpu_count()
+ if cpus is None:
+ logger.warning("The number of usable CPUs could not be determined using 'os.cpu_count()'.")
+ return cpus
+
+
+def _default_max_worker_count() -> int:
+ c = _cpu_count()
+ if c:
+ return c * 10
+ return MAX_WORKERS
+
+
+class KresConfig(ConfigSchema):
+ class Raw(ConfigSchema):
+ """
+ Knot Resolver declarative configuration.
+
+ ---
+ version: Version of the configuration schema. By default it is the latest version supported by the resolver, but a couple of versions back are supported as well.
+ nsid: Name Server Identifier (RFC 5001) which allows DNS clients to request resolver to send back its NSID along with the reply to a DNS request.
+ hostname: Internal DNS resolver hostname. Default is machine hostname.
+ rundir: Directory where the resolver can create files and which will be its cwd.
+ workers: The number of running kresd (Knot Resolver daemon) workers. If set to 'auto', it is equal to the number of available CPUs.
+ max_workers: The maximum number of workers allowed. Cannot be changed at runtime.
+ management: Configuration of management HTTP API.
+ webmgmt: Configuration of legacy web management endpoint.
+ options: Fine-tuning global parameters of DNS resolver operation.
+ network: Network connections and protocols configuration.
+ views: List of views and their configuration.
+ local_data: Local data for forward records (A/AAAA) and reverse records (PTR).
+ slices: Split the entire DNS namespace into distinct slices.
+ policy: List of policy rules and their configuration.
+ forward: List of Forward Zones and their configuration.
+ cache: DNS resolver cache configuration.
+ dnssec: Disable DNSSEC, enable with defaults or set new configuration.
+ dns64: Disable DNS64 (RFC 6147), enable with defaults or set new configuration.
+ logging: Logging and debugging configuration.
+ monitoring: Metrics exposition configuration (Prometheus, Graphite).
+ lua: Custom Lua configuration.
+ """
+
+ version: int = 1
+ nsid: Optional[str] = None
+ hostname: Optional[str] = None
+ rundir: Dir = lazy_default(Dir, "/var/run/knot-resolver")
+ workers: Union[Literal["auto"], IntPositive] = IntPositive(1)
+ max_workers: IntPositive = IntPositive(_default_max_worker_count())
+ management: ManagementSchema = lazy_default(ManagementSchema, {"unix-socket": "./manager.sock"})
+ webmgmt: Optional[WebmgmtSchema] = None
+ options: OptionsSchema = OptionsSchema()
+ network: NetworkSchema = NetworkSchema()
+ views: Optional[List[ViewSchema]] = None
+ local_data: LocalDataSchema = LocalDataSchema()
+ slices: Optional[List[SliceSchema]] = None
+ policy: Optional[List[PolicySchema]] = None
+ forward: Optional[List[ForwardSchema]] = None
+ cache: CacheSchema = lazy_default(CacheSchema, {})
+ dnssec: Union[bool, DnssecSchema] = True
+ dns64: Union[bool, Dns64Schema] = False
+ logging: LoggingSchema = LoggingSchema()
+ monitoring: MonitoringSchema = MonitoringSchema()
+ lua: LuaSchema = LuaSchema()
+
+ _LAYER = Raw
+
+ nsid: Optional[str]
+ hostname: str
+ rundir: Dir
+ workers: IntPositive
+ max_workers: IntPositive
+ management: ManagementSchema
+ webmgmt: Optional[WebmgmtSchema]
+ options: OptionsSchema
+ network: NetworkSchema
+ views: Optional[List[ViewSchema]]
+ local_data: LocalDataSchema
+ slices: Optional[List[SliceSchema]]
+ policy: Optional[List[PolicySchema]]
+ forward: Optional[List[ForwardSchema]]
+ cache: CacheSchema
+ dnssec: Union[Literal[False], DnssecSchema]
+ dns64: Union[Literal[False], Dns64Schema]
+ logging: LoggingSchema
+ monitoring: MonitoringSchema
+ lua: LuaSchema
+
+ def _hostname(self, obj: Raw) -> Any:
+ if obj.hostname is None:
+ return socket.gethostname()
+ return obj.hostname
+
+ def _workers(self, obj: Raw) -> Any:
+ if obj.workers == "auto":
+ count = _cpu_count()
+ if count:
+ return IntPositive(count)
+ raise ValueError(
+ "The number of available CPUs to automatically set the number of running 'kresd' workers could not be determined."
+ "The number of workers can be configured manually in 'workers' option."
+ )
+ return obj.workers
+
+ def _dnssec(self, obj: Raw) -> Any:
+ if obj.dnssec is True:
+ return DnssecSchema()
+ return obj.dnssec
+
+ def _dns64(self, obj: Raw) -> Any:
+ if obj.dns64 is True:
+ return Dns64Schema()
+ return obj.dns64
+
+ def _validate(self) -> None:
+ # enforce max-workers config
+ if int(self.workers) > int(self.max_workers):
+ raise ValueError(f"can't run with more workers then the configured maximum {self.max_workers}")
+
+ # sanity check
+ cpu_count = _cpu_count()
+ if cpu_count and int(self.workers) > 10 * cpu_count:
+ raise ValueError(
+ "refusing to run with more then 10 workers per cpu core, the system wouldn't behave nicely"
+ )
+
+ def render_lua(self) -> str:
+ # FIXME the `cwd` argument is used only for configuring control socket path
+ # it should be removed and relative path used instead as soon as issue
+ # https://gitlab.nic.cz/knot/knot-resolver/-/issues/720 is fixed
+ return _MAIN_TEMPLATE.render(cfg=self, cwd=os.getcwd()) # pyright: reportUnknownMemberType=false
+
+
+def get_rundir_without_validation(data: Dict[str, Any]) -> Dir:
+ """
+ Without fully parsing, try to get the rundir from raw config data. When that fails,
+ attempt a full validation to produce a good error message.
+
+ Used for initial manager startup.
+ """
+
+ if "rundir" in data:
+ return Dir(data["rundir"], object_path="/rundir")
+ return KresConfig(data).rundir # this should throw a descriptive error
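An end-to-end sketch mirroring what `kresctl convert` does above: parse raw data, validate within a global context, then render the Lua configuration. The YAML snippet is illustrative; the exact keys follow the `Raw` schema above.

```python
# Parse -> validate -> render, with the validation context reset afterwards.
from pathlib import Path

from knot_resolver_manager.datamodel import KresConfig
from knot_resolver_manager.datamodel.globals import (
    Context,
    reset_global_validation_context,
    set_global_validation_context,
)
from knot_resolver_manager.utils.modeling import try_to_parse

yaml_text = """
workers: auto
cache:
  size-max: 200M
"""

set_global_validation_context(Context(Path("."), strict_validation=False))
try:
    config = KresConfig(try_to_parse(yaml_text))
    print(config.render_lua())
finally:
    reset_global_validation_context()
```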
diff --git a/manager/knot_resolver_manager/datamodel/design-notes.yml b/manager/knot_resolver_manager/datamodel/design-notes.yml
new file mode 100644
index 00000000..fb909acc
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/design-notes.yml
@@ -0,0 +1,237 @@
+###### Working notes about configuration schema
+
+
+## TODO nit: nest one level deeper inside `dnssec`, probably
+dnssec:
+ keep-removed: 0
+ refresh-time: 10s
+ hold-down-time: 30d
+
+## TODO nit: I don't like this name, at least not for the experimental thing we have there
+network:
+ tls:
+ auto_discovery: boolean
+
+#### General questions
+Plurals: do we name attributes in plural if they're a list;
+ some of them even allow a non-list if using a single element.
+
+
+#### New-policy brainstorming
+
+dnssec:
+ # Convert to key: style instead of list?
+ # - easier to handle in API/CLI (which might be a common action on names with broken DNSSEC)
+ # - allows to supply a value - stamp for expiration of that NTA
+ # (absolute time, but I can imagine API/CLI converting from duration when executed)
+ # - syntax isn't really more difficult, mainly it forces one entry per line (seems OK)
+ negative-trust-anchors:
+ example.org:
+ my.example.net:
+
+
+view:
+ # When a client request arrives, based on the `view` class of rules we may either
+ # decide for a direct answer or for marking the request with a set of tags.
+ # The concepts of matching and actions are a very good fit for this,
+ # and that matches our old policy approach. Matching here should avoid QNAME+QTYPE;
+ # instead it's e.g. suitable for access control.
+ # RPZ files also support rules that fall into this `view` class.
+ #
+ # Selecting a single rule: the most specific client-IP prefix
+ # that also matches additional conditions.
+ - subnet: [ 0.0.0.0/0, ::/0 ]
+ answer: refused
+ # some might prefer `allow: refused` ?
+ # Also, RCODEs are customary in CAPITALS though maybe not in configs.
+
+ - subnet: [ 10.0.0.0/8, 192.168.0.0/16 ]
+ # Adding `tags` implies allowing the query.
+ tags: [ t1, t2, t3 ] # theoretically we could use space-separated string
+ options: # only some of the global options can be overridden in view
+ minimize: true
+ dns64: true
+ rate-limit: # future option, probably (optionally?) structured
+ # LATER: rulesets are a relatively unclear feature for now.
+ # Their main point is to allow prioritization and avoid
+ # intermixing rules that come from different sources.
+ # Also some properties might be specifiable per ruleset.
+ ruleset: tt
+
+ - subnet: [ 10.0.10.0/24 ] # maybe allow a single value instead of a list?
+ # LATER: special addresses?
+ # - for kresd-internal requests
+ # - shorthands for all private IPv4 and/or IPv6;
+ # though yaml's repeated nodes could mostly cover that
+ # or just copy&paste from docs
+ answer: allow
+
+# Or perhaps a more complex approach? Probably not.
+# We might have multiple conditions at once and multiple actions at once,
+# but I don't expect these to be common, so the complication is probably not worth it.
+# An advantage would be that the separation of the two parts would be more visible.
+view:
+ - match:
+ subnet: [ 10.0.0.0/8, 192.168.0.0/16 ]
+ do:
+ tags: [ t1, t2, t3 ]
+ options: # ...
+
+
+local-data: # TODO: name
+ #FIXME: tags - allow assigning them to (groups of) addresses/records.
+
+ addresses: # automatically adds PTR records and NODATA (LATER: overridable NODATA?)
+ foo.bar: [ 127.0.0.1, ::1 ]
+ my.pc.corp: 192.168.12.95
+ addresses-files: # files in /etc/hosts format (and semantics like `addresses`)
+ - /etc/hosts
+
+ # Zonefile format seems quite handy here. Details:
+ # - probably use `local-data.ttl` from model as the default
+ # - and . root to avoid confusion if someone misses a final dot.
+ records: |
+ example.net. TXT "foo bar"
+ A 192.168.2.3
+ A 192.168.2.4
+ local.example.org AAAA ::1
+
+ subtrees:
+ nodata: true # impl ATM: defaults to false, set (only) for each rule/name separately
+ # impl: options like `ttl` and `nodata` might make sense to be settable (only?) per ruleset
+
+ subtrees: # TODO: perhaps just allow in the -tagged style, if we can't avoid lists anyway?
+ - type: empty
+ roots: [ sub2.example.org ] # TODO: name it the same as for forwarding
+ tags: [ t2 ]
+ - type: nxdomain
+ # Will we need to support multiple file formats in future and choose here?
+ roots-file: /path/to/file.txt
+ - type: empty
+ roots-url: https://example.org/blocklist.txt
+ refresh: 1d
+ # Is it a separate ruleset? Optionally? Persistence?
+ # (probably the same questions for local files as well)
+
+ - type: redirect
+ roots: [ sub4.example.org ]
+ addresses: [ 127.0.0.1, ::1 ]
+
+local-data-tagged: # TODO: name (view?); and even structure seems unclear.
+ # TODO: allow only one "type" per list entry? (addresses / addresses-files / subtrees / ...)
+ - tags: [ t1, t2 ]
+ addresses: #... otherwise the same as local-data
+ - tags: [ t2 ]
+ records: # ...
+ - tags: [ t3 ]
+ subtrees: empty
+ roots: [ sub2.example.org ]
+
+local-data-tagged: # this avoids lists, so it's relatively easy to amend through API
+ "t1 t2": # perhaps it's not nice that tags don't form a proper list?
+ addresses:
+ foo.bar: [ 127.0.0.1, ::1 ]
+ t4:
+ addresses:
+ foo.bar: [ 127.0.0.1, ::1 ]
+local-data: # avoids lists and merges into the untagged `local-data` config subtree
+ tagged: # (getting quite deep, though)
+ t1 t2:
+ addresses:
+ foo.bar: [ 127.0.0.1, ::1 ]
+# or even this ugly thing:
+local-data-tagged t1 t2:
+ addresses:
+ foo.bar: [ 127.0.0.1, ::1 ]
+
+forward: # TODO: "name" is from Unbound, but @vcunat would prefer "subtree" or something.
+ - name: '.' # Root is the default so could be omitted?
+ servers: [2001:148f:fffe::1, 2001:148f:ffff::1, 185.43.135.1, 193.14.47.1]
+ # TLS forward, server authenticated using hostname and system-wide CA certificates
+ # https://knot-resolver.readthedocs.io/en/stable/modules-policy.html?highlight=forward#tls-examples
+ - name: '.'
+ servers:
+ - address: [ 192.0.2.1, 192.0.2.2@5353 ]
+ transport: tls
+ pin-sha256: Wg==
+ - address: 2001:DB8::d0c
+ transport: tls
+ hostname: res.example.com
+ ca-file: /etc/knot-resolver/tlsca.crt
+ options:
+ # LATER: allow a subset of options here, per sub-tree?
+ # Though that's not necessarily related to forwarding (e.g. TTL limits),
+ # especially implementation-wise it probably won't matter.
+
+
+# Too confusing approach, I suppose? Different from usual way of thinking but closer to internal model.
+# Down-sides:
+# - multiple rules for the same name won't be possible (future, with different tags)
+# - loading names from a file won't be possible (or URL, etc.)
+rules:
+ example.org: &fwd_odvr
+ type: forward
+ servers: [2001:148f:fffe::1, 2001:148f:ffff::1, 185.43.135.1, 193.14.47.1]
+ sub2.example.org:
+ type: empty
+ tags: [ t3, t5 ]
+ sub3.example.org:
+ type: forward-auth
+ dnssec: no
+
+
+# @amrazek: current valid config
+
+views:
+ - subnets: [ 0.0.0.0/0, "::/0" ]
+ answer: refused
+ - subnets: [ 0.0.0.0/0, "::/0" ]
+ tags: [t01, t02, t03]
+ options:
+ minimize: true # default
+ dns64: true # default
+ - subnets: 10.0.10.0/24 # can be single value
+ answer: allow
+
+local-data:
+ ttl: 1d
+ nodata: true
+ addresses:
+ foo.bar: [ 127.0.0.1, "::1" ]
+ my.pc.corp: 192.168.12.95
+ addresses-files:
+ - /etc/hosts
+ records: |
+ example.net. TXT "foo bar"
+ A 192.168.2.3
+ A 192.168.2.4
+ local.example.org AAAA ::1
+ subtrees:
+ - type: empty
+ roots: [ sub2.example.org ]
+ tags: [ t2 ]
+ - type: nxdomain
+ roots-file: /path/to/file.txt
+ - type: empty
+ roots-url: https://example.org/blocklist.txt
+ refresh: 1d
+ - type: redirect
+ roots: [ sub4.example.org ]
+ addresses: [ 127.0.0.1, "::1" ]
+
+forward:
+ - subtree: '.'
+ servers:
+ - address: [ 192.0.2.1, 192.0.2.2@5353 ]
+ transport: tls
+ pin-sha256: Wg==
+ - address: 2001:DB8::d0c
+ transport: tls
+ hostname: res.example.com
+ ca-file: /etc/knot-resolver/tlsca.crt
+ options:
+ dnssec: true # default
+ - subtree: 1.168.192.in-addr.arpa
+ servers: [ 192.0.2.1@5353 ]
+ options:
+ dnssec: false # policy.STUB?
\ No newline at end of file
diff --git a/manager/knot_resolver_manager/datamodel/dns64_schema.py b/manager/knot_resolver_manager/datamodel/dns64_schema.py
new file mode 100644
index 00000000..55d3200a
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/dns64_schema.py
@@ -0,0 +1,13 @@
+from knot_resolver_manager.datamodel.types import IPv6Network96
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+
+class Dns64Schema(ConfigSchema):
+ """
+ DNS64 (RFC 6147) configuration.
+
+ ---
+ prefix: IPv6 prefix to be used for synthesizing AAAA records.
+ """
+
+ prefix: IPv6Network96 = IPv6Network96("64:ff9b::/96")
diff --git a/manager/knot_resolver_manager/datamodel/dnssec_schema.py b/manager/knot_resolver_manager/datamodel/dnssec_schema.py
new file mode 100644
index 00000000..3eb5ec3d
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/dnssec_schema.py
@@ -0,0 +1,45 @@
+from typing import List, Optional
+
+from knot_resolver_manager.datamodel.types import IntNonNegative, TimeUnit
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+
+class TrustAnchorFileSchema(ConfigSchema):
+ """
+ Trust-anchor zonefile configuration.
+
+ ---
+ file: Path to the zonefile that stores trust-anchors.
+ read_only: Blocks zonefile updates according to RFC 5011.
+
+ """
+
+ file: str
+ read_only: bool = False
+
+
+class DnssecSchema(ConfigSchema):
+ """
+ DNSSEC configuration.
+
+ ---
+ trust_anchor_sentinel: Allows users of a DNSSEC-validating resolver to detect which root keys are configured in the resolver's chain of trust (RFC 8509).
+ trust_anchor_signal_query: Signaling Trust Anchor Knowledge in DNSSEC Using Key Tag Query, according to RFC 8145, section 5.
+ time_skew_detection: Detection of difference between local system time and expiration time bounds in DNSSEC signatures for '. NS' records.
+ keep_removed: How many removed keys should be held in history (and key file) before being purged.
+ refresh_time: Force trust-anchors to be updated at the given interval instead of relying on RFC 5011 logic and TTLs. Intended only for testing purposes.
+ hold_down_time: Modify hold-down timer (RFC 5011). Intended only for testing purposes.
+ trust_anchors: List of trust-anchors in DS/DNSKEY records format.
+ negative_trust_anchors: List of domain names representing negative trust-anchors. (RFC 7646)
+ trust_anchors_files: List of zonefiles where trust-anchors are stored.
+ """
+
+ trust_anchor_sentinel: bool = True
+ trust_anchor_signal_query: bool = True
+ time_skew_detection: bool = True
+ keep_removed: IntNonNegative = IntNonNegative(0)
+ refresh_time: Optional[TimeUnit] = None
+ hold_down_time: TimeUnit = TimeUnit("30d")
+ trust_anchors: Optional[List[str]] = None
+ negative_trust_anchors: Optional[List[str]] = None
+ trust_anchors_files: Optional[List[TrustAnchorFileSchema]] = None
diff --git a/manager/knot_resolver_manager/datamodel/forward_schema.py b/manager/knot_resolver_manager/datamodel/forward_schema.py
new file mode 100644
index 00000000..4a003d6a
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/forward_schema.py
@@ -0,0 +1,57 @@
+from typing import List, Optional, Union
+
+from typing_extensions import Literal
+
+from knot_resolver_manager.datamodel.types import DomainName, File, IPAddressOptionalPort, ListOrItem
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+
+class ForwardServerSchema(ConfigSchema):
+ """
+ Forward server configuration options.
+
+ ---
+ address: IP address(es) of a forward server.
+ transport: Transport protocol for a forward server.
+ pin_sha256: Hash of accepted CA certificate.
+ hostname: Hostname of the forward server.
+ ca_file: Path to CA certificate file.
+ """
+
+ address: ListOrItem[IPAddressOptionalPort]
+ transport: Optional[Literal["tls"]] = None
+ pin_sha256: Optional[ListOrItem[str]] = None
+ hostname: Optional[DomainName] = None
+ ca_file: Optional[File] = None
+
+ def _validate(self) -> None:
+ if self.pin_sha256 and (self.hostname or self.ca_file):
+ ValueError("'pin-sha256' cannot be configurad together with 'hostname' or 'ca-file'")
+
+
+class ForwardOptionsSchema(ConfigSchema):
+ """
+ Configuration options for forward subtree.
+
+ ---
+ authoritative: The forwarding target is an authoritative server.
+ dnssec: Enable/disable DNSSEC.
+ """
+
+ authoritative: bool = False
+ dnssec: bool = True
+
+
+class ForwardSchema(ConfigSchema):
+ """
+ Configuration of forward subtree.
+
+ ---
+ subtree: Subtree to forward.
+ servers: Forward server configuration.
+ options: Configuration options for forward subtree.
+ """
+
+ subtree: DomainName
+ servers: Union[List[IPAddressOptionalPort], List[ForwardServerSchema]]
+ options: ForwardOptionsSchema = ForwardOptionsSchema()
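A sketch of the `pin-sha256` exclusivity check in `ForwardServerSchema._validate()`, using illustrative data; it assumes `ConfigSchema` subclasses accept parsed dicts as elsewhere in the patch, and that validation errors may be wrapped by the schema machinery.

```python
# pin-sha256 together with hostname must be rejected.
from knot_resolver_manager.datamodel.forward_schema import ForwardServerSchema

try:
    ForwardServerSchema(
        {
            "address": "192.0.2.1",
            "transport": "tls",
            "pin-sha256": "Wg==",
            "hostname": "res.example.com",  # conflicts with pin-sha256
        }
    )
except Exception as e:  # ValueError, possibly wrapped in DataValidationError
    print(e)
```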
diff --git a/manager/knot_resolver_manager/datamodel/globals.py b/manager/knot_resolver_manager/datamodel/globals.py
new file mode 100644
index 00000000..610323fa
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/globals.py
@@ -0,0 +1,57 @@
+"""
+The parsing and validation of the datamodel is dependent on a global state:
+- a file system path used for resolving relative paths
+
+
+Commentary from @vsraier:
+=========================
+
+While this is not ideal, it is the best we can do at the moment. When I created this module,
+the datamodel was dependent on the global state implicitly. The validation procedures just read
+the current working directory. This module is the first step in removing the global dependency.
+
+At some point in the future, it might be interesting to add something like a "validation context"
+to the modelling tools. It is not technically complicated, but it requires
+massive model changes I am not willing to make at the moment. Ideally, when implementing this,
+the BaseSchema would turn into an empty class without any logic. Not even a constructor. All logic
+would be in the ObjectMapper class. Similar to how Gson works in Java or AutoMapper in C#.
+"""
+
+from pathlib import Path
+from typing import Optional
+
+
+class Context:
+ resolve_root: Optional[Path]
+ strict_validation: bool
+
+ def __init__(self, resolve_root: Optional[Path], strict_validation: bool = True) -> None:
+ self.resolve_root = resolve_root
+ self.strict_validation = strict_validation
+
+
+_global_context: Context = Context(None)
+
+
+def set_global_validation_context(context: Context) -> None:
+ global _global_context
+ _global_context = context
+
+
+def reset_global_validation_context() -> None:
+ global _global_context
+ _global_context = Context(None)
+
+
+def get_resolve_root() -> Path:
+ if _global_context.resolve_root is None:
+ raise RuntimeError(
+ "Global validation context 'resolve_root' is not set!"
+ " Before validation, you have to set it using `set_global_validation_context()` function!"
+ )
+
+ return _global_context.resolve_root
+
+
+def get_strict_validation() -> bool:
+ return _global_context.strict_validation
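The `set`/`reset` pair above has to be matched manually by every caller (see `convert.py` and `validate.py` earlier in this patch). A hypothetical convenience wrapper, not part of this patch, that makes the pairing exception-safe:

```python
# Context-manager wrapper around the global validation context.
from contextlib import contextmanager
from pathlib import Path
from typing import Iterator, Optional

from knot_resolver_manager.datamodel.globals import (
    Context,
    reset_global_validation_context,
    set_global_validation_context,
)


@contextmanager
def validation_context(resolve_root: Optional[Path], strict: bool = True) -> Iterator[None]:
    set_global_validation_context(Context(resolve_root, strict))
    try:
        yield
    finally:
        # reset even when validation raises
        reset_global_validation_context()
```

Callers such as `ConvertCommand.run()` could then use `with validation_context(Path(self.input_file).parent, self.strict): ...` instead of the manual set/reset calls.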
diff --git a/manager/knot_resolver_manager/datamodel/local_data_schema.py b/manager/knot_resolver_manager/datamodel/local_data_schema.py
new file mode 100644
index 00000000..65fa7688
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/local_data_schema.py
@@ -0,0 +1,82 @@
+from typing import Dict, List, Optional
+
+from typing_extensions import Literal
+
+from knot_resolver_manager.datamodel.types import DomainName, File, IDPattern, IPAddress, ListOrItem, TimeUnit
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+
+class SubtreeSchema(ConfigSchema):
+ """
+ Local data and configuration of subtree.
+
+ ---
+ type: Type of the subtree.
+ tags: Tags to link with other policy rules.
+ ttl: Default TTL value used for added local subtree.
+ nodata: Use NODATA synthesis. NODATA will be synthesised for a matching name but mismatching type (e.g. an AAAA query when only an A record exists).
+ addresses: Subtree addresses.
+ roots: Subtree roots.
+ roots_file: Subtree roots from given file.
+ roots_url: Subtree roots from given URL.
+ refresh: Refresh time to update data from 'roots-file' or 'roots-url'.
+ """
+
+ type: Literal["empty", "nxdomain", "redirect"]
+ tags: Optional[List[IDPattern]] = None
+ ttl: Optional[TimeUnit] = None
+ nodata: bool = True
+ addresses: Optional[List[IPAddress]] = None
+ roots: Optional[List[DomainName]] = None
+ roots_file: Optional[File] = None
+ roots_url: Optional[str] = None
+ refresh: Optional[TimeUnit] = None
+
+ def _validate(self) -> None:
+ options_sum = sum([bool(self.roots), bool(self.roots_file), bool(self.roots_url)])
+ if options_sum > 1:
+ raise ValueError("only one of, 'roots', 'roots-file' or 'roots-url' can be configured")
+ elif options_sum < 1:
+ raise ValueError("one of, 'roots', 'roots-file' or 'roots-url' must be configured")
+ if self.refresh and not (self.roots_file or self.roots_url):
+ raise ValueError("'refresh' can be only configured with 'roots-file' or 'roots-url'")
+
+
+class RPZSchema(ConfigSchema):
+ """
+ Configuration of Response Policy Zone (RPZ).
+
+ ---
+ file: Path to the RPZ zone file.
+ tags: Tags to link with other policy rules.
+ """
+
+ file: File
+ tags: Optional[List[IDPattern]] = None
+
+
+class LocalDataSchema(ConfigSchema):
+ """
+ Local data for forward records (A/AAAA) and reverse records (PTR).
+
+ ---
+ ttl: Default TTL value used for added local data/records.
+ nodata: Use NODATA synthesis. NODATA will be synthesised for a matching name but mismatching type (e.g. AAAA query when only A exists).
+ root_fallback_addresses: Direct replacement of root hints.
+ root_fallback_addresses_files: Direct replacement of root hints from a zonefile.
+ addresses: Direct addition of hostname and IP address pairs.
+ addresses_files: Direct addition of hostname and IP address pairs from files in '/etc/hosts'-like format.
+ records: Direct addition of records in DNS zone file format.
+ subtrees: Direct addition of subtrees.
+ rpz: List of Response Policy Zones and their configuration.
+ """
+
+ ttl: Optional[TimeUnit] = None
+ nodata: bool = True
+ root_fallback_addresses: Optional[Dict[DomainName, ListOrItem[IPAddress]]] = None
+ root_fallback_addresses_files: Optional[ListOrItem[File]] = None
+ addresses: Optional[Dict[DomainName, IPAddress]] = None
+ addresses_files: Optional[ListOrItem[File]] = None
+ records: Optional[str] = None
+ subtrees: Optional[List[SubtreeSchema]] = None
+ rpz: Optional[List[RPZSchema]] = None
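
The mutual exclusion enforced by SubtreeSchema._validate() in action; this sketch assumes ConfigSchema accepts a plain dict with dash-separated keys and runs _validate() during construction, as the error messages and the inline ListenSchema defaults below suggest:

from knot_resolver_manager.datamodel.local_data_schema import SubtreeSchema

SubtreeSchema({"type": "empty", "roots": ["example.com."]})  # valid: exactly one roots source

try:
    SubtreeSchema({"type": "empty", "roots": ["example.com."], "roots-url": "https://example.org/zone"})
except ValueError as e:
    print(e)  # only one of 'roots', 'roots-file' or 'roots-url' can be configured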
diff --git a/manager/knot_resolver_manager/datamodel/logging_schema.py b/manager/knot_resolver_manager/datamodel/logging_schema.py
new file mode 100644
index 00000000..fb05b826
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/logging_schema.py
@@ -0,0 +1,150 @@
+import os
+from typing import Any, List, Optional, Set, Type, Union, cast
+
+from typing_extensions import Literal
+
+from knot_resolver_manager.datamodel.types import FilePath, TimeUnit
+from knot_resolver_manager.utils.modeling import ConfigSchema
+from knot_resolver_manager.utils.modeling.base_schema import is_obj_type_valid
+
+try:
+ # On Debian 10, the typing_extensions library does not contain TypeAlias.
+ # We don't strictly need the import for anything except for type checking,
+ # so this try-except makes sure it works either way.
+ from typing_extensions import TypeAlias # pylint: disable=ungrouped-imports
+except ImportError:
+ TypeAlias = None # type: ignore
+
+
+LogLevelEnum = Literal["crit", "err", "warning", "notice", "info", "debug"]
+LogTargetEnum = Literal["syslog", "stderr", "stdout"]
+LogGroupsEnum: TypeAlias = Literal[
+ "manager",
+ "supervisord",
+ "cache-gc",
+ "system",
+ "cache",
+ "io",
+ "net",
+ "ta",
+ "tasent",
+ "tasign",
+ "taupd",
+ "tls",
+ "gnutls",
+ "tls_cl",
+ "xdp",
+ "zimprt",
+ "zscann",
+ "doh",
+ "dnssec",
+ "hint",
+ "plan",
+ "iterat",
+ "valdtr",
+ "resolv",
+ "select",
+ "zonecut",
+ "cookie",
+ "statis",
+ "rebind",
+ "worker",
+ "policy",
+ "daf",
+ "timejm",
+ "timesk",
+ "graphi",
+ "prefil",
+ "primin",
+ "srvstl",
+ "wtchdg",
+ "nsid",
+ "dnstap",
+ "tests",
+ "dotaut",
+ "http",
+ "contrl",
+ "module",
+ "devel",
+ "reqdbg",
+]
+
+
+class DnstapSchema(ConfigSchema):
+ """
+ Logging DNS queries and responses to a unix socket.
+
+ ---
+ unix_socket: Path to unix domain socket where dnstap messages will be sent.
+ log_queries: Log queries from downstream in wire format.
+ log_responses: Log responses to downstream in wire format.
+ log_tcp_rtt: Log TCP RTT (Round-trip time).
+ """
+
+ unix_socket: FilePath
+ log_queries: bool = True
+ log_responses: bool = True
+ log_tcp_rtt: bool = True
+
+
+class DebuggingSchema(ConfigSchema):
+ """
+ Advanced debugging parameters for kresd (Knot Resolver daemon).
+
+ ---
+ assertion_abort: Allow the process to be aborted in case it encounters a failed assertion.
+ assertion_fork: Fork and abort the child kresd process to obtain a coredump, while the parent process recovers and keeps running.
+ """
+
+ assertion_abort: bool = False
+ assertion_fork: TimeUnit = TimeUnit("5m")
+
+
+class LoggingSchema(ConfigSchema):
+ class Raw(ConfigSchema):
+ """
+ Logging and debugging configuration.
+
+ ---
+ level: Global logging level.
+ target: Global logging stream target. "from-env" uses $KRES_LOGGING_TARGET and defaults to "stdout".
+ groups: List of groups for which 'debug' logging level is set.
+ dnssec_bogus: Log a message for each DNSSEC validation failure.
+ dnstap: Logging DNS requests and responses to a unix socket.
+ debugging: Advanced debugging parameters for kresd (Knot Resolver daemon).
+ """
+
+ level: LogLevelEnum = "notice"
+ target: Union[LogTargetEnum, Literal["from-env"]] = "from-env"
+ groups: Optional[List[LogGroupsEnum]] = None
+ dnssec_bogus: bool = False
+ dnstap: Union[Literal[False], DnstapSchema] = False
+ debugging: DebuggingSchema = DebuggingSchema()
+
+ _LAYER = Raw
+
+ level: LogLevelEnum
+ target: LogTargetEnum
+ groups: Optional[List[LogGroupsEnum]]
+ dnssec_bogus: bool
+ dnstap: Union[Literal[False], DnstapSchema]
+ debugging: DebuggingSchema
+
+ def _target(self, raw: Raw) -> LogTargetEnum:
+ if raw.target == "from-env":
+ target = os.environ.get("KRES_LOGGING_TARGET") or "stdout"
+ if not is_obj_type_valid(target, cast(Type[Any], LogTargetEnum)):
+ raise ValueError(f"logging target '{target}' read from $KRES_LOGGING_TARGET is invalid")
+ return cast(LogTargetEnum, target)
+ else:
+ return raw.target
+
+ def _validate(self):
+ if self.groups is None:
+ return
+
+ checked: Set[str] = set()
+ for i, g in enumerate(self.groups):
+ if g in checked:
+ raise ValueError(f"duplicate logging group '{g}' on index {i}")
+ checked.add(g)
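
The Raw/_LAYER split means the "from-env" placeholder never survives into the validated schema. A hedged sketch, assuming no-argument construction applies the Raw layer's defaults the same way the other schema defaults in this diff do:

import os

from knot_resolver_manager.datamodel.logging_schema import LoggingSchema

os.environ["KRES_LOGGING_TARGET"] = "syslog"
cfg = LoggingSchema()  # the Raw layer defaults 'target' to "from-env"
print(cfg.target)      # "syslog" -- resolved by _target() from the environment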
diff --git a/manager/knot_resolver_manager/datamodel/lua_schema.py b/manager/knot_resolver_manager/datamodel/lua_schema.py
new file mode 100644
index 00000000..bff8e289
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/lua_schema.py
@@ -0,0 +1,22 @@
+from typing import Optional
+
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+
+class LuaSchema(ConfigSchema):
+ """
+ Custom Lua configuration.
+
+ ---
+ script_only: Ignore declarative configuration and use only Lua script or file defined in this section.
+ script: Custom Lua configuration script.
+ script_file: Path to file that contains Lua configuration script.
+ """
+
+ script_only: bool = False
+ script: Optional[str] = None
+ script_file: Optional[str] = None
+
+ def _validate(self) -> None:
+ if self.script and self.script_file:
+ raise ValueError("'lua.script' and 'lua.script-file' are both defined, only one can be used")
diff --git a/manager/knot_resolver_manager/datamodel/management_schema.py b/manager/knot_resolver_manager/datamodel/management_schema.py
new file mode 100644
index 00000000..09daa3ff
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/management_schema.py
@@ -0,0 +1,21 @@
+from typing import Optional
+
+from knot_resolver_manager.datamodel.types import FilePath, IPAddressPort
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+
+class ManagementSchema(ConfigSchema):
+ """
+ Configuration of management HTTP API.
+
+ ---
+ unix_socket: Path to unix domain socket to listen to.
+ interface: IP address and port number to listen to.
+ """
+
+ unix_socket: Optional[FilePath] = None
+ interface: Optional[IPAddressPort] = None
+
+ def _validate(self) -> None:
+ if bool(self.unix_socket) == bool(self.interface):
+ raise ValueError("One of 'interface' or 'unix-socket' must be configured.")
diff --git a/manager/knot_resolver_manager/datamodel/monitoring_schema.py b/manager/knot_resolver_manager/datamodel/monitoring_schema.py
new file mode 100644
index 00000000..e4cdabe8
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/monitoring_schema.py
@@ -0,0 +1,25 @@
+from typing import Union
+
+from typing_extensions import Literal
+
+from knot_resolver_manager.datamodel.types import DomainName, IPAddress, PortNumber, TimeUnit
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+
+class GraphiteSchema(ConfigSchema):
+ host: Union[IPAddress, DomainName]
+ port: PortNumber = PortNumber(2003)
+ prefix: str = ""
+ interval: TimeUnit = TimeUnit("5s")
+ tcp: bool = False
+
+
+class MonitoringSchema(ConfigSchema):
+ """
+ ---
+ enabled: Configures whether the statistics module will be loaded into the resolver.
+ graphite: Optionally configures where Graphite metrics are sent.
+ """
+
+ enabled: Literal["manager-only", "lazy", "always"] = "lazy"
+ graphite: Union[Literal[False], GraphiteSchema] = False
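
As with dnstap and prediction elsewhere in this series, a bare 'false' disables the feature while a mapping configures it; a hedged sketch of the Graphite branch (same construction assumption as above):

from knot_resolver_manager.datamodel.monitoring_schema import MonitoringSchema

mon = MonitoringSchema({"graphite": {"host": "192.0.2.10", "prefix": "kres"}})
print(mon.enabled)        # "lazy" -- the default
print(mon.graphite.port)  # 2003 -- the GraphiteSchema default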
diff --git a/manager/knot_resolver_manager/datamodel/network_schema.py b/manager/knot_resolver_manager/datamodel/network_schema.py
new file mode 100644
index 00000000..2349bdc5
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/network_schema.py
@@ -0,0 +1,179 @@
+from typing import List, Optional, Union
+
+from typing_extensions import Literal
+
+from knot_resolver_manager.datamodel.types import (
+ File,
+ FilePath,
+ Int0_512,
+ Int0_65535,
+ InterfaceOptionalPort,
+ IPAddress,
+ IPNetwork,
+ IPv4Address,
+ IPv6Address,
+ ListOrItem,
+ PortNumber,
+ SizeUnit,
+)
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+KindEnum = Literal["dns", "xdp", "dot", "doh-legacy", "doh2"]
+
+
+class EdnsBufferSizeSchema(ConfigSchema):
+ """
+ EDNS payload size advertised in DNS packets.
+
+ ---
+ upstream: Maximum EDNS upstream (towards other DNS servers) payload size.
+ downstream: Maximum EDNS downstream (towards clients) payload size.
+ """
+
+ upstream: SizeUnit = SizeUnit("1232B")
+ downstream: SizeUnit = SizeUnit("1232B")
+
+
+class AddressRenumberingSchema(ConfigSchema):
+ """
+ Renumbers addresses in answers to different address space.
+
+ ---
+ source: Source subnet.
+ destination: Destination address prefix.
+ """
+
+ source: IPNetwork
+ destination: IPAddress
+
+
+class TLSSchema(ConfigSchema):
+ """
+ TLS configuration, also affects DNS over TLS and DNS over HTTPS.
+
+ ---
+ cert_file: Path to certificate file.
+ key_file: Path to certificate key file.
+ sticket_secret: Secret for TLS session resumption via tickets (RFC 5077).
+ sticket_secret_file: Path to file with secret for TLS session resumption via tickets (RFC 5077).
+ auto_discovery: Automatic discovery of authoritative servers supporting DNS-over-TLS.
+ padding: EDNS(0) padding of answers to queries that arrive over TLS transport.
+ """
+
+ cert_file: Optional[File] = None
+ key_file: Optional[File] = None
+ sticket_secret: Optional[str] = None
+ sticket_secret_file: Optional[File] = None
+ auto_discovery: bool = False
+ padding: Union[bool, Int0_512] = True
+
+ def _validate(self):
+ if self.sticket_secret and self.sticket_secret_file:
+ raise ValueError("'sticket_secret' and 'sticket_secret_file' are both defined, only one can be used")
+
+
+class ListenSchema(ConfigSchema):
+ class Raw(ConfigSchema):
+ """
+ Configuration of listening interface.
+
+ ---
+ unix_socket: Path to unix domain socket to listen to.
+ interface: IP address or interface name with optional port number to listen to.
+ port: Port number to listen to.
+ kind: Specifies DNS query transport protocol.
+ freebind: Used for binding to a non-local address.
+ """
+
+ interface: Optional[ListOrItem[InterfaceOptionalPort]] = None
+ unix_socket: Optional[ListOrItem[FilePath]] = None
+ port: Optional[PortNumber] = None
+ kind: KindEnum = "dns"
+ freebind: bool = False
+
+ _LAYER = Raw
+
+ interface: Optional[ListOrItem[InterfaceOptionalPort]]
+ unix_socket: Optional[ListOrItem[FilePath]]
+ port: Optional[PortNumber]
+ kind: KindEnum
+ freebind: bool
+
+ def _interface(self, origin: Raw) -> Optional[ListOrItem[InterfaceOptionalPort]]:
+ if origin.interface:
+ port_set: Optional[bool] = None
+ for intrfc in origin.interface: # type: ignore[attr-defined]
+ if origin.port and intrfc.port:
+ raise ValueError("The port number is defined in two places ('port' option and '@<port>' syntax).")
+ if port_set is not None and (bool(intrfc.port) != port_set):
+ raise ValueError(
+ "The '@<port>' syntax must be used either for all or none of the interface in the list."
+ )
+ port_set = bool(intrfc.port)
+ return origin.interface
+
+ def _port(self, origin: Raw) -> Optional[PortNumber]:
+ if origin.port:
+ return origin.port
+ # default port number based on kind
+ elif origin.interface:
+ if origin.kind == "dot":
+ return PortNumber(853)
+ elif origin.kind in ["doh-legacy", "doh2"]:
+ return PortNumber(443)
+ return PortNumber(53)
+ return None
+
+ def _validate(self) -> None:
+ if bool(self.unix_socket) == bool(self.interface):
+ raise ValueError("One of 'interface' or 'unix-socket' must be configured.")
+ if self.port and self.unix_socket:
+ raise ValueError(
+ "'unix-socket' and 'port' are not compatible options."
+ " Port configuration can only be used with 'interface' option."
+ )
+
+
+class ProxyProtocolSchema(ConfigSchema):
+ """
+ PROXYv2 protocol configuration.
+
+ ---
+ allow: Allow usage of the PROXYv2 protocol headers by clients on the specified addresses.
+ """
+
+ allow: List[Union[IPAddress, IPNetwork]]
+
+
+class NetworkSchema(ConfigSchema):
+ """
+ Network connections and protocols configuration.
+
+ ---
+ do_ipv4: Enable/disable using IPv4 for contacting upstream nameservers.
+ do_ipv6: Enable/disable using IPv6 for contacting upstream nameservers.
+ out_interface_v4: IPv4 address used to perform queries. Not set by default, which lets the OS choose any address.
+ out_interface_v6: IPv6 address used to perform queries. Not set by default, which lets the OS choose any address.
+ tcp_pipeline: TCP pipeline limit. The number of outstanding queries that a single client connection can make in parallel.
+ edns_tcp_keepalive: Allows clients to discover the connection timeout. (RFC 7828)
+ edns_buffer_size: Maximum EDNS payload size advertised in DNS packets. Different values can be configured for communication downstream (towards clients) and upstream (towards other DNS servers).
+ address_renumbering: Renumbers addresses in answers to different address space.
+ tls: TLS configuration, also affects DNS over TLS and DNS over HTTPS.
+ proxy_protocol: PROXYv2 protocol configuration.
+ listen: List of interfaces to listen to and their configuration.
+ """
+
+ do_ipv4: bool = True
+ do_ipv6: bool = True
+ out_interface_v4: Optional[IPv4Address] = None
+ out_interface_v6: Optional[IPv6Address] = None
+ tcp_pipeline: Int0_65535 = Int0_65535(100)
+ edns_tcp_keepalive: bool = True
+ edns_buffer_size: EdnsBufferSizeSchema = EdnsBufferSizeSchema()
+ address_renumbering: Optional[List[AddressRenumberingSchema]] = None
+ tls: TLSSchema = TLSSchema()
+ proxy_protocol: Union[Literal[False], ProxyProtocolSchema] = False
+ listen: List[ListenSchema] = [
+ ListenSchema({"interface": "127.0.0.1"}),
+ ListenSchema({"interface": "::1", "freebind": True}),
+ ]
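
The _port() transformation gives each listen entry a kind-appropriate default, so the validated schema always carries an explicit port for interface listens; an illustrative sketch:

from knot_resolver_manager.datamodel.network_schema import ListenSchema

dot = ListenSchema({"interface": "::1", "kind": "dot"})
print(dot.port)  # 853 (as a PortNumber) -- the DoT fallback in _port()

doh = ListenSchema({"interface": "::1", "kind": "doh2"})
print(doh.port)  # 443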
diff --git a/manager/knot_resolver_manager/datamodel/options_schema.py b/manager/knot_resolver_manager/datamodel/options_schema.py
new file mode 100644
index 00000000..e95e5f88
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/options_schema.py
@@ -0,0 +1,75 @@
+from typing import Any, Union
+
+from typing_extensions import Literal
+
+from knot_resolver_manager.datamodel.types import IntNonNegative, TimeUnit
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+GlueCheckingEnum = Literal["normal", "strict", "permissive"]
+
+
+class PredictionSchema(ConfigSchema):
+ """
+ Helps keep the cache hot by prefetching expiring records and learning usage patterns and repetitive queries.
+
+ ---
+ window: Sampling window length.
+ period: Number of windows that can be kept in memory.
+ """
+
+ window: TimeUnit = TimeUnit("15m")
+ period: IntNonNegative = IntNonNegative(24)
+
+
+class OptionsSchema(ConfigSchema):
+ class Raw(ConfigSchema):
+ """
+ Fine-tuning global parameters of DNS resolver operation.
+
+ ---
+ glue_checking: Glue records strictness checking level.
+ minimize: Send minimum amount of information in recursive queries to enhance privacy.
+ query_loopback: Permits queries to loopback addresses.
+ reorder_rrset: Controls whether resource records within an RRSet are reordered each time it is served from the cache.
+ query_case_randomization: Randomize Query Character Case.
+ priming: Initializing DNS resolver cache with Priming Queries (RFC 8109).
+ rebinding_protection: Protection against DNS Rebinding attack.
+ refuse_no_rd: Queries without the RD (recursion desired) bit set are answered with REFUSED.
+ time_jump_detection: Detection of difference between local system time and expiration time bounds in DNSSEC signatures for '. NS' records.
+ violators_workarounds: Workarounds for known DNS protocol violators.
+ serve_stale: Allows using timed-out records in case the DNS resolver is unable to contact upstream servers.
+ prediction: Helps keep the cache hot by prefetching expiring records and learning usage patterns and repetitive queries.
+ """
+
+ glue_checking: GlueCheckingEnum = "normal"
+ minimize: bool = True
+ query_loopback: bool = False
+ reorder_rrset: bool = True
+ query_case_randomization: bool = True
+ priming: bool = True
+ rebinding_protection: bool = False
+ refuse_no_rd: bool = True
+ time_jump_detection: bool = True
+ violators_workarounds: bool = False
+ serve_stale: bool = False
+ prediction: Union[bool, PredictionSchema] = False
+
+ _LAYER = Raw
+
+ glue_checking: GlueCheckingEnum
+ minimize: bool
+ query_loopback: bool
+ reorder_rrset: bool
+ query_case_randomization: bool
+ priming: bool
+ rebinding_protection: bool
+ refuse_no_rd: bool
+ time_jump_detection: bool
+ violators_workarounds: bool
+ serve_stale: bool
+ prediction: Union[Literal[False], PredictionSchema]
+
+ def _prediction(self, obj: Raw) -> Any:
+ if obj.prediction is True:
+ return PredictionSchema()
+ return obj.prediction
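
The _prediction() hook expands a bare 'true' into a fully defaulted PredictionSchema, so the validated type no longer carries a plain boolean for the enabled case (hedged sketch, same construction assumption as above):

from knot_resolver_manager.datamodel.options_schema import OptionsSchema

opts = OptionsSchema({"prediction": True})
print(opts.prediction.window)  # 15m -- the PredictionSchema default
print(opts.prediction.period)  # 24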
diff --git a/manager/knot_resolver_manager/datamodel/policy_schema.py b/manager/knot_resolver_manager/datamodel/policy_schema.py
new file mode 100644
index 00000000..bbc61cd1
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/policy_schema.py
@@ -0,0 +1,126 @@
+from typing import List, Optional, Union
+
+from knot_resolver_manager.datamodel.forward_schema import ForwardServerSchema
+from knot_resolver_manager.datamodel.network_schema import AddressRenumberingSchema
+from knot_resolver_manager.datamodel.types import (
+ DNSRecordTypeEnum,
+ IPAddressOptionalPort,
+ PolicyActionEnum,
+ PolicyFlagEnum,
+ TimeUnit,
+)
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+
+class FilterSchema(ConfigSchema):
+ """
+ Query filtering configuration.
+
+ ---
+ suffix: Filter based on the suffix of the query name.
+ pattern: Filter based on a pattern that matches the query name.
+ qtype: Filter based on the DNS query type.
+ """
+
+ suffix: Optional[str] = None
+ pattern: Optional[str] = None
+ qtype: Optional[DNSRecordTypeEnum] = None
+
+
+class AnswerSchema(ConfigSchema):
+ """
+ Configuration of custom resource record for DNS answer.
+
+ ---
+ rtype: Type of DNS resource record.
+ rdata: Data of DNS resource record.
+ ttl: Time-to-live value for defined answer.
+ nodata: Answer with NODATA if the requested type is not configured in the answer; otherwise the policy rule is ignored.
+ """
+
+ rtype: DNSRecordTypeEnum
+ rdata: str
+ ttl: TimeUnit = TimeUnit("1s")
+ nodata: bool = False
+
+
+def _validate_policy_action(policy_action: Union["ActionSchema", "PolicySchema"]) -> None:
+ servers = ["mirror", "forward", "stub"]
+
+ def _field(ac: str) -> str:
+ if ac in servers:
+ return "servers"
+ return "message" if ac == "deny" else ac
+
+ configurable_actions = ["deny", "reroute", "answer"] + servers
+
+ # checking for missing mandatory fields for actions
+ field = _field(policy_action.action)
+ if policy_action.action in configurable_actions and not getattr(policy_action, field):
+ raise ValueError(f"missing mandatory field '{field}' for '{policy_action.action}' action")
+
+ # checking for unnecessary fields
+ for ac in configurable_actions + ["deny"]:
+ field = _field(ac)
+ if getattr(policy_action, field) and _field(policy_action.action) != field:
+ raise ValueError(f"'{field}' field can only be defined for '{ac}' action")
+
+ # ForwardServerSchema is valid only for 'forward' action
+ if policy_action.servers:
+ for server in policy_action.servers: # pylint: disable=not-an-iterable
+ if policy_action.action != "forward" and isinstance(server, ForwardServerSchema):
+ raise ValueError(
+ f"'ForwardServerSchema' in 'servers' is valid only for 'forward' action, got '{policy_action.action}'"
+ )
+
+
+class ActionSchema(ConfigSchema):
+ """
+ Configuration of policy action.
+
+ ---
+ action: Policy action.
+ message: Deny message for 'deny' action.
+ reroute: Configuration for 'reroute' action.
+ answer: Answer definition for 'answer' action.
+ servers: Servers configuration for 'mirror', 'forward' and 'stub' action.
+ """
+
+ action: PolicyActionEnum
+ message: Optional[str] = None
+ reroute: Optional[List[AddressRenumberingSchema]] = None
+ answer: Optional[AnswerSchema] = None
+ servers: Optional[Union[List[IPAddressOptionalPort], List[ForwardServerSchema]]] = None
+
+ def _validate(self) -> None:
+ _validate_policy_action(self)
+
+
+class PolicySchema(ConfigSchema):
+ """
+ Configuration of policy rule.
+
+ ---
+ action: Policy rule action.
+ priority: Policy rule priority.
+ filter: Query filtering configuration.
+ views: Use policy rule only for clients defined by views.
+ options: Configuration flags for policy rule.
+ message: Deny message for 'deny' action.
+ reroute: Configuration for 'reroute' action.
+ answer: Answer definition for 'answer' action.
+ servers: Servers configuration for 'mirror', 'forward' and 'stub' action.
+ """
+
+ action: PolicyActionEnum
+ priority: Optional[int] = None
+ filter: Optional[FilterSchema] = None
+ views: Optional[List[str]] = None
+ options: Optional[List[PolicyFlagEnum]] = None
+ message: Optional[str] = None
+ reroute: Optional[List[AddressRenumberingSchema]] = None
+ answer: Optional[AnswerSchema] = None
+ servers: Optional[Union[List[IPAddressOptionalPort], List[ForwardServerSchema]]] = None
+
+ def _validate(self) -> None:
+ _validate_policy_action(self)
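
_validate_policy_action() ties each action to exactly its own payload; for instance (same construction assumption as the sketches above):

from knot_resolver_manager.datamodel.policy_schema import PolicySchema

try:
    PolicySchema({"action": "forward"})  # 'forward' is configurable and requires 'servers'
except ValueError as e:
    print(e)  # missing mandatory field 'servers' for 'forward' action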
diff --git a/manager/knot_resolver_manager/datamodel/rpz_schema.py b/manager/knot_resolver_manager/datamodel/rpz_schema.py
new file mode 100644
index 00000000..633e34a5
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/rpz_schema.py
@@ -0,0 +1,29 @@
+from typing import List, Optional
+
+from knot_resolver_manager.datamodel.types import File, PolicyActionEnum, PolicyFlagEnum
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+
+class RPZSchema(ConfigSchema):
+ """
+ Configuration of Response Policy Zone (RPZ).
+
+ ---
+ action: RPZ rule action, typically 'deny'.
+ file: Path to the RPZ zone file.
+ watch: Reload the file when it changes.
+ views: Use RPZ rule only for clients defined by views.
+ options: Configuration flags for RPZ rule.
+ message: Deny message for 'deny' action.
+ """
+
+ action: PolicyActionEnum
+ file: File
+ watch: bool = True
+ views: Optional[List[str]] = None
+ options: Optional[List[PolicyFlagEnum]] = None
+ message: Optional[str] = None
+
+ def _validate(self) -> None:
+ if self.message and self.action != "deny":
+ raise ValueError("'message' field can only be defined for 'deny' action")
diff --git a/manager/knot_resolver_manager/datamodel/slice_schema.py b/manager/knot_resolver_manager/datamodel/slice_schema.py
new file mode 100644
index 00000000..0c7cdea1
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/slice_schema.py
@@ -0,0 +1,21 @@
+from typing import List, Optional
+
+from typing_extensions import Literal
+
+from knot_resolver_manager.datamodel.policy_schema import ActionSchema
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+
+class SliceSchema(ConfigSchema):
+ """
+ Split the entire DNS namespace into distinct slices.
+
+ ---
+ function: Slicing function that returns index based on the query.
+ views: Use this Slice only for clients defined by views.
+ actions: Actions for slice.
+ """
+
+ function: Literal["randomize-psl"] = "randomize-psl"
+ views: Optional[List[str]] = None
+ actions: List[ActionSchema]
diff --git a/manager/knot_resolver_manager/datamodel/static_hints_schema.py b/manager/knot_resolver_manager/datamodel/static_hints_schema.py
new file mode 100644
index 00000000..7d39fcf4
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/static_hints_schema.py
@@ -0,0 +1,27 @@
+from typing import Dict, List, Optional
+
+from knot_resolver_manager.datamodel.types import DomainName, File, IPAddress, TimeUnit
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+
+class StaticHintsSchema(ConfigSchema):
+ """
+ Static hints for forward records (A/AAAA) and reverse records (PTR).
+
+ ---
+ ttl: TTL value used for records added from static hints.
+ nodata: Use NODATA synthesis. NODATA will be synthesised for a matching hint name but mismatching type.
+ etc_hosts: Add hints from '/etc/hosts' file.
+ root_hints: Direct addition of root hint pairs (hostname, list of addresses).
+ root_hints_file: Path to root hints in zonefile. Replaces all current root hints.
+ hints: Direct addition of hint pairs (hostname, list of addresses).
+ hints_files: Paths to hints in hosts-like files.
+ """
+
+ ttl: Optional[TimeUnit] = None
+ nodata: bool = True
+ etc_hosts: bool = False
+ root_hints: Optional[Dict[DomainName, List[IPAddress]]] = None
+ root_hints_file: Optional[File] = None
+ hints: Optional[Dict[DomainName, List[IPAddress]]] = None
+ hints_files: Optional[List[File]] = None
diff --git a/manager/knot_resolver_manager/datamodel/stub_zone_schema.py b/manager/knot_resolver_manager/datamodel/stub_zone_schema.py
new file mode 100644
index 00000000..b9945ecc
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/stub_zone_schema.py
@@ -0,0 +1,32 @@
+from typing import List, Optional, Union
+
+from knot_resolver_manager.datamodel.types import DomainName, IPAddressOptionalPort, PolicyFlagEnum
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+
+class StubServerSchema(ConfigSchema):
+ """
+ Configuration of Stub server.
+
+ ---
+ address: IP address of Stub server.
+ """
+
+ address: IPAddressOptionalPort
+
+
+class StubZoneSchema(ConfigSchema):
+ """
+ Configuration of Stub Zone.
+
+ ---
+ subtree: Domain name of the zone.
+ servers: IP addresses of Stub servers.
+ views: Use this Stub Zone only for clients defined by views.
+ options: Configuration flags for Stub Zone.
+ """
+
+ subtree: DomainName
+ servers: Union[List[IPAddressOptionalPort], List[StubServerSchema]]
+ views: Optional[List[str]] = None
+ options: Optional[List[PolicyFlagEnum]] = None
diff --git a/manager/knot_resolver_manager/datamodel/templates/cache.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/cache.lua.j2
new file mode 100644
index 00000000..35715771
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/cache.lua.j2
@@ -0,0 +1,18 @@
+cache.open({{ cfg.cache.size_max.bytes() }}, 'lmdb://{{ cfg.cache.storage }}')
+cache.min_ttl({{ cfg.cache.ttl_min.seconds() }})
+cache.max_ttl({{ cfg.cache.ttl_max.seconds() }})
+cache.ns_tout({{ cfg.cache.ns_timeout.millis() }})
+
+{% if cfg.cache.prefill %}
+-- cache.prefill
+modules.load('prefill')
+prefill.config({
+{% for item in cfg.cache.prefill %}
+ ['{{ item.origin.punycode() }}'] = {
+ url = '{{ item.url }}',
+ interval = {{ item.refresh_interval.seconds() }},
+ {{ "ca_file = '"+item.ca_file+"'," if item.ca_file }}
+ }
+{% endfor %}
+})
+{% endif %}
\ No newline at end of file
diff --git a/manager/knot_resolver_manager/datamodel/templates/config.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/config.lua.j2
new file mode 100644
index 00000000..442354ab
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/config.lua.j2
@@ -0,0 +1,70 @@
+{% if not cfg.lua.script_only %}
+
+-- FFI library
+ffi = require('ffi')
+local C = ffi.C
+
+-- hostname
+hostname('{{ cfg.hostname }}')
+
+{% if cfg.nsid %}
+-- nsid
+modules.load('nsid')
+nsid.name('{{ cfg.nsid }}_' .. worker.id)
+{% endif %}
+
+-- LOGGING section ----------------------------------
+{% include "logging.lua.j2" %}
+
+-- MONITORING section -------------------------------
+{% include "monitoring.lua.j2" %}
+
+-- WEBMGMT section ----------------------------------
+{% include "webmgmt.lua.j2" %}
+
+-- OPTIONS section ----------------------------------
+{% include "options.lua.j2" %}
+
+-- NETWORK section ----------------------------------
+{% include "network.lua.j2" %}
+
+-- VIEWS section ------------------------------------
+{% include "views.lua.j2" %}
+
+-- LOCAL-DATA section -------------------------------
+{% include "local_data.lua.j2" %}
+
+-- SLICES section -----------------------------------
+{# {% include "slices.lua.j2" %} #}
+
+-- POLICY section -----------------------------------
+{# {% include "policy.lua.j2" %} #}
+
+-- RPZ section --------------------------------------
+{# {% include "rpz.lua.j2" %} #}
+
+-- FORWARD section ----------------------------------
+{% include "forward.lua.j2" %}
+
+-- CACHE section ------------------------------------
+{% include "cache.lua.j2" %}
+
+-- DNSSEC section -----------------------------------
+{% include "dnssec.lua.j2" %}
+
+-- DNS64 section ------------------------------------
+{% include "dns64.lua.j2" %}
+
+{% endif %}
+
+-- LUA section --------------------------------------
+-- Custom Lua code cannot be validated
+
+{% if cfg.lua.script_file %}
+{% import cfg.lua.script_file as script_file %}
+{{ script_file }}
+{% endif %}
+
+{% if cfg.lua.script %}
+{{ cfg.lua.script }}
+{% endif %}
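
These templates are ordinary Jinja2, so they can be exercised outside the manager. A minimal rendering sketch, not the manager's actual entry point: KresConfig is assumed to be the top-level schema tying these files together (it is not shown in this part of the diff), and 'cwd' feeds the control-socket path in monitoring.lua.j2 below:

from jinja2 import Environment, FileSystemLoader

from knot_resolver_manager.datamodel.config_schema import KresConfig

cfg = KresConfig({"network": {"listen": [{"interface": "127.0.0.1"}]}})
env = Environment(loader=FileSystemLoader("manager/knot_resolver_manager/datamodel/templates"))
lua = env.get_template("config.lua.j2").render(cfg=cfg, cwd="/run/knot-resolver")
print(lua)  # the generated kresd Lua configuration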
diff --git a/manager/knot_resolver_manager/datamodel/templates/dns64.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/dns64.lua.j2
new file mode 100644
index 00000000..d4fdf28f
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/dns64.lua.j2
@@ -0,0 +1,7 @@
+{% if cfg.dns64 %}
+-- load dns64 module
+modules.load('dns64')
+
+-- dns64.prefix
+dns64.config('{{ cfg.dns64.prefix.to_std().network_address|string }}')
+{% endif %}
\ No newline at end of file
diff --git a/manager/knot_resolver_manager/datamodel/templates/dnssec.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/dnssec.lua.j2
new file mode 100644
index 00000000..31a29bea
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/dnssec.lua.j2
@@ -0,0 +1,58 @@
+{% from 'macros/common_macros.lua.j2' import boolean %}
+
+{% if not cfg.dnssec %}
+-- disable dnssec
+trust_anchors.remove('.')
+{% endif %}
+
+-- options.trust-anchor-sentinel
+{% if cfg.dnssec.trust_anchor_sentinel %}
+modules.load('ta_sentinel')
+{% else %}
+modules.unload('ta_sentinel')
+{% endif %}
+
+-- options.trust-anchor-signal-query
+{% if cfg.dnssec.trust_anchor_signal_query %}
+modules.load('ta_signal_query')
+{% else %}
+modules.unload('ta_signal_query')
+{% endif %}
+
+-- options.time-skew-detection
+{% if cfg.dnssec.time_skew_detection %}
+modules.load('detect_time_skew')
+{% else %}
+modules.unload('detect_time_skew')
+{% endif %}
+
+-- dnssec.keep-removed
+trust_anchors.keep_removed = {{ cfg.dnssec.keep_removed }}
+
+{% if cfg.dnssec.refresh_time %}
+-- dnssec.refresh-time
+trust_anchors.refresh_time = {{ cfg.dnssec.refresh_time.seconds()|string }}
+{% endif %}
+
+{% if cfg.dnssec.trust_anchors %}
+-- dnssec.trust-anchors
+{% for ta in cfg.dnssec.trust_anchors %}
+trust_anchors.add('{{ ta }}')
+{% endfor %}
+{% endif %}
+
+{% if cfg.dnssec.negative_trust_anchors %}
+-- dnssec.negative-trust-anchors
+trust_anchors.set_insecure({
+{% for nta in cfg.dnssec.negative_trust_anchors %}
+ '{{ nta }}',
+{% endfor %}
+})
+{% endif %}
+
+{% if cfg.dnssec.trust_anchors_files %}
+-- dnssec.trust-anchors-files
+{% for taf in cfg.dnssec.trust_anchors_files %}
+trust_anchors.add_file('{{ taf.file }}', {{ boolean(taf.read_only) }})
+{% endfor %}
+{% endif %}
\ No newline at end of file
diff --git a/manager/knot_resolver_manager/datamodel/templates/forward.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/forward.lua.j2
new file mode 100644
index 00000000..afb2c492
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/forward.lua.j2
@@ -0,0 +1,7 @@
+{% from 'macros/forward_macros.lua.j2' import policy_rule_forward_add %}
+
+{% if cfg.forward %}
+{% for fwd in cfg.forward %}
+{{ policy_rule_forward_add(fwd) }}
+{% endfor %}
+{% endif %}
diff --git a/manager/knot_resolver_manager/datamodel/templates/forward_zones.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/forward_zones.lua.j2
new file mode 100644
index 00000000..26e0a9e8
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/forward_zones.lua.j2
@@ -0,0 +1,72 @@
+{% from 'macros/policy_macros.lua.j2' import policy_flags, policy_add, policy_suffix, policy_todname, policy_forward, policy_tls_forward %}
+{% from 'macros/view_macros.lua.j2' import view_tsig, view_addr %}
+
+{% if cfg.forward_zones %}
+{% for zone in cfg.forward_zones %}
+-- forward-zone: {{ zone.subtree }}
+{% if zone.views -%}
+{# views set for forward-zone #}
+{% for view_id in zone.views -%}
+{%- set view = cfg.views[view_id] -%}
+
+{# merge options from view and forward-zone #}
+{%- set options = none -%}
+{% if zone.options and view.options -%}
+{% set options = zone.options|list + view.options|list %}
+{% elif zone.options %}
+{% set options = zone.options|list %}
+{% elif view.options %}
+{% set options = view.options|list %}
+{%- endif %}
+
+{# view tsig #}
+{% if view.tsig %}
+{% for tsig in view.tsig %}
+
+{%- if options -%}
+{{ view_tsig(tsig|string, policy_suffix(policy_flags(options|list), policy_todname(zone.subtree|string))) }}
+{%- endif %}
+
+{% if zone.tls -%}
+{{ view_tsig(tsig|string, policy_suffix(policy_tls_forward(zone.servers|list), policy_todname(zone.subtree|string))) }}
+{% else %}
+{{ view_tsig(tsig|string, policy_suffix(policy_forward(zone.servers|list), policy_todname(zone.subtree|string))) }}
+{%- endif %}
+
+{% endfor %}
+{% endif %}
+
+{# view addr #}
+{% if view.subnets %}
+{% for addr in view.subnets %}
+
+{%- if options -%}
+{{ view_addr(addr|string, policy_suffix(policy_flags(options|list), policy_todname(zone.subtree|string))) }}
+{%- endif %}
+
+{% if zone.tls -%}
+{{ view_addr(addr|string, policy_suffix(policy_tls_forward(zone.servers|list), policy_todname(zone.subtree|string))) }}
+{% else %}
+{{ view_addr(addr|string, policy_suffix(policy_forward(zone.servers|list), policy_todname(zone.subtree|string))) }}
+{%- endif %}
+
+{% endfor %}
+{% endif %}
+
+{% endfor %}
+{% else %}
+{# no views set for forward-zone #}
+
+{% if zone.options -%}
+{{ policy_add(policy_suffix(policy_flags(zone.options|list), policy_todname(zone.subtree|string))) }}
+{%- endif %}
+
+{% if zone.tls -%}
+{{ policy_add(policy_suffix(policy_tls_forward(zone.servers|list), policy_todname(zone.subtree|string))) }}
+{% else %}
+{{ policy_add(policy_suffix(policy_forward(zone.servers|list), policy_todname(zone.subtree|string))) }}
+{%- endif %}
+
+{% endif %}
+{% endfor %}
+{% endif %}
diff --git a/manager/knot_resolver_manager/datamodel/templates/local_data.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/local_data.lua.j2
new file mode 100644
index 00000000..4764041f
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/local_data.lua.j2
@@ -0,0 +1,51 @@
+{% from 'macros/local_data_macros.lua.j2' import local_data_subtree_root, local_data_records, local_data_root_fallback_addresses, local_data_root_fallback_addresses_files, local_data_addresses, local_data_addresses_files %}
+
+
+modules = { 'hints > iterate' }
+
+{# root-fallback-addresses #}
+{% if cfg.local_data.root_fallback_addresses -%}
+{{ local_data_root_fallback_addresses(cfg.local_data.root_fallback_addresses) }}
+{%- endif %}
+
+{# root-fallback-addresses-files #}
+{% if cfg.local_data.root_fallback_addresses_files -%}
+{{ local_data_root_fallback_addresses_files(cfg.local_data.root_fallback_addresses_files) }}
+{%- endif %}
+
+{# addresses #}
+{% if cfg.local_data.addresses -%}
+{{ local_data_addresses(cfg.local_data.addresses) }}
+{%- endif %}
+
+{# addresses-files #}
+{% if cfg.local_data.addresses_files -%}
+{{ local_data_addresses_files(cfg.local_data.addresses_files) }}
+{%- endif %}
+
+{# records #}
+{% if cfg.local_data.records -%}
+{{ local_data_records(cfg.local_data.records, false, cfg.local_data.ttl, cfg.local_data.nodata) }}
+{%- endif %}
+
+{# subtrees #}
+{% if cfg.local_data.subtrees -%}
+{% for subtree in cfg.local_data.subtrees %}
+{% if subtree.roots -%}
+{% for root in subtree.roots %}
+{{ local_data_subtree_root(subtree.type, root, subtree.tags) }}
+{% endfor %}
+{%- elif subtree.roots_file -%}
+{# TODO: not implemented yet #}
+{%- elif subtree.roots_url -%}
+{# TODO: not implemented yet #}
+{%- endif %}
+{% endfor %}
+{%- endif %}
+
+{# rpz #}
+{% if cfg.local_data.rpz -%}
+{% for rpz in cfg.local_data.rpz %}
+{{ local_data_records(rpz.file, true, cfg.local_data.ttl, cfg.local_data.nodata, rpz.tags) }}
+{% endfor %}
+{%- endif %}
diff --git a/manager/knot_resolver_manager/datamodel/templates/logging.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/logging.lua.j2
new file mode 100644
index 00000000..2fb398e9
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/logging.lua.j2
@@ -0,0 +1,43 @@
+{% from 'macros/common_macros.lua.j2' import boolean %}
+
+-- logging.level
+log_level('{{ cfg.logging.level }}')
+
+{% if cfg.logging.target -%}
+-- logging.target
+log_target('{{ cfg.logging.target }}')
+{%- endif %}
+
+{% if cfg.logging.groups %}
+-- logging.groups
+log_groups({
+{% for g in cfg.logging.groups %}
+{% if g != "manager" and g != "supervisord" %}
+ '{{ g }}',
+{% endif %}
+{% endfor %}
+})
+{% endif %}
+
+{% if cfg.logging.dnssec_bogus %}
+modules.load('bogus_log')
+{% endif %}
+
+{% if cfg.logging.dnstap -%}
+-- logging.dnstap
+modules.load('dnstap')
+dnstap.config({
+ socket_path = '{{ cfg.logging.dnstap.unix_socket }}',
+ client = {
+ log_queries = {{ boolean(cfg.logging.dnstap.log_queries) }},
+ log_responses = {{ boolean(cfg.logging.dnstap.log_responses) }},
+ log_tcp_rtt = {{ boolean(cfg.logging.dnstap.log_tcp_rtt) }}
+ }
+})
+{%- endif %}
+
+-- logging.debugging.assertion-abort
+debugging.assertion_abort = {{ boolean(cfg.logging.debugging.assertion_abort) }}
+
+-- logging.debugging.assertion-fork
+debugging.assertion_fork = {{ cfg.logging.debugging.assertion_fork.millis() }}
diff --git a/manager/knot_resolver_manager/datamodel/templates/macros/common_macros.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/macros/common_macros.lua.j2
new file mode 100644
index 00000000..4c2ba11a
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/macros/common_macros.lua.j2
@@ -0,0 +1,101 @@
+{% macro quotes(string) -%}
+'{{ string }}'
+{%- endmacro %}
+
+{% macro boolean(val, negation=false) -%}
+{%- if negation -%}
+{{ 'false' if val else 'true' }}
+{%- else -%}
+{{ 'true' if val else 'false' }}
+{%- endif -%}
+{%- endmacro %}
+
+{# Return string or table of strings #}
+{% macro string_table(table) -%}
+{%- if table is string -%}
+'{{ table|string }}'
+{%- else -%}
+{
+{%- for item in table -%}
+'{{ item|string }}',
+{%- endfor -%}
+}
+{%- endif -%}
+{%- endmacro %}
+
+{# Return str2ip or table of str2ip #}
+{% macro str2ip_table(table) -%}
+{%- if table is string -%}
+kres.str2ip('{{ table|string }}')
+{%- else -%}
+{
+{%- for item in table -%}
+kres.str2ip('{{ item|string }}'),
+{%- endfor -%}
+}
+{%- endif -%}
+{%- endmacro %}
+
+{# Return qtype or table of qtype #}
+{% macro qtype_table(table) -%}
+{%- if table is string -%}
+kres.type.{{ table|string }}
+{%- else -%}
+{
+{%- for item in table -%}
+kres.type.{{ item|string }},
+{%- endfor -%}
+}
+{%- endif -%}
+{%- endmacro %}
+
+{# Return server address or table of server addresses #}
+{% macro servers_table(servers) -%}
+{%- if servers is string -%}
+'{{ servers|string }}'
+{%- else -%}
+{
+{%- for item in servers -%}
+{%- if item.address -%}
+'{{ item.address|string }}',
+{%- else -%}
+'{{ item|string }}',
+{%- endif -%}
+{%- endfor -%}
+}
+{%- endif -%}
+{%- endmacro %}
+
+{# Return server address or table of server addresses #}
+{% macro tls_servers_table(servers) -%}
+{
+{%- for item in servers -%}
+{%- if item.address -%}
+{'{{ item.address|string }}',{{ tls_server_auth(item) }}},
+{%- else -%}
+'{{ item|string }}',
+{%- endif -%}
+{%- endfor -%}
+}
+{%- endmacro %}
+
+{% macro tls_server_auth(server) -%}
+{%- if server.hostname -%}
+hostname='{{ server.hostname|string }}',
+{%- endif -%}
+{%- if server.ca_file -%}
+ca_file='{{ server.ca_file|string }}',
+{%- endif -%}
+{%- if server.pin_sha256 -%}
+pin_sha256=
+{%- if server.pin_sha256 is string -%}
+'{{ server.pin_sha256|string }}',
+{%- else -%}
+{
+{%- for pin in server.pin_sha256 -%}
+'{{ pin|string }}',
+{%- endfor -%}
+}
+{%- endif -%}
+{%- endif -%}
+{%- endmacro %}
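
The macros exported here can be rendered standalone through Jinja2's template-module API, which is handy for eyeballing the generated Lua fragments (the values are illustrative):

from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("manager/knot_resolver_manager/datamodel/templates"))
macros = env.get_template("macros/common_macros.lua.j2").module
print(macros.string_table(["192.0.2.1", "192.0.2.2"]))  # {'192.0.2.1','192.0.2.2',}
print(macros.boolean(True))                             # true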
diff --git a/manager/knot_resolver_manager/datamodel/templates/macros/forward_macros.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/macros/forward_macros.lua.j2
new file mode 100644
index 00000000..f6777324
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/macros/forward_macros.lua.j2
@@ -0,0 +1,42 @@
+{% from 'macros/common_macros.lua.j2' import boolean, string_table %}
+
+{% macro forward_options(options) -%}
+{dnssec={{ boolean(options.dnssec) }},auth={{ boolean(options.authoritative) }}}
+{%- endmacro %}
+
+{% macro forward_server(server) -%}
+{%- if server.address -%}
+{%- for addr in server.address -%}
+{'{{ addr }}',
+{%- if server.transport == 'tls' -%}
+tls=true,
+{%- else -%}
+tls=false,
+{%- endif -%}
+{%- if server.hostname -%}
+hostname='{{ server.hostname }}',
+{%- endif -%}
+{%- if server.pin_sha256 -%}
+pin_sha256={{ string_table(server.pin_sha256) }},
+{%- endif -%}
+{%- if server.ca_file -%}
+ca_file='{{ server.ca_file }}',
+{%- endif -%}
+},
+{%- endfor -%}
+{% else %}
+{'{{ server }}'},
+{%- endif -%}
+{%- endmacro %}
+
+{% macro forward_servers(servers) -%}
+{
+{%- for server in servers -%}
+{{ forward_server(server) }}
+{%- endfor -%}
+}
+{%- endmacro %}
+
+{% macro policy_rule_forward_add(forward) -%}
+policy.rule_forward_add('{{ forward.subtree }}',{{ forward_options(forward.options) }},{{ forward_servers(forward.servers) }})
+{%- endmacro %}
diff --git a/manager/knot_resolver_manager/datamodel/templates/macros/local_data_macros.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/macros/local_data_macros.lua.j2
new file mode 100644
index 00000000..c3104b7d
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/macros/local_data_macros.lua.j2
@@ -0,0 +1,75 @@
+{% from 'macros/common_macros.lua.j2' import string_table, boolean %}
+{% from 'macros/policy_macros.lua.j2' import policy_get_tagset, policy_todname %}
+
+
+{% macro local_data_root_fallback_addresses(pairs) -%}
+hints.root({
+{% for name, addresses in pairs.items() %}
+ ['{{ name }}']={{ string_table(addresses) }},
+{% endfor %}
+})
+{%- endmacro %}
+
+
+{% macro local_data_root_fallback_addresses_files(files) -%}
+{% for file in files %}
+hints.root_file('{{ file }}')
+{% endfor %}
+{%- endmacro %}
+
+
+{% macro local_data_addresses(pairs) -%}
+{% for name, address in pairs.items() %}
+assert(hints.set('{{ name }} {{ address }}').result == true)
+{% endfor %}
+{%- endmacro %}
+
+
+{% macro local_data_addresses_files(files) -%}
+{% for file in files %}
+assert(hints.add_hosts('{{ file }}').result == true)
+{% endfor %}
+{%- endmacro %}
+
+
+{% macro local_data_records(input_str, is_rpz, ttl, nodata, tags=none, id='rrs') -%}
+{{ id }} = ffi.new('struct kr_rule_zonefile_config')
+{% if ttl %}
+{{ id }}.ttl = {{ ttl.millis() }}
+{% endif %}
+{% if tags %}
+{{ id }}.tags = {{ policy_get_tagset(tags) }}
+{% endif %}
+{{ id }}.nodata = {{ boolean(nodata) }}
+{{ id }}.is_rpz = {{ boolean(is_rpz) }}
+{% if is_rpz -%}
+{{ id }}.filename = '{{ input_str }}'
+{% else %}
+{{ id }}.input_str = [[
+{{ input_str }}]]
+{% endif %}
+assert(C.kr_rule_zonefile({{ id }})==0)
+{%- endmacro %}
+
+{% macro local_data_emptyzone(dname, tags) -%}
+assert(C.kr_rule_local_data_emptyzone({{ dname }},{{ tags }})==0)
+{%- endmacro %}
+
+{% macro local_data_nxdomain(dname, tags) -%}
+assert(C.kr_rule_local_data_nxdomain({{ dname }},{{ tags }})==0)
+{%- endmacro %}
+
+{% macro local_data_subtree_root(type, root, tags) -%}
+{%- if tags -%}
+{%- set get_tags = policy_get_tagset(tags) -%}
+{%- else -%}
+{%- set get_tags = '0' -%}
+{%- endif -%}
+{%- if type == 'empty' -%}
+{{ local_data_emptyzone(policy_todname(root), get_tags) }}
+{%- elif type == 'nxdomain' -%}
+{{ local_data_nxdomain(policy_todname(root), get_tags) }}
+{%- else -%}
+{# TODO: implement other possible types #}
+{%- endif -%}
+{%- endmacro %}
diff --git a/manager/knot_resolver_manager/datamodel/templates/macros/network_macros.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/macros/network_macros.lua.j2
new file mode 100644
index 00000000..ff78fbd8
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/macros/network_macros.lua.j2
@@ -0,0 +1,55 @@
+{% macro http_config(http_cfg, kind, tls=true) -%}
+http.config({tls={{ 'true' if tls else 'false'}},
+{%- if http_cfg.cert_file -%}
+ cert='{{ http_cfg.cert_file }}',
+{%- endif -%}
+{%- if http_cfg.key_file -%}
+ key='{{ http_cfg.key_file }}',
+{%- endif -%}
+},'{{ kind }}')
+{%- endmacro %}
+
+
+{% macro listen_kind(kind) -%}
+{%- if kind == "dot" -%}
+'tls'
+{%- elif kind == "doh-legacy" -%}
+'doh_legacy'
+{%- else -%}
+'{{ kind }}'
+{%- endif -%}
+{%- endmacro %}
+
+
+{% macro net_listen_unix_socket(path, kind, freebind) -%}
+net.listen('{{ path }}',nil,{kind={{ listen_kind(kind) }},freebind={{ 'true' if freebind else 'false'}}})
+{%- endmacro %}
+
+
+{% macro net_listen_interface(interface, kind, freebind, port) -%}
+net.listen(
+{%- if interface.addr -%}
+'{{ interface.addr }}',
+{%- elif interface.if_name -%}
+net.{{ interface.if_name }},
+{%- endif -%}
+{%- if interface.port -%}
+{{ interface.port }},
+{%- else -%}
+{{ port }},
+{%- endif -%}
+{kind={{ listen_kind(kind) }},freebind={{ 'true' if freebind else 'false'}}})
+{%- endmacro %}
+
+
+{% macro network_listen(listen) -%}
+{%- if listen.unix_socket -%}
+{% for path in listen.unix_socket %}
+{{ net_listen_unix_socket(path, listen.kind, listen.freebind) }}
+{% endfor %}
+{%- elif listen.interface -%}
+{% for interface in listen.interface %}
+{{ net_listen_interface(interface, listen.kind, listen.freebind, listen.port) }}
+{% endfor %}
+{%- endif -%}
+{%- endmacro %}
\ No newline at end of file
diff --git a/manager/knot_resolver_manager/datamodel/templates/macros/policy_macros.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/macros/policy_macros.lua.j2
new file mode 100644
index 00000000..36ce102f
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/macros/policy_macros.lua.j2
@@ -0,0 +1,275 @@
+{% from 'macros/common_macros.lua.j2' import string_table, str2ip_table, qtype_table, servers_table, tls_servers_table %}
+
+
+{# Add policy #}
+
+{% macro policy_add(rule, postrule=false) -%}
+{%- if postrule -%}
+policy.add({{ rule }},true)
+{%- else -%}
+policy.add({{ rule }})
+{%- endif -%}
+{%- endmacro %}
+
+
+{# Slice #}
+
+{% macro policy_slice_randomize_psl(seed='') -%}
+{%- if seed == '' -%}
+policy.slice_randomize_psl()
+{%- else -%}
+policy.slice_randomize_psl({{ seed }})
+{%- endif -%}
+{%- endmacro %}
+
+{% macro policy_slice(func, actions) -%}
+policy.slice(
+{%- if func == 'randomize-psl' -%}
+policy.slice_randomize_psl()
+{%- else -%}
+policy.slice_randomize_psl()
+{%- endif -%}
+,{{ actions }})
+{%- endmacro %}
+
+
+{# Flags #}
+
+{% macro policy_flags(flags) -%}
+policy.FLAGS({
+{{- flags -}}
+})
+{%- endmacro %}
+
+
+{# Tags assign #}
+
+{% macro policy_tags_assign(tags) -%}
+policy.TAGS_ASSIGN({{ string_table(tags) }})
+{%- endmacro %}
+
+{% macro policy_get_tagset(tags) -%}
+policy.get_tagset({{ string_table(tags) }})
+{%- endmacro %}
+
+
+{# Filters #}
+
+{% macro policy_all(action) -%}
+policy.all({{ action }})
+{%- endmacro %}
+
+{% macro policy_suffix(action, suffix_table) -%}
+policy.suffix({{ action }},{{ suffix_table }})
+{%- endmacro %}
+
+{% macro policy_suffix_common(action, suffix_table, common_suffix=none) -%}
+policy.suffix_common({{ action }},{{ suffix_table }}
+{%- if common_suffix -%}
+,{{ common_suffix }}
+{%- endif -%}
+)
+{%- endmacro %}
+
+{% macro policy_pattern(action, pattern) -%}
+policy.pattern({{ action }},'{{ pattern }}')
+{%- endmacro %}
+
+{% macro policy_rpz(action, path, watch=true) -%}
+policy.rpz({{ action|string }},'{{ path|string }}',{{ 'true' if watch else 'false' }})
+{%- endmacro %}
+
+
+{# Custom filters #}
+
+{% macro declare_policy_qtype_custom_filter() -%}
+function policy_qtype(action, qtype)
+
+ local function has_value (tab, val)
+ for index, value in ipairs(tab) do
+ if value == val then
+ return true
+ end
+ end
+
+ return false
+ end
+
+ return function (state, query)
+ if query.stype == qtype then
+ return action
+ elseif type(qtype) == 'table' and has_value(qtype, query.stype) then
+ return action
+ else
+ return nil
+ end
+ end
+end
+{%- endmacro %}
+
+{% macro policy_qtype_custom_filter(action, qtype) -%}
+policy_qtype({{ action }}, {{ qtype }})
+{%- endmacro %}
+
+
+{# Auto Filter #}
+
+{% macro policy_auto_filter(action, filter=none) -%}
+{%- if filter.suffix -%}
+{{ policy_suffix(action, policy_todname(filter.suffix)) }}
+{%- elif filter.pattern -%}
+{{ policy_pattern(action, filter.pattern) }}
+{%- elif filter.qtype -%}
+{{ policy_qtype_custom_filter(action, qtype_table(filter.qtype)) }}
+{%- else -%}
+{{ policy_all(action) }}
+{%- endif %}
+{%- endmacro %}
+
+
+{# Non-chain actions #}
+
+{% macro policy_pass() -%}
+policy.PASS
+{%- endmacro %}
+
+{% macro policy_deny() -%}
+policy.DENY
+{%- endmacro %}
+
+{% macro policy_deny_msg(message) -%}
+policy.DENY_MSG('{{ message|string }}')
+{%- endmacro %}
+
+{% macro policy_drop() -%}
+policy.DROP
+{%- endmacro %}
+
+{% macro policy_refuse() -%}
+policy.REFUSE
+{%- endmacro %}
+
+{% macro policy_tc() -%}
+policy.TC
+{%- endmacro %}
+
+{% macro policy_reroute(reroute) -%}
+policy.REROUTE(
+{%- for item in reroute -%}
+{['{{ item.source }}']='{{ item.destination }}'}{{ ',' if not loop.last }}
+{%- endfor -%}
+)
+{%- endmacro %}
+
+{% macro policy_answer(answer) -%}
+policy.ANSWER({[kres.type.{{ answer.rtype }}]={rdata=
+{%- if answer.rtype in ['A','AAAA'] -%}
+{{ str2ip_table(answer.rdata) }},
+{%- elif answer.rtype == '' -%}
+{# TODO: Do the same for other record types that require a special rdata type in Lua.
+By default, the raw string from config is used. #}
+{%- else -%}
+{{ string_table(answer.rdata) }},
+{%- endif -%}
+ttl={{ answer.ttl.seconds()|int }}}},{{ 'true' if answer.nodata else 'false' }})
+{%- endmacro %}
+
+{# policy.ANSWER( { [kres.type.A] = { rdata=kres.str2ip('192.0.2.7'), ttl=300 }}) #}
+
+{# Chain actions #}
+
+{% macro policy_mirror(mirror) -%}
+policy.MIRROR(
+{% if mirror is string %}
+'{{ mirror }}'
+{% else %}
+{
+{%- for addr in mirror -%}
+'{{ addr }}',
+{%- endfor -%}
+}
+{%- endif -%}
+)
+{%- endmacro %}
+
+{% macro policy_debug_always() -%}
+policy.DEBUG_ALWAYS
+{%- endmacro %}
+
+{% macro policy_debug_cache_miss() -%}
+policy.DEBUG_CACHE_MISS
+{%- endmacro %}
+
+{% macro policy_qtrace() -%}
+policy.QTRACE
+{%- endmacro %}
+
+{% macro policy_reqtrace() -%}
+policy.REQTRACE
+{%- endmacro %}
+
+{% macro policy_stub(servers) -%}
+policy.STUB({{ servers_table(servers) }})
+{%- endmacro %}
+
+{% macro policy_forward(servers) -%}
+policy.FORWARD({{ servers_table(servers) }})
+{%- endmacro %}
+
+{% macro policy_tls_forward(servers) -%}
+policy.TLS_FORWARD({{ tls_servers_table(servers) }})
+{%- endmacro %}
+
+
+{# Auto action #}
+
+{% macro policy_auto_action(rule) -%}
+{%- if rule.action == 'pass' -%}
+{{ policy_pass() }}
+{%- elif rule.action == 'deny' -%}
+{%- if rule.message -%}
+{{ policy_deny_msg(rule.message) }}
+{%- else -%}
+{{ policy_deny() }}
+{%- endif -%}
+{%- elif rule.action == 'drop' -%}
+{{ policy_drop() }}
+{%- elif rule.action == 'refuse' -%}
+{{ policy_refuse() }}
+{%- elif rule.action == 'tc' -%}
+{{ policy_tc() }}
+{%- elif rule.action == 'reroute' -%}
+{{ policy_reroute(rule.reroute) }}
+{%- elif rule.action == 'answer' -%}
+{{ policy_answer(rule.answer) }}
+{%- elif rule.action == 'mirror' -%}
+{{ policy_mirror(rule.servers) }}
+{%- elif rule.action == 'debug-always' -%}
+{{ policy_debug_always() }}
+{%- elif rule.action == 'debug-cache-miss' -%}
+{{ policy_debug_cache_miss() }}
+{%- elif rule.action == 'qtrace' -%}
+{{ policy_qtrace() }}
+{%- elif rule.action == 'reqtrace' -%}
+{{ policy_reqtrace() }}
+{%- endif -%}
+{%- endmacro %}
+
+
+{# Other #}
+
+{% macro policy_todname(name) -%}
+todname('{{ name.punycode()|string }}')
+{%- endmacro %}
+
+{% macro policy_todnames(names) -%}
+policy.todnames({
+{%- if names is string -%}
+'{{ names.punycode()|string }}'
+{%- else -%}
+{%- for name in names -%}
+'{{ name.punycode()|string }}',
+{%- endfor -%}
+{%- endif -%}
+})
+{%- endmacro %}
\ No newline at end of file
diff --git a/manager/knot_resolver_manager/datamodel/templates/macros/view_macros.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/macros/view_macros.lua.j2
new file mode 100644
index 00000000..efd03211
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/macros/view_macros.lua.j2
@@ -0,0 +1,22 @@
+{% macro view_insert_action(subnet, action) -%}
+assert(C.kr_view_insert_action('{{ subnet }}',{{ action }})==0)
+{%- endmacro %}
+
+{% macro view_flags(options) -%}
+{% if not options.minimize -%}
+"NO_MINIMIZE",
+{%- endif %}
+{% if not options.dns64 -%}
+"DNS64_DISABLE",
+{%- endif %}
+{%- endmacro %}
+
+{% macro view_answer(answer) -%}
+{%- if answer == 'allow' -%}
+policy.TAGS_ASSIGN({})
+{%- elif answer == 'refused' -%}
+'policy.REFUSE'
+{%- elif answer == 'noanswer' -%}
+'policy.NO_ANSWER'
+{%- endif -%}
+{%- endmacro %}
diff --git a/manager/knot_resolver_manager/datamodel/templates/monitoring.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/monitoring.lua.j2
new file mode 100644
index 00000000..ebaaa3b3
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/monitoring.lua.j2
@@ -0,0 +1,33 @@
+--- control socket location
+local ffi = require('ffi')
+local id = os.getenv('SYSTEMD_INSTANCE')
+if not id then
+ log_err(ffi.C.LOG_GRP_SYSTEM, 'environment variable $SYSTEMD_INSTANCE not set, which should not be possible when running under the manager')
+else
+ -- Bind to control socket in CWD (= rundir in config)
+ -- FIXME replace with relative path after fixing https://gitlab.nic.cz/knot/knot-resolver/-/issues/720
+ local path = '{{ cwd }}/control/'..id
+ log_warn(ffi.C.LOG_GRP_SYSTEM, 'path = ' .. path)
+ local ok, err = pcall(net.listen, path, nil, { kind = 'control' })
+ if not ok then
+ log_warn(ffi.C.LOG_GRP_NETWORK, 'bind to '..path..' failed '..err)
+ end
+end
+
+{% if cfg.monitoring.enabled == "always" %}
+modules.load('stats')
+{% endif %}
+
+--- function used for lazy statistics collection
+function collect_lazy_statistics()
+ if stats == nil then
+ modules.load('stats')
+ end
+
+ return tojson(stats.list())
+end
+
+--- function used for statistics collection
+function collect_statistics()
+ return tojson(stats.list())
+end
diff --git a/manager/knot_resolver_manager/datamodel/templates/network.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/network.lua.j2
new file mode 100644
index 00000000..665ee454
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/network.lua.j2
@@ -0,0 +1,102 @@
+{% from 'macros/common_macros.lua.j2' import boolean %}
+{% from 'macros/network_macros.lua.j2' import network_listen, http_config %}
+
+-- network.do-ipv4/6
+net.ipv4 = {{ boolean(cfg.network.do_ipv4) }}
+net.ipv6 = {{ boolean(cfg.network.do_ipv6) }}
+
+{% if cfg.network.out_interface_v4 %}
+-- network.out-interface-v4
+net.outgoing_v4('{{ cfg.network.out_interface_v4 }}')
+{% endif %}
+
+{% if cfg.network.out_interface_v6 %}
+-- network.out-interface-v6
+net.outgoing_v6('{{ cfg.network.out_interface_v6 }}')
+{% endif %}
+
+-- network.tcp-pipeline
+net.tcp_pipeline({{ cfg.network.tcp_pipeline }})
+
+-- network.edns-keep-alive
+{% if cfg.network.edns_tcp_keepalive %}
+modules.load('edns_keepalive')
+{% else %}
+modules.unload('edns_keepalive')
+{% endif %}
+
+-- network.edns-buffer-size
+net.bufsize(
+ {{ cfg.network.edns_buffer_size.upstream.bytes() }},
+ {{ cfg.network.edns_buffer_size.downstream.bytes() }}
+)
+
+{% if cfg.network.tls.cert_file and cfg.network.tls.key_file %}
+-- network.tls
+net.tls('{{ cfg.network.tls.cert_file }}', '{{ cfg.network.tls.key_file }}')
+{% endif %}
+
+{% if cfg.network.tls.sticket_secret %}
+-- network.tls.sticket-secret
+net.tls_sticket_secret('{{ cfg.network.tls.sticket_secret }}')
+{% endif %}
+
+{% if cfg.network.tls.sticket_secret_file %}
+-- network.tls.sticket-secret-file
+net.tls_sticket_secret_file('{{ cfg.network.tls.sticket_secret_file }}')
+{% endif %}
+
+{% if cfg.network.tls.auto_discovery %}
+-- network.tls.auto-discovery
+modules.load('experimental_dot_auth')
+{% else %}
+-- modules.unload('experimental_dot_auth')
+{% endif %}
+
+-- network.tls.padding
+net.tls_padding(
+{%- if cfg.network.tls.padding == true -%}
+true
+{%- elif cfg.network.tls.padding == false -%}
+false
+{%- else -%}
+{{ cfg.network.tls.padding }}
+{%- endif -%}
+)
+
+{% if cfg.network.address_renumbering %}
+-- network.address-renumbering
+modules.load('renumber')
+renumber.config = {
+{% for item in cfg.network.address_renumbering %}
+ {'{{ item.source }}', '{{ item.destination }}'},
+{% endfor %}
+}
+{% endif %}
+
+{%- set vars = {'doh_legacy': False} -%}
+{% for listen in cfg.network.listen if listen.kind == "doh-legacy" -%}
+{%- if vars.update({'doh_legacy': True}) -%}{%- endif -%}
+{%- endfor %}
+
+{% if vars.doh_legacy %}
+-- doh_legacy http config
+modules.load('http')
+{{ http_config(cfg.network.tls,"doh_legacy") }}
+{% endif %}
+
+{% if cfg.network.proxy_protocol %}
+-- network.proxy-protocol
+net.proxy_allowed({
+{% for item in cfg.network.proxy_protocol.allow %}
+'{{ item }}',
+{% endfor %}
+})
+{% else %}
+net.proxy_allowed({})
+{% endif %}
+
+-- network.listen
+{% for listen in cfg.network.listen %}
+{{ network_listen(listen) }}
+{% endfor %} \ No newline at end of file
diff --git a/manager/knot_resolver_manager/datamodel/templates/options.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/options.lua.j2
new file mode 100644
index 00000000..8210fb6d
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/options.lua.j2
@@ -0,0 +1,52 @@
+{% from 'macros/common_macros.lua.j2' import boolean %}
+
+-- options.glue-checking
+mode('{{ cfg.options.glue_checking }}')
+
+{% if cfg.options.rebinding_protection %}
+-- options.rebinding-protection
+modules.load('rebinding < iterate')
+{% endif %}
+
+{% if cfg.options.violators_workarounds %}
+-- options.violators-workarounds
+modules.load('workarounds < iterate')
+{% endif %}
+
+{% if cfg.options.serve_stale %}
+-- options.serve-stale
+modules.load('serve_stale < cache')
+{% endif %}
+
+-- options.query-priming
+{% if cfg.options.priming %}
+modules.load('priming')
+{% else %}
+modules.unload('priming')
+{% endif %}
+
+-- options.time-jump-detection
+{% if cfg.options.time_jump_detection %}
+modules.load('detect_time_jump')
+{% else %}
+modules.unload('detect_time_jump')
+{% endif %}
+
+-- options.refuse-no-rd
+{% if cfg.options.refuse_no_rd %}
+modules.load('refuse_nord')
+{% else %}
+modules.unload('refuse_nord')
+{% endif %}
+
+-- options.qname-minimisation
+option('NO_MINIMIZE', {{ boolean(cfg.options.minimize,true) }})
+
+-- options.query-loopback
+option('ALLOW_LOCAL', {{ boolean(cfg.options.query_loopback) }})
+
+-- options.reorder-rrset
+option('REORDER_RR', {{ boolean(cfg.options.reorder_rrset) }})
+
+-- options.query-case-randomization
+option('NO_0X20', {{ boolean(cfg.options.query_case_randomization,true) }}) \ No newline at end of file
diff --git a/manager/knot_resolver_manager/datamodel/templates/policy.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/policy.lua.j2
new file mode 100644
index 00000000..c5b86f74
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/policy.lua.j2
@@ -0,0 +1,62 @@
+{% from 'macros/policy_macros.lua.j2' import declare_policy_qtype_custom_filter, policy_flags, policy_add, policy_auto_filter, policy_auto_action %}
+{% from 'macros/view_macros.lua.j2' import view_tsig, view_addr %}
+
+{% if cfg.policy -%}
+
+{{ declare_policy_qtype_custom_filter() }}
+
+{% for rule in cfg.policy %}
+{% if rule.views -%}
+{# views set for rule #}
+{% for view_id in rule.views -%}
+{%- set view = cfg.views[view_id] -%}
+
+{# merge options from view and policy rule #}
+{%- set options = none -%}
+{% if rule.options and view.options -%}
+{% set options = rule.options|list + view.options|list %}
+{% elif rule.options %}
+{% set options = rule.options|list %}
+{% elif view.options %}
+{% set options = view.options|list %}
+{%- endif %}
+
+{# view tsig #}
+{% if view.tsig -%}
+{% for tsig in view.tsig -%}
+
+{% if options -%}
+{{ view_tsig(tsig|string, policy_auto_filter(policy_flags(options|list), rule.filter)) }}
+{%- endif %}
+
+{{ view_tsig(tsig|string, policy_auto_filter(policy_auto_action(rule), rule.filter)) }}
+
+{% endfor %}
+{% endif -%}
+
+{# view addr #}
+{% if view.subnets -%}
+{% for addr in view.subnets -%}
+
+{% if options -%}
+{{ view_addr(addr|string, policy_auto_filter(policy_flags(options|list), rule.filter)) }}
+{%- endif %}
+
+{{ view_addr(addr|string, policy_auto_filter(policy_auto_action(rule), rule.filter)) }}
+
+{% endfor %}
+{% endif %}
+
+{%- endfor -%}
+{%- else -%}
+{# no views set for policy rule #}
+
+{% if rule.options -%}
+{{ policy_add(policy_auto_filter(policy_flags(rule.options|list), rule.filter)) }}
+{%- endif %}
+
+{{ policy_add(policy_auto_filter(policy_auto_action(rule), rule.filter)) }}
+
+{% endif %}
+{% endfor %}
+{% endif %} \ No newline at end of file
diff --git a/manager/knot_resolver_manager/datamodel/templates/rpz.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/rpz.lua.j2
new file mode 100644
index 00000000..08061c39
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/rpz.lua.j2
@@ -0,0 +1,57 @@
+{% from 'macros/policy_macros.lua.j2' import policy_flags, policy_add, policy_rpz, policy_action %}
+{% from 'macros/view_macros.lua.j2' import view_tsig, view_addr %}
+
+{% if cfg.rpz %}
+{% for rpz in cfg.rpz %}
+{% if rpz.views -%}
+{# views set for rpz #}
+{% for view_id in rpz.views -%}
+{%- set view = cfg.views[view_id] -%}
+
+{# merge options from view and rpz #}
+{%- set options = none -%}
+{% if rpz.options and view.options -%}
+{% set options = rpz.options|list + view.options|list %}
+{% elif rpz.options %}
+{% set options = rpz.options|list %}
+{% elif view.options %}
+{% set options = view.options|list %}
+{%- endif %}
+
+{% if view.tsig -%}
+{% for tsig in view.tsig -%}
+
+{%- if options -%}
+{{ view_tsig(tsig|string, policy_rpz(policy_flags(options|list), rpz.file, rpz.watch)) }}
+{%- endif %}
+
+{{ view_tsig(tsig|string,policy_rpz(policy_action(rpz), rpz.file, rpz.watch )) }}
+
+{% endfor %}
+{%- endif -%}
+
+{% if view.subnets -%}
+{% for addr in view.subnets -%}
+
+{%- if options -%}
+{{ view_addr(addr|string, policy_rpz(policy_flags(options|list), rpz.file, rpz.watch)) }}
+{%- endif %}
+
+{{ view_addr(addr|string,policy_rpz(policy_action(rpz), rpz.file, rpz.watch )) }}
+
+{% endfor %}
+{% endif %}
+
+{% endfor %}
+{% else %}
+{# no views set for rpz #}
+
+{% if rpz.options -%}
+{{ policy_add(policy_rpz(policy_flags(rpz.options), rpz.file, rpz.watch)) }}
+{%- endif %}
+
+{{ policy_add(policy_rpz(policy_action(rpz), rpz.file, rpz.watch )) }}
+
+{% endif %}
+{% endfor %}
+{% endif %} \ No newline at end of file
diff --git a/doc/.packaging/centos/7/NOTSUPPORTED b/manager/knot_resolver_manager/datamodel/templates/slices.lua.j2
index e69de29b..e69de29b 100644
--- a/doc/.packaging/centos/7/NOTSUPPORTED
+++ b/manager/knot_resolver_manager/datamodel/templates/slices.lua.j2
diff --git a/manager/knot_resolver_manager/datamodel/templates/static_hints.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/static_hints.lua.j2
new file mode 100644
index 00000000..130facf9
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/static_hints.lua.j2
@@ -0,0 +1,51 @@
+{% if cfg.static_hints.etc_hosts or cfg.static_hints.root_hints_file or cfg.static_hints.hints_files or cfg.static_hints.root_hints or cfg.static_hints.hints %}
+modules.load('hints > iterate')
+
+{% if cfg.static_hints.ttl %}
+-- static-hints.ttl
+hints.ttl({{ cfg.static_hints.ttl.seconds()|string }})
+{% endif %}
+
+-- static-hints.no-data
+hints.use_nodata({{ 'true' if cfg.static_hints.nodata else 'false' }})
+
+{% if cfg.static_hints.etc_hosts %}
+-- static-hints.etc-hosts
+hints.add_hosts('/etc/hosts')
+{% endif %}
+
+{% if cfg.static_hints.root_hints_file %}
+-- static-hints.root-hints-file
+hints.root_file('{{ cfg.static_hints.root_hints_file }}')
+{% endif %}
+
+{% if cfg.static_hints.hints_files %}
+-- static-hints.hints-files
+{% for item in cfg.static_hints.hints_files %}
+hints.add_hosts('{{ item }}')
+{% endfor %}
+{% endif %}
+
+{% if cfg.static_hints.root_hints %}
+-- static-hints.root-hints
+hints.root({
+{% for name, addrs in cfg.static_hints.root_hints.items() %}
+['{{ name.punycode() }}'] = {
+{% for addr in addrs %}
+ '{{ addr }}',
+{% endfor %}
+ },
+{% endfor %}
+})
+{% endif %}
+
+{% if cfg.static_hints.hints %}
+-- static-hints.hints
+{% for name, addrs in cfg.static_hints.hints.items() %}
+{% for addr in addrs %}
+hints.set('{{ name.punycode() }} {{ addr }}')
+{% endfor %}
+{% endfor %}
+{% endif %}
+
+{% endif %} \ No newline at end of file
diff --git a/manager/knot_resolver_manager/datamodel/templates/stub_zones.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/stub_zones.lua.j2
new file mode 100644
index 00000000..85290982
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/stub_zones.lua.j2
@@ -0,0 +1,58 @@
+{% from 'macros/policy_macros.lua.j2' import policy_flags, policy_add, policy_suffix, policy_todname, policy_stub %}
+{% from 'macros/view_macros.lua.j2' import view_tsig, view_addr %}
+
+{% if cfg.stub_zones %}
+{% for zone in cfg.stub_zones %}
+-- stub-zone: {{ zone.subtree }}
+{% if zone.views -%}
+{# views set for stub-zone #}
+{% for view_id in zone.views -%}
+{%- set view = cfg.views[view_id] -%}
+
+{# merge options from view and stub-zone #}
+{%- set options = none -%}
+{% if zone.options and view.options -%}
+{% set options = zone.options|list + view.options|list %}
+{% elif zone.options %}
+{% set options = zone.options|list %}
+{% elif view.options %}
+{% set options = view.options|list %}
+{%- endif %}
+
+{% if view.tsig -%}
+{% for tsig in view.tsig -%}
+
+{%- if options -%}
+{{ view_tsig(tsig|string, policy_suffix(policy_flags(options|list), policy_todname(zone.subtree|string))) }}
+{%- endif %}
+
+{{ view_tsig(tsig|string, policy_suffix(policy_stub(zone.servers|list), policy_todname(zone.subtree|string))) }}
+
+{% endfor %}
+{%- endif -%}
+
+{% if view.subnets -%}
+{% for addr in view.subnets -%}
+
+{%- if options -%}
+{{ view_addr(addr|string, policy_suffix(policy_flags(options|list), policy_todname(zone.subtree|string))) }}
+{%- endif %}
+
+{{ view_addr(addr|string, policy_suffix(policy_stub(zone.servers|list), policy_todname(zone.subtree|string))) }}
+
+{% endfor %}
+{% endif %}
+
+{% endfor %}
+{% else %}
+{# no views set for stub-zone #}
+
+{% if zone.options -%}
+{{ policy_add(policy_suffix(policy_flags(zone.options|list), policy_todname(zone.subtree|string))) }}
+{%- endif %}
+
+{{ policy_add(policy_suffix(policy_stub(zone.servers|list), policy_todname(zone.subtree|string))) }}
+
+{% endif %}
+{% endfor %}
+{% endif %} \ No newline at end of file
diff --git a/manager/knot_resolver_manager/datamodel/templates/views.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/views.lua.j2
new file mode 100644
index 00000000..99c654c9
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/views.lua.j2
@@ -0,0 +1,22 @@
+{% from 'macros/common_macros.lua.j2' import quotes %}
+{% from 'macros/view_macros.lua.j2' import view_insert_action, view_flags, view_answer %}
+{% from 'macros/policy_macros.lua.j2' import policy_flags, policy_tags_assign %}
+
+{% if cfg.views %}
+{% for view in cfg.views %}
+{% for subnet in view.subnets %}
+
+{% if view.tags -%}
+{{ view_insert_action(subnet, policy_tags_assign(view.tags)) }}
+{% elif view.answer %}
+{{ view_insert_action(subnet, view_answer(view.answer)) }}
+{%- endif %}
+
+{%- set flags = view_flags(view.options) -%}
+{% if flags -%}
+{{ view_insert_action(subnet, quotes(policy_flags(flags))) }}
+{%- endif %}
+
+{% endfor %}
+{% endfor %}
+{% endif %}
diff --git a/manager/knot_resolver_manager/datamodel/templates/webmgmt.lua.j2 b/manager/knot_resolver_manager/datamodel/templates/webmgmt.lua.j2
new file mode 100644
index 00000000..938ea8da
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/templates/webmgmt.lua.j2
@@ -0,0 +1,25 @@
+{% from 'macros/common_macros.lua.j2' import boolean %}
+
+{% if cfg.webmgmt -%}
+-- webmgmt
+modules.load('http')
+http.config({tls = {{ boolean(cfg.webmgmt.tls) }},
+{%- if cfg.webmgmt.cert_file -%}
+ cert = '{{ cfg.webmgmt.cert_file }}',
+{%- endif -%}
+{%- if cfg.webmgmt.key_file -%}
+ key = '{{ cfg.webmgmt.key_file }}',
+{%- endif -%}
+}, 'webmgmt')
+net.listen(
+{%- if cfg.webmgmt.unix_socket -%}
+ '{{ cfg.webmgmt.unix_socket }}',nil,
+{%- elif cfg.webmgmt.interface -%}
+ {%- if cfg.webmgmt.interface.addr -%}
+ '{{ cfg.webmgmt.interface.addr }}',{{ cfg.webmgmt.interface.port }},
+ {%- elif cfg.webmgmt.interface.if_name -%}
+ net.{{ cfg.webmgmt.interface.if_name }},{{ cfg.webmgmt.interface.port }},
+ {%- endif -%}
+{%- endif -%}
+{ kind = 'webmgmt' })
+{%- endif %} \ No newline at end of file
diff --git a/manager/knot_resolver_manager/datamodel/types/__init__.py b/manager/knot_resolver_manager/datamodel/types/__init__.py
new file mode 100644
index 00000000..33d8c90d
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/types/__init__.py
@@ -0,0 +1,56 @@
+from .enums import DNSRecordTypeEnum, PolicyActionEnum, PolicyFlagEnum
+from .files import AbsoluteDir, Dir, File, FilePath
+from .generic_types import ListOrItem
+from .types import (
+ DomainName,
+ IDPattern,
+ Int0_512,
+ Int0_65535,
+ InterfaceName,
+ InterfaceOptionalPort,
+ InterfacePort,
+ IntNonNegative,
+ IntPositive,
+ IPAddress,
+ IPAddressOptionalPort,
+ IPAddressPort,
+ IPNetwork,
+ IPv4Address,
+ IPv6Address,
+ IPv6Network96,
+ Percent,
+ PortNumber,
+ SizeUnit,
+ TimeUnit,
+)
+
+__all__ = [
+ "PolicyActionEnum",
+ "PolicyFlagEnum",
+ "DNSRecordTypeEnum",
+ "DomainName",
+ "IDPattern",
+ "Int0_512",
+ "Int0_65535",
+ "InterfaceName",
+ "InterfaceOptionalPort",
+ "InterfacePort",
+ "IntNonNegative",
+ "IntPositive",
+ "IPAddress",
+ "IPAddressOptionalPort",
+ "IPAddressPort",
+ "IPNetwork",
+ "IPv4Address",
+ "IPv6Address",
+ "IPv6Network96",
+ "ListOrItem",
+ "Percent",
+ "PortNumber",
+ "SizeUnit",
+ "TimeUnit",
+ "AbsoluteDir",
+ "File",
+ "FilePath",
+ "Dir",
+]
diff --git a/manager/knot_resolver_manager/datamodel/types/base_types.py b/manager/knot_resolver_manager/datamodel/types/base_types.py
new file mode 100644
index 00000000..96c0a393
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/types/base_types.py
@@ -0,0 +1,193 @@
+import re
+from typing import Any, Dict, Pattern, Type
+
+from knot_resolver_manager.utils.modeling import BaseValueType
+
+
+class IntBase(BaseValueType):
+ """
+ Base class for working with integer values.
+ """
+
+ _value: int
+
+ def __int__(self) -> int:
+ return self._value
+
+ def __str__(self) -> str:
+ return str(self._value)
+
+ def __eq__(self, o: object) -> bool:
+ return isinstance(o, IntBase) and o._value == self._value
+
+ def serialize(self) -> Any:
+ return self._value
+
+ @classmethod
+ def json_schema(cls: Type["IntBase"]) -> Dict[Any, Any]:
+ return {"type": "integer"}
+
+
+class StrBase(BaseValueType):
+ """
+ Base class for working with string values.
+ """
+
+ _value: str
+
+ def __int__(self) -> int:
+ raise ValueError("Can't convert string to an integer.")
+
+ def __str__(self) -> str:
+ return self._value
+
+ def to_std(self) -> str:
+ return self._value
+
+ def __hash__(self) -> int:
+ return hash(self._value)
+
+ def __eq__(self, o: object) -> bool:
+ return isinstance(o, StrBase) and o._value == self._value
+
+ def serialize(self) -> Any:
+ return self._value
+
+ @classmethod
+ def json_schema(cls: Type["StrBase"]) -> Dict[Any, Any]:
+ return {"type": "string"}
+
+
+class IntRangeBase(IntBase):
+ """
+ Base class for integer values restricted to a range.
+ Just inherit the class and set '_min' and/or '_max'.
+
+ class IntNonNegative(IntRangeBase):
+ _min: int = 0
+ """
+
+ _min: int
+ _max: int
+
+ def __init__(self, source_value: Any, object_path: str = "/") -> None:
+ super().__init__(source_value)
+ if isinstance(source_value, int) and not isinstance(source_value, bool):
+ if hasattr(self, "_min") and (source_value < self._min):
+ raise ValueError(f"value {source_value} is lower than the minimum {self._min}.")
+ if hasattr(self, "_max") and (source_value > self._max):
+ raise ValueError(f"value {source_value} is higher than the maximum {self._max}")
+ self._value = source_value
+ else:
+ raise ValueError(
+ f"expected integer, got '{type(source_value)}'",
+ object_path,
+ )
+
+ @classmethod
+ def json_schema(cls: Type["IntRangeBase"]) -> Dict[Any, Any]:
+ typ: Dict[str, Any] = {"type": "integer"}
+ if hasattr(cls, "_min"):
+ typ["minimum"] = cls._min
+ if hasattr(cls, "_max"):
+ typ["maximum"] = cls._max
+ return typ
+
+
+class PatternBase(StrBase):
+ """
+ Base class for string values that must match a regex pattern.
+ Just inherit the class and set the '_re' pattern.
+
+ class ABPattern(PatternBase):
+ _re: Pattern[str] = re.compile(r"ab*")
+ """
+
+ _re: Pattern[str]
+
+ def __init__(self, source_value: Any, object_path: str = "/") -> None:
+ super().__init__(source_value)
+ if isinstance(source_value, str):
+ if type(self)._re.match(source_value):
+ self._value: str = source_value
+ else:
+ raise ValueError(f"'{source_value}' does not match '{self._re.pattern}' pattern")
+ else:
+ raise ValueError(
+ f"expected string, got '{type(source_value)}'",
+ object_path,
+ )
+
+ @classmethod
+ def json_schema(cls: Type["PatternBase"]) -> Dict[Any, Any]:
+ return {"type": "string", "pattern": rf"{cls._re.pattern}"}
+
+
+class UnitBase(IntBase):
+ """
+ Base class for integer values given as a string with a unit suffix.
+ Just inherit the class and set '_units'.
+
+ class CustomUnit(UnitBase):
+ _units = {"b": 1, "kb": 1000}
+ """
+
+ _re: Pattern[str]
+ _units: Dict[str, int]
+ _value_orig: str
+
+ def __init__(self, source_value: Any, object_path: str = "/") -> None:
+ super().__init__(source_value)
+ type(self)._re = re.compile(rf"^(\d+)({r'|'.join(type(self)._units.keys())})$")
+ if isinstance(source_value, str) and self._re.match(source_value):
+ self._value_orig = source_value
+ grouped = self._re.search(source_value)
+ if grouped:
+ val, unit = grouped.groups()
+ if unit is None:
+ raise ValueError(f"Missing units. Accepted units are {list(type(self)._units.keys())}")
+ elif unit not in type(self)._units:
+ raise ValueError(
+ f"Used unexpected unit '{unit}' for {type(self).__name__}."
+ f" Accepted units are {list(type(self)._units.keys())}",
+ object_path,
+ )
+ self._value = int(val) * type(self)._units[unit]
+ else:
+ raise ValueError(f"{type(self._value)} Failed to convert: {self}")
+ elif source_value in (0, "0"):
+ self._value_orig = source_value
+ self._value = int(source_value)
+ elif isinstance(source_value, int):
+ raise ValueError(
+ f"number without units, please convert to string and add unit - {list(type(self)._units.keys())}",
+ object_path,
+ )
+ else:
+ raise ValueError(
+ f"expected number with units in a string, got '{type(source_value)}'.",
+ object_path,
+ )
+
+ def __str__(self) -> str:
+ """
+ Used by Jinja2. Returns the original string representation, including the unit.
+ """
+ return str(self._value_orig)
+
+ def __repr__(self) -> str:
+ return f"Unit[{type(self).__name__},{self._value_orig}]"
+
+ def __eq__(self, o: object) -> bool:
+ """
+ Two instances are equal when they represent the same size
+ regardless of their string representation.
+ """
+ return isinstance(o, UnitBase) and o._value == self._value
+
+ def serialize(self) -> Any:
+ return self._value_orig
+
+ @classmethod
+ def json_schema(cls: Type["UnitBase"]) -> Dict[Any, Any]:
+ return {"type": "string", "pattern": rf"{cls._re.pattern}"}
diff --git a/manager/knot_resolver_manager/datamodel/types/enums.py b/manager/knot_resolver_manager/datamodel/types/enums.py
new file mode 100644
index 00000000..cd45e1d6
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/types/enums.py
@@ -0,0 +1,153 @@
+from typing_extensions import Literal
+
+# Policy actions
+PolicyActionEnum = Literal[
+ # Nonchain actions
+ "pass",
+ "deny",
+ "drop",
+ "refuse",
+ "tc",
+ "reroute",
+ "answer",
+ # Chain actions
+ "mirror",
+ "forward",
+ "stub",
+ "debug-always",
+ "debug-cache-miss",
+ "qtrace",
+ "reqtrace",
+]
+
+# FLAGS from https://knot-resolver.readthedocs.io/en/stable/lib.html?highlight=options#c.kr_qflags
+PolicyFlagEnum = Literal[
+ "no-minimize",
+ "no-ipv4",
+ "no-ipv6",
+ "tcp",
+ "resolved",
+ "await-ipv4",
+ "await-ipv6",
+ "await-cut",
+ "no-edns",
+ "cached",
+ "no-cache",
+ "expiring",
+ "allow_local",
+ "dnssec-want",
+ "dnssec-bogus",
+ "dnssec-insecure",
+ "dnssec-cd",
+ "stub",
+ "always-cut",
+ "dnssec-wexpand",
+ "permissive",
+ "strict",
+ "badcookie-again",
+ "cname",
+ "reorder-rr",
+ "trace",
+ "no-0x20",
+ "dnssec-nods",
+ "dnssec-optout",
+ "nonauth",
+ "forward",
+ "dns64-mark",
+ "cache-tried",
+ "no-ns-found",
+ "pkt-is-sane",
+ "dns64-disable",
+]
+
+# DNS records from 'kres.type' table
+DNSRecordTypeEnum = Literal[
+ "A",
+ "A6",
+ "AAAA",
+ "AFSDB",
+ "ANY",
+ "APL",
+ "ATMA",
+ "AVC",
+ "AXFR",
+ "CAA",
+ "CDNSKEY",
+ "CDS",
+ "CERT",
+ "CNAME",
+ "CSYNC",
+ "DHCID",
+ "DLV",
+ "DNAME",
+ "DNSKEY",
+ "DOA",
+ "DS",
+ "EID",
+ "EUI48",
+ "EUI64",
+ "GID",
+ "GPOS",
+ "HINFO",
+ "HIP",
+ "HTTPS",
+ "IPSECKEY",
+ "ISDN",
+ "IXFR",
+ "KEY",
+ "KX",
+ "L32",
+ "L64",
+ "LOC",
+ "LP",
+ "MAILA",
+ "MAILB",
+ "MB",
+ "MD",
+ "MF",
+ "MG",
+ "MINFO",
+ "MR",
+ "MX",
+ "NAPTR",
+ "NID",
+ "NIMLOC",
+ "NINFO",
+ "NS",
+ "NSAP",
+ "NSAP-PTR",
+ "NSEC",
+ "NSEC3",
+ "NSEC3PARAM",
+ "NULL",
+ "NXT",
+ "OPENPGPKEY",
+ "OPT",
+ "PTR",
+ "PX",
+ "RKEY",
+ "RP",
+ "RRSIG",
+ "RT",
+ "SIG",
+ "SINK",
+ "SMIMEA",
+ "SOA",
+ "SPF",
+ "SRV",
+ "SSHFP",
+ "SVCB",
+ "TA",
+ "TALINK",
+ "TKEY",
+ "TLSA",
+ "TSIG",
+ "TXT",
+ "UID",
+ "UINFO",
+ "UNSPEC",
+ "URI",
+ "WKS",
+ "X25",
+ "ZONEMD",
+]
diff --git a/manager/knot_resolver_manager/datamodel/types/files.py b/manager/knot_resolver_manager/datamodel/types/files.py
new file mode 100644
index 00000000..99fd8c21
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/types/files.py
@@ -0,0 +1,138 @@
+from pathlib import Path
+from typing import Any, Dict, Tuple, Type, TypeVar
+
+from knot_resolver_manager.datamodel.globals import get_resolve_root, get_strict_validation
+from knot_resolver_manager.utils.modeling.base_value_type import BaseValueType
+
+
+class UncheckedPath(BaseValueType):
+ """
+ Wrapper around pathlib.Path object. Can represent pretty much any Path. No checks are
+ performed on the value. The value is taken as is.
+ """
+
+ _value: Path
+
+ def __init__(
+ self, source_value: Any, parents: Tuple["UncheckedPath", ...] = tuple(), object_path: str = "/"
+ ) -> None:
+ super().__init__(source_value, object_path=object_path)
+ self._object_path: str = object_path
+ self._parents: Tuple[UncheckedPath, ...] = parents
+ self.strict_validation: bool = get_strict_validation()
+
+ if isinstance(source_value, str):
+ # we do not load global validation context if the path is absolute
+ # this prevents errors when constructing defaults in the schema
+ if source_value.startswith("/"):
+ resolve_root = Path("/")
+ else:
+ resolve_root = get_resolve_root()
+
+ self._raw_value: str = source_value
+ if self._parents:
+ pp = map(lambda p: p.to_path(), self._parents)
+ self._value: Path = Path(resolve_root, *pp, source_value)
+ else:
+ self._value: Path = Path(resolve_root, source_value)
+ else:
+ raise ValueError(f"expected file path in a string, got '{source_value}' with type '{type(source_value)}'.")
+
+ def __str__(self) -> str:
+ return str(self._value)
+
+ def __eq__(self, o: object) -> bool:
+ if not isinstance(o, UncheckedPath):
+ return False
+
+ return o._value == self._value
+
+ def __int__(self) -> int:
+ raise RuntimeError("Path cannot be converted to type <int>")
+
+ def to_path(self) -> Path:
+ return self._value
+
+ def serialize(self) -> Any:
+ return self._raw_value
+
+ def relative_to(self, parent: "UncheckedPath") -> "UncheckedPath":
+ """return a path with an added parent part"""
+ return UncheckedPath(self._raw_value, parents=(parent, *self._parents), object_path=self._object_path)
+
+ UPT = TypeVar("UPT", bound="UncheckedPath")
+
+ def reconstruct(self, cls: Type[UPT]) -> UPT:
+ """
+ Rebuild this object as an instance of the given subclass. Practically, this allows conversions from UncheckedPath to any of its stricter subclasses.
+ """
+ return cls(self._raw_value, parents=self._parents, object_path=self._object_path)
+
+ @classmethod
+ def json_schema(cls: Type["UncheckedPath"]) -> Dict[Any, Any]:
+ return {
+ "type": "string",
+ }
+
+
+class Dir(UncheckedPath):
+ """
+ Path, that is enforced to be:
+ - an existing directory
+ """
+
+ def __init__(
+ self, source_value: Any, parents: Tuple["UncheckedPath", ...] = tuple(), object_path: str = "/"
+ ) -> None:
+ super().__init__(source_value, parents=parents, object_path=object_path)
+ if self.strict_validation and not self._value.is_dir():
+ raise ValueError(f"path '{self._value}' does not point to an existing directory")
+
+
+class AbsoluteDir(Dir):
+ """
+ Path, that is enforced to be:
+ - absolute
+ - an existing directory
+ """
+
+ def __init__(
+ self, source_value: Any, parents: Tuple["UncheckedPath", ...] = tuple(), object_path: str = "/"
+ ) -> None:
+ super().__init__(source_value, parents=parents, object_path=object_path)
+ if self.strict_validation and not self._value.is_absolute():
+ raise ValueError("path not absolute")
+
+
+class File(UncheckedPath):
+ """
+ Path, that is enforced to be:
+ - an existing file
+ """
+
+ def __init__(
+ self, source_value: Any, parents: Tuple["UncheckedPath", ...] = tuple(), object_path: str = "/"
+ ) -> None:
+ super().__init__(source_value, parents=parents, object_path=object_path)
+ if self.strict_validation and not self._value.exists():
+ raise ValueError("file does not exist")
+ if self.strict_validation and not self._value.is_file():
+ raise ValueError("path is not a file")
+
+
+class FilePath(UncheckedPath):
+ """
+ Path, that is enforced to be:
+ - parent of the last path segment is an existing directory
+ - it does not point to a dir
+ """
+
+ def __init__(
+ self, source_value: Any, parents: Tuple["UncheckedPath", ...] = tuple(), object_path: str = "/"
+ ) -> None:
+ super().__init__(source_value, parents=parents, object_path=object_path)
+ p = self._value.parent
+ if self.strict_validation and (not p.exists() or not p.is_dir()):
+ raise ValueError(f"path '{self._value}' does not point inside an existing directory")
+ if self.strict_validation and self._value.is_dir():
+ raise ValueError("path points to a directory when we expected a file")
diff --git a/manager/knot_resolver_manager/datamodel/types/generic_types.py b/manager/knot_resolver_manager/datamodel/types/generic_types.py
new file mode 100644
index 00000000..bf4e8680
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/types/generic_types.py
@@ -0,0 +1,33 @@
+from typing import Any, List, TypeVar, Union
+
+from knot_resolver_manager.utils.modeling import BaseGenericTypeWrapper
+
+T = TypeVar("T")
+
+
+class ListOrItem(BaseGenericTypeWrapper[Union[List[T], T]]):
+ _value_orig: Union[List[T], T]
+ _list: List[T]
+
+ def __init__(self, source_value: Any, object_path: str = "/") -> None: # pylint: disable=unused-argument
+ super().__init__(source_value)
+ self._value_orig: Union[List[T], T] = source_value
+ self._list: List[T] = source_value if isinstance(source_value, list) else [source_value]
+
+ def __getitem__(self, index: Any) -> T:
+ return self._list[index]
+
+ def __int__(self) -> int:
+ raise ValueError(f"Can't convert '{type(self).__name__}' to an integer.")
+
+ def __str__(self) -> str:
+ return str(self._value_orig)
+
+ def to_std(self) -> List[T]:
+ return self._list
+
+ def __eq__(self, o: object) -> bool:
+ return isinstance(o, ListOrItem) and o._value_orig == self._value_orig
+
+ def serialize(self) -> Union[List[T], T]:
+ return self._value_orig
diff --git a/manager/knot_resolver_manager/datamodel/types/types.py b/manager/knot_resolver_manager/datamodel/types/types.py
new file mode 100644
index 00000000..f38759c8
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/types/types.py
@@ -0,0 +1,411 @@
+import ipaddress
+import re
+from typing import Any, Dict, Optional, Type, Union
+
+from knot_resolver_manager.datamodel.types.base_types import IntRangeBase, PatternBase, StrBase, UnitBase
+from knot_resolver_manager.utils.modeling import BaseValueType
+
+
+class IntNonNegative(IntRangeBase):
+ _min: int = 0
+
+
+class IntPositive(IntRangeBase):
+ _min: int = 1
+
+
+class Int0_512(IntRangeBase):
+ _min: int = 0
+ _max: int = 512
+
+
+class Int0_65535(IntRangeBase):
+ _min: int = 0
+ _max: int = 65_535
+
+
+class Percent(IntRangeBase):
+ _min: int = 0
+ _max: int = 100
+
+
+class PortNumber(IntRangeBase):
+ _min: int = 1
+ _max: int = 65_535
+
+ @classmethod
+ def from_str(cls: Type["PortNumber"], port: str, object_path: str = "/") -> "PortNumber":
+ try:
+ return cls(int(port), object_path)
+ except ValueError as e:
+ raise ValueError(f"invalid port number {port}") from e
+
+
+class SizeUnit(UnitBase):
+ _units = {"B": 1, "K": 1024, "M": 1024**2, "G": 1024**3}
+
+ def bytes(self) -> int:
+ return self._value
+
+ def mbytes(self) -> int:
+ return self._value // 1024**2
+
+
+class TimeUnit(UnitBase):
+ _units = {"us": 1, "ms": 10**3, "s": 10**6, "m": 60 * 10**6, "h": 3600 * 10**6, "d": 24 * 3600 * 10**6}
+
+ def seconds(self) -> int:
+ return self._value // 1000**2
+
+ def millis(self) -> int:
+ return self._value // 1000
+
+ def micros(self) -> int:
+ return self._value
+
+
+class DomainName(StrBase):
+ """
+ Fully or partially qualified domain name.
+ """
+
+ _punycode: str
+ _re = re.compile(
+ r"(?=^.{,253}\.?$)" # max 253 chars
+ r"(^(?!\.)" # do not start name with dot
+ r"((?!-)" # do not start label with hyphen
+ r"\.?[a-zA-Z0-9-]{,62}" # max 63 chars in label
+ r"[a-zA-Z0-9])+" # do not end label with hyphen
+ r"\.?$)" # end with or without '.'
+ r"|^\.$" # allow root-zone
+ )
+
+ def __init__(self, source_value: Any, object_path: str = "/") -> None:
+ super().__init__(source_value)
+ if isinstance(source_value, str):
+ try:
+ punycode = source_value.encode("idna").decode("utf-8") if source_value != "." else "."
+ except ValueError:
+ raise ValueError(
+ f"conversion of '{source_value}' to IDN punycode representation failed",
+ object_path,
+ )
+
+ if type(self)._re.match(punycode):
+ self._value = source_value
+ self._punycode = punycode
+ else:
+ raise ValueError(
+ f"'{source_value}' represented in punycode '{punycode}' does not match '{self._re.pattern}' pattern",
+ object_path,
+ )
+ else:
+ raise ValueError(
+ "Unexpected value for '<domain-name>'."
+ f" Expected string, got '{source_value}' with type '{type(source_value)}'",
+ object_path,
+ )
+
+ def __hash__(self) -> int:
+ if self._value.endswith("."):
+ return hash(self._value)
+ return hash(f"{self._value}.")
+
+ def punycode(self) -> str:
+ return self._punycode
+
+ @classmethod
+ def json_schema(cls: Type["DomainName"]) -> Dict[Any, Any]:
+ return {"type": "string", "pattern": rf"{cls._re.pattern}"}
+
+
+class InterfaceName(PatternBase):
+ _re = re.compile(r"^[a-zA-Z0-9]+(?:[-_][a-zA-Z0-9]+)*$")
+
+
+class IDPattern(PatternBase):
+ """
+ Alphanumerical ID for identifying systemd slice.
+ """
+
+ _re = re.compile(r"[a-zA-Z0-9]+")
+
+
+class InterfacePort(StrBase):
+ addr: Union[None, ipaddress.IPv4Address, ipaddress.IPv6Address] = None
+ if_name: Optional[InterfaceName] = None
+ port: PortNumber
+
+ def __init__(self, source_value: Any, object_path: str = "/") -> None:
+ super().__init__(source_value)
+ if isinstance(source_value, str):
+ parts = source_value.split("@")
+ if len(parts) == 2:
+ try:
+ self.addr = ipaddress.ip_address(parts[0])
+ except ValueError as e1:
+ try:
+ self.if_name = InterfaceName(parts[0])
+ except ValueError as e2:
+ raise ValueError(f"expected IP address or interface name, got '{parts[0]}'.") from e1 and e2
+ self.port = PortNumber.from_str(parts[1], object_path)
+ else:
+ raise ValueError(f"expected '<ip-address|interface-name>@<port>', got '{source_value}'.")
+ self._value = source_value
+ else:
+ raise ValueError(
+ "Unexpected value for '<ip-address|interface-name>@<port>'."
+ f" Expected string, got '{source_value}' with type '{type(source_value)}'",
+ object_path,
+ )
+
+
+class InterfaceOptionalPort(StrBase):
+ addr: Union[None, ipaddress.IPv4Address, ipaddress.IPv6Address] = None
+ if_name: Optional[InterfaceName] = None
+ port: Optional[PortNumber] = None
+
+ def __init__(self, source_value: Any, object_path: str = "/") -> None:
+ super().__init__(source_value)
+ if isinstance(source_value, str):
+ parts = source_value.split("@")
+ if 0 < len(parts) < 3:
+ try:
+ self.addr = ipaddress.ip_address(parts[0])
+ except ValueError as e1:
+ try:
+ self.if_name = InterfaceName(parts[0])
+ except ValueError as e2:
+ raise ValueError(f"expected IP address or interface name, got '{parts[0]}'.") from e1 and e2
+ if len(parts) == 2:
+ self.port = PortNumber.from_str(parts[1], object_path)
+ else:
+ raise ValueError(f"expected '<ip-address|interface-name>[@<port>]', got '{parts}'.")
+ self._value = source_value
+ else:
+ raise ValueError(
+ "Unexpected value for '<ip-address|interface-name>[@<port>]'."
+ f" Expected string, got '{source_value}' with type '{type(source_value)}'",
+ object_path,
+ )
+
+
+class IPAddressPort(StrBase):
+ addr: Union[ipaddress.IPv4Address, ipaddress.IPv6Address]
+ port: PortNumber
+
+ def __init__(self, source_value: Any, object_path: str = "/") -> None:
+ super().__init__(source_value)
+ if isinstance(source_value, str):
+ parts = source_value.split("@")
+ if len(parts) == 2:
+ self.port = PortNumber.from_str(parts[1], object_path)
+ try:
+ self.addr = ipaddress.ip_address(parts[0])
+ except ValueError as e:
+ raise ValueError(f"failed to parse IP address '{parts[0]}'.") from e
+ else:
+ raise ValueError(f"expected '<ip-address>@<port>', got '{source_value}'.")
+ self._value = source_value
+ else:
+ raise ValueError(
+ "Unexpected value for '<ip-address>@<port>'."
+ f" Expected string, got '{source_value}' with type '{type(source_value)}'"
+ )
+
+
+class IPAddressOptionalPort(StrBase):
+ addr: Union[ipaddress.IPv4Address, ipaddress.IPv6Address]
+ port: Optional[PortNumber] = None
+
+ def __init__(self, source_value: Any, object_path: str = "/") -> None:
+ super().__init__(source_value)
+ if isinstance(source_value, str):
+ parts = source_value.split("@")
+ if 0 < len(parts) < 3:
+ try:
+ self.addr = ipaddress.ip_address(parts[0])
+ except ValueError as e:
+ raise ValueError(f"failed to parse IP address '{parts[0]}'.") from e
+ if len(parts) == 2:
+ self.port = PortNumber.from_str(parts[1], object_path)
+ else:
+ raise ValueError(f"expected '<ip-address>[@<port>]', got '{parts}'.")
+ self._value = source_value
+ else:
+ raise ValueError(
+ "Unexpected value for a '<ip-address>[@<port>]'."
+ f" Expected string, got '{source_value}' with type '{type(source_value)}'",
+ object_path,
+ )
+
+
+class IPv4Address(BaseValueType):
+ _value: ipaddress.IPv4Address
+
+ def __init__(self, source_value: Any, object_path: str = "/") -> None:
+ super().__init__(source_value)
+ if isinstance(source_value, str):
+ try:
+ self._value: ipaddress.IPv4Address = ipaddress.IPv4Address(source_value)
+ except ValueError as e:
+ raise ValueError("failed to parse IPv4 address.") from e
+ else:
+ raise ValueError(
+ "Unexpected value for a IPv4 address."
+ f" Expected string, got '{source_value}' with type '{type(source_value)}'",
+ object_path,
+ )
+
+ def to_std(self) -> ipaddress.IPv4Address:
+ return self._value
+
+ def __str__(self) -> str:
+ return str(self._value)
+
+ def __int__(self) -> int:
+ raise ValueError("Can't convert IPv4 address to an integer")
+
+ def __eq__(self, o: object) -> bool:
+ """
+ Two instances of IPv4Address are equal when they represent same IPv4 address as string.
+ """
+ return isinstance(o, IPv4Address) and str(o._value) == str(self._value)
+
+ def serialize(self) -> Any:
+ return str(self._value)
+
+ @classmethod
+ def json_schema(cls: Type["IPv4Address"]) -> Dict[Any, Any]:
+ return {
+ "type": "string",
+ }
+
+
+class IPv6Address(BaseValueType):
+ _value: ipaddress.IPv6Address
+
+ def __init__(self, source_value: Any, object_path: str = "/") -> None:
+ super().__init__(source_value)
+ if isinstance(source_value, str):
+ try:
+ self._value: ipaddress.IPv6Address = ipaddress.IPv6Address(source_value)
+ except ValueError as e:
+ raise ValueError("failed to parse IPv6 address.") from e
+ else:
+ raise ValueError(
+ "Unexpected value for a IPv6 address."
+ f" Expected string, got '{source_value}' with type '{type(source_value)}'",
+ object_path,
+ )
+
+ def to_std(self) -> ipaddress.IPv6Address:
+ return self._value
+
+ def __str__(self) -> str:
+ return str(self._value)
+
+ def __int__(self) -> int:
+ raise ValueError("Can't convert IPv6 address to an integer")
+
+ def __eq__(self, o: object) -> bool:
+ """
+ Two instances of IPv6Address are equal when they represent same IPv6 address as string.
+ """
+ return isinstance(o, IPv6Address) and str(o._value) == str(self._value)
+
+ def serialize(self) -> Any:
+ return str(self._value)
+
+ @classmethod
+ def json_schema(cls: Type["IPv6Address"]) -> Dict[Any, Any]:
+ return {
+ "type": "string",
+ }
+
+
+IPAddress = Union[IPv4Address, IPv6Address]
+
+
+class IPNetwork(BaseValueType):
+ _value: Union[ipaddress.IPv4Network, ipaddress.IPv6Network]
+
+ def __init__(self, source_value: Any, object_path: str = "/") -> None:
+ super().__init__(source_value)
+ if isinstance(source_value, str):
+ try:
+ self._value: Union[ipaddress.IPv4Network, ipaddress.IPv6Network] = ipaddress.ip_network(source_value)
+ except ValueError as e:
+ raise ValueError("failed to parse IP network.") from e
+ else:
+ raise ValueError(
+ "Unexpected value for a network subnet."
+ f" Expected string, got '{source_value}' with type '{type(source_value)}'"
+ )
+
+ def to_std(self) -> Union[ipaddress.IPv4Network, ipaddress.IPv6Network]:
+ return self._value
+
+ def __str__(self) -> str:
+ return self._value.with_prefixlen
+
+ def __int__(self) -> int:
+ raise ValueError("Can't convert network prefix to an integer")
+
+ def serialize(self) -> Any:
+ return self._value.with_prefixlen
+
+ @classmethod
+ def json_schema(cls: Type["IPNetwork"]) -> Dict[Any, Any]:
+ return {
+ "type": "string",
+ }
+
+
+class IPv6Network96(BaseValueType):
+ _value: ipaddress.IPv6Network
+
+ def __init__(self, source_value: Any, object_path: str = "/") -> None:
+ super().__init__(source_value, object_path=object_path)
+ if isinstance(source_value, str):
+ try:
+ self._value: ipaddress.IPv6Network = ipaddress.IPv6Network(source_value)
+ except ValueError as e:
+ raise ValueError("failed to parse IPv6 /96 network.") from e
+
+ if self._value.prefixlen == 128:
+ raise ValueError(
+ "Expected IPv6 network address with /96 prefix length."
+ " Submitted address has been interpreted as /128."
+ " Maybe, you forgot to add /96 after the base address?"
+ )
+
+ if self._value.prefixlen != 96:
+ raise ValueError(
+ "expected IPv6 network address with /96 prefix length."
+ f" Got prefix lenght of {self._value.prefixlen}"
+ )
+ else:
+ raise ValueError(
+ "Unexpected value for a network subnet."
+ f" Expected string, got '{source_value}' with type '{type(source_value)}'"
+ )
+
+ def __str__(self) -> str:
+ return self._value.with_prefixlen
+
+ def __int__(self) -> int:
+ raise ValueError("Can't convert network prefix to an integer")
+
+ def __eq__(self, o: object) -> bool:
+ return isinstance(o, IPv6Network96) and o._value == self._value
+
+ def serialize(self) -> Any:
+ return self._value.with_prefixlen
+
+ def to_std(self) -> ipaddress.IPv6Network:
+ return self._value
+
+ @classmethod
+ def json_schema(cls: Type["IPv6Network96"]) -> Dict[Any, Any]:
+ return {"type": "string"}
diff --git a/manager/knot_resolver_manager/datamodel/view_schema.py b/manager/knot_resolver_manager/datamodel/view_schema.py
new file mode 100644
index 00000000..74bf5a11
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/view_schema.py
@@ -0,0 +1,40 @@
+from typing import List, Optional
+
+from typing_extensions import Literal
+
+from knot_resolver_manager.datamodel.types import IDPattern, IPNetwork
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+
+class ViewOptionsSchema(ConfigSchema):
+ """
+ Configuration options for clients identified by the view.
+
+ ---
+ minimize: Send minimum amount of information in recursive queries to enhance privacy.
+ dns64: Enable/disable DNS64.
+ """
+
+ minimize: bool = True
+ dns64: bool = True
+
+
+class ViewSchema(ConfigSchema):
+ """
+ Configuration parameters that allow you to create personalized policy rules for groups of clients.
+
+ ---
+ subnets: Identifies clients based on their subnet.
+ tags: Tags to link with other policy rules.
+ answer: Direct way of answering requests from clients identified by the view.
+ options: Configuration options for clients identified by the view.
+ """
+
+ subnets: List[IPNetwork]
+ tags: Optional[List[IDPattern]] = None
+ answer: Optional[Literal["allow", "refused", "noanswer"]] = None
+ options: ViewOptionsSchema = ViewOptionsSchema()
+
+ def _validate(self) -> None:
+ if bool(self.tags) == bool(self.answer):
+ raise ValueError("only one of 'tags' and 'answer' options must be configured")
diff --git a/manager/knot_resolver_manager/datamodel/webmgmt_schema.py b/manager/knot_resolver_manager/datamodel/webmgmt_schema.py
new file mode 100644
index 00000000..41cc3387
--- /dev/null
+++ b/manager/knot_resolver_manager/datamodel/webmgmt_schema.py
@@ -0,0 +1,27 @@
+from typing import Optional
+
+from knot_resolver_manager.datamodel.types import File, FilePath, InterfacePort
+from knot_resolver_manager.utils.modeling import ConfigSchema
+
+
+class WebmgmtSchema(ConfigSchema):
+ """
+ Configuration of legacy web management endpoint.
+
+ ---
+ unix_socket: Path to unix domain socket to listen to.
+ interface: IP address or interface name with port number to listen to.
+ tls: Enable/disable TLS.
+ cert_file: Path to certificate file.
+ key_file: Path to certificate key.
+ """
+
+ unix_socket: Optional[FilePath] = None
+ interface: Optional[InterfacePort] = None
+ tls: bool = False
+ cert_file: Optional[File] = None
+ key_file: Optional[File] = None
+
+ def _validate(self) -> None:
+ if bool(self.unix_socket) == bool(self.interface):
+ raise ValueError("One of 'interface' or 'unix-socket' must be configured.")
diff --git a/manager/knot_resolver_manager/exceptions.py b/manager/knot_resolver_manager/exceptions.py
new file mode 100644
index 00000000..5b05d98e
--- /dev/null
+++ b/manager/knot_resolver_manager/exceptions.py
@@ -0,0 +1,28 @@
+from typing import List
+
+
+class CancelStartupExecInsteadException(Exception):
+ """
+ Exception used for terminating system startup and instead
+ causing an exec of something else. Could be used by subprocess
+ controllers such as supervisord to allow them to run as the top-level
+ process in the process tree.
+ """
+
+ def __init__(self, exec_args: List[str], *args: object) -> None:
+ self.exec_args = exec_args
+ super().__init__(*args)
+
+
+class KresManagerException(Exception):
+ """
+ Base class for all custom exceptions we use in our code
+ """
+
+
+class SubprocessControllerException(KresManagerException):
+ pass
+
+
+class SubprocessControllerTimeoutException(KresManagerException):
+ pass
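A sketch of the intended use of `CancelStartupExecInsteadException`: a subprocess controller aborts manager startup and asks the caller to exec another top-level process instead. The argv below is hypothetical:

```python
raise CancelStartupExecInsteadException(
    ["/usr/bin/supervisord", "--configuration", "/run/knot-resolver/supervisord.conf"]
)
```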
diff --git a/manager/knot_resolver_manager/kres_manager.py b/manager/knot_resolver_manager/kres_manager.py
new file mode 100644
index 00000000..072c73fc
--- /dev/null
+++ b/manager/knot_resolver_manager/kres_manager.py
@@ -0,0 +1,306 @@
+import asyncio
+import logging
+import sys
+import time
+from subprocess import SubprocessError
+from typing import Callable, List, Optional
+
+from knot_resolver_manager.compat.asyncio import create_task
+from knot_resolver_manager.config_store import ConfigStore
+from knot_resolver_manager.constants import (
+ FIX_COUNTER_DECREASE_INTERVAL_SEC,
+ MANAGER_FIX_ATTEMPT_MAX_COUNTER,
+ WATCHDOG_INTERVAL,
+)
+from knot_resolver_manager.exceptions import SubprocessControllerException
+from knot_resolver_manager.kresd_controller.interface import (
+ Subprocess,
+ SubprocessController,
+ SubprocessStatus,
+ SubprocessType,
+)
+from knot_resolver_manager.utils.functional import Result
+from knot_resolver_manager.utils.modeling.types import NoneType
+
+from .datamodel import KresConfig
+
+logger = logging.getLogger(__name__)
+
+
+class _FixCounter:
+ def __init__(self) -> None:
+ self._counter = 0
+ self._timestamp = time.time()
+
+ def increase(self) -> None:
+ self._counter += 1
+ self._timestamp = time.time()
+
+ def try_decrease(self) -> None:
+ if time.time() - self._timestamp > FIX_COUNTER_DECREASE_INTERVAL_SEC:
+ if self._counter > 0:
+ self._counter -= 1
+ self._timestamp = time.time()
+ logger.info(
+ f"Enough time has passed since the last detected instability, decreasing fix attempt counter to {self._counter}"
+ )
+
+ def __str__(self) -> str:
+ return str(self._counter)
+
+ def is_too_high(self) -> bool:
+ return self._counter >= MANAGER_FIX_ATTEMPT_MAX_COUNTER
+
+
+async def _deny_max_worker_changes(config_old: KresConfig, config_new: KresConfig) -> Result[None, str]:
+ if config_old.max_workers != config_new.max_workers:
+ return Result.err("Changing manager's `rundir` during runtime is not allowed.")
+
+ return Result.ok(None)
+
+
+class KresManager: # pylint: disable=too-many-instance-attributes
+ """
+ Core of the whole operation. Orchestrates individual instances under some
+ service manager like systemd.
+
+ Instantiate with `KresManager.create()`, not with the usual constructor!
+ """
+
+ def __init__(self, shutdown_trigger: Callable[[int], None], _i_know_what_i_am_doing: bool = False):
+ if not _i_know_what_i_am_doing:
+ logger.error(
+ "Trying to create an instance of KresManager using normal constructor. Please use "
+ "`KresManager.get_instance()` instead"
+ )
+ assert False
+
+ self._workers: List[Subprocess] = []
+ self._gc: Optional[Subprocess] = None
+ self._manager_lock = asyncio.Lock()
+ self._controller: SubprocessController
+ self._watchdog_task: Optional["asyncio.Task[None]"] = None
+ self._fix_counter: _FixCounter = _FixCounter()
+ self._config_store: ConfigStore
+ self._shutdown_trigger: Callable[[int], None] = shutdown_trigger
+
+ @staticmethod
+ async def create(
+ subprocess_controller: SubprocessController,
+ config_store: ConfigStore,
+ shutdown_trigger: Callable[[int], None],
+ ) -> "KresManager":
+ """
+ Creates new instance of KresManager.
+ """
+
+ inst = KresManager(shutdown_trigger, _i_know_what_i_am_doing=True)
+ await inst._async_init(subprocess_controller, config_store) # pylint: disable=protected-access
+ return inst
+
+ async def _async_init(self, subprocess_controller: SubprocessController, config_store: ConfigStore) -> None:
+ self._controller = subprocess_controller
+ self._config_store = config_store
+
+ # initialize subprocess controller
+ logger.debug("Starting controller")
+ await self._controller.initialize_controller(config_store.get())
+ self._watchdog_task = create_task(self._watchdog())
+ logger.debug("Looking for already running workers")
+ await self._collect_already_running_workers()
+
+ # registering the function calls them immediately, therefore after this, the config is applied
+ await config_store.register_verifier(self.validate_config)
+ await config_store.register_on_change_callback(self.apply_config)
+
+ # register controller config change listeners
+ await config_store.register_verifier(_deny_max_worker_changes)
+
+ async def _spawn_new_worker(self, config: KresConfig) -> None:
+ subprocess = await self._controller.create_subprocess(config, SubprocessType.KRESD)
+ await subprocess.start()
+ self._workers.append(subprocess)
+
+ async def _stop_a_worker(self) -> None:
+ if len(self._workers) == 0:
+ raise IndexError("Can't stop a kresd when there are no running")
+
+ subprocess = self._workers.pop()
+ await subprocess.stop()
+
+ async def _collect_already_running_workers(self) -> None:
+ for subp in await self._controller.get_all_running_instances():
+ if subp.type == SubprocessType.KRESD:
+ self._workers.append(subp)
+ elif subp.type == SubprocessType.GC:
+ assert self._gc is None
+ self._gc = subp
+ else:
+ raise RuntimeError("unexpected subprocess type")
+
+ async def _rolling_restart(self, new_config: KresConfig) -> None:
+ for kresd in self._workers:
+ await kresd.apply_new_config(new_config)
+
+ async def _ensure_number_of_children(self, config: KresConfig, n: int) -> None:
+ # kill children that are not needed
+ while len(self._workers) > n:
+ await self._stop_a_worker()
+
+ # spawn new children if needed
+ while len(self._workers) < n:
+ await self._spawn_new_worker(config)
+
+ def _is_gc_running(self) -> bool:
+ return self._gc is not None
+
+ async def _start_gc(self, config: KresConfig) -> None:
+ subprocess = await self._controller.create_subprocess(config, SubprocessType.GC)
+ await subprocess.start()
+ self._gc = subprocess
+
+ async def _stop_gc(self) -> None:
+ assert self._gc is not None
+ await self._gc.stop()
+ self._gc = None
+
+ async def validate_config(self, _old: KresConfig, new: KresConfig) -> Result[NoneType, str]:
+ async with self._manager_lock:
+ logger.debug("Testing the new config with a canary process")
+ try:
+ # technically, this has the side effect of leaving a new process running
+ # but it's practically not a problem, because
+ # if it keeps running, the config is valid and others will soon join as well
+ # if it crashes and the startup fails, then well, it's not running anymore... :)
+ await self._spawn_new_worker(new)
+ except (SubprocessError, SubprocessControllerException):
+ logger.error("Kresd with the new config failed to start, rejecting config")
+ return Result.err("canary kresd process failed to start. Config might be invalid.")
+
+ logger.debug("Canary process test passed.")
+ return Result.ok(None)
+
+ async def _reload_system_state(self) -> None:
+ async with self._manager_lock:
+ self._workers = []
+ self._gc = None
+ await self._collect_already_running_workers()
+
+ async def apply_config(self, config: KresConfig, _noretry: bool = False) -> None:
+ try:
+ async with self._manager_lock:
+ logger.debug("Applying config to all workers")
+ await self._rolling_restart(config)
+ await self._ensure_number_of_children(config, int(config.workers))
+
+ if self._is_gc_running() != bool(config.cache.garbage_collector):
+ if config.cache.garbage_collector:
+ logger.debug("Starting cache GC")
+ await self._start_gc(config)
+ else:
+ logger.debug("Stopping cache GC")
+ await self._stop_gc()
+ except SubprocessControllerException as e:
+ if _noretry:
+ raise
+ elif self._fix_counter.is_too_high():
+ logger.error(f"Failed to apply config: {e}")
+ logger.error("There have already been problems recently, refusing to try to fix it.")
+ await self.forced_shutdown() # possible improvement - the person who requested this change won't get a response this way
+ else:
+ logger.error(f"Failed to apply config: {e}")
+ logger.warning("Reloading system state and trying again.")
+ self._fix_counter.increase()
+ await self._reload_system_state()
+ await self.apply_config(config, _noretry=True)
+
+ async def stop(self):
+ if self._watchdog_task is not None:
+ self._watchdog_task.cancel() # cancel it
+ try:
+ await self._watchdog_task # and let it really finish
+ except asyncio.CancelledError:
+ pass
+
+ async with self._manager_lock:
+ # we could stop all the children one by one right now
+ # we won't do that and we leave that up to the subprocess controller to do that while it is shutting down
+ await self._controller.shutdown_controller()
+ # now, when everything is stopped, let's clean up all the remains
+ await asyncio.gather(*[w.cleanup() for w in self._workers])
+
+ async def forced_shutdown(self) -> None:
+ logger.warning("Collecting all remaining workers...")
+ await self._reload_system_state()
+ logger.warning("Terminating...")
+ self._shutdown_trigger(1)
+
+ async def _instability_handler(self) -> None:
+ if self._fix_counter.is_too_high():
+ logger.error(
+ "Already attempted to many times to fix system state. Refusing to try again and shutting down."
+ )
+ await self.forced_shutdown()
+ return
+
+ try:
+ logger.warning("Instability detected. Dropping known list of workers and reloading it from the system.")
+ self._fix_counter.increase()
+ await self._reload_system_state()
+ logger.warning("Workers reloaded. Applying old config....")
+ await self.apply_config(self._config_store.get(), _noretry=True)
+ logger.warning(f"System stability hopefully renewed. Fix attempt counter is currently {self._fix_counter}")
+ except BaseException:
+ logger.error("Failed attempting to fix an error. Forcefully shutting down.", exc_info=True)
+ await self.forced_shutdown()
+
+ async def _watchdog(self) -> None:
+ while True:
+ await asyncio.sleep(WATCHDOG_INTERVAL)
+
+ self._fix_counter.try_decrease()
+
+ try:
+ # gather current state
+ async with self._manager_lock:
+ detected_subprocesses = await self._controller.get_subprocess_status()
+ expected_ids = [x.id for x in self._workers]
+ if self._gc:
+ expected_ids.append(self._gc.id)
+ invoke_callback = False
+
+ for eid in expected_ids:
+ if eid not in detected_subprocesses:
+ logger.error("Subprocess with id '%s' was not found in the system!", eid)
+ invoke_callback = True
+ continue
+
+ if detected_subprocesses[eid] is SubprocessStatus.FAILED:
+ logger.error("Subprocess '%s' is failed.", eid)
+ invoke_callback = True
+ continue
+
+ if detected_subprocesses[eid] is SubprocessStatus.UNKNOWN:
+ logger.warning("Subprocess '%s' is in unknown state!", eid)
+
+ non_registered_ids = detected_subprocesses.keys() - set(expected_ids)
+ if len(non_registered_ids) != 0:
+ logger.error(
+ "Found additional kresd instances in the system, which shouldn't be there - %s",
+ non_registered_ids,
+ )
+ invoke_callback = True
+
+ except asyncio.CancelledError:
+ raise
+ except BaseException:
+ invoke_callback = True
+ logger.error("Knot Resolver watchdog failed with an unexpected exception.", exc_info=True)
+
+ if invoke_callback:
+ try:
+ await self._instability_handler()
+ except Exception:
+ logger.error("Watchdog failed while invoking instability callback", exc_info=True)
+ logger.error("Violently terminating!")
+ sys.exit(1)
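Construction goes through the async factory, as the class docstring insists; a minimal sketch, assuming the controller, config store, and shutdown trigger already exist:

```python
async def boot(controller: SubprocessController, store: ConfigStore) -> KresManager:
    def shutdown(exit_code: int) -> None:
        ...  # hand the exit code over to the main event loop

    # create() starts the watchdog, collects pre-existing workers and
    # registers config verifiers/callbacks before returning
    return await KresManager.create(controller, store, shutdown)
```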
diff --git a/manager/knot_resolver_manager/kresd_controller/__init__.py b/manager/knot_resolver_manager/kresd_controller/__init__.py
new file mode 100644
index 00000000..6c5053e2
--- /dev/null
+++ b/manager/knot_resolver_manager/kresd_controller/__init__.py
@@ -0,0 +1,93 @@
+"""
+This file contains autodetection logic for available subprocess controllers. Because we have to catch errors
+from imports, they are located in functions which are invoked at the end of this file.
+
+We supported multiple subprocess controllers during development; by now, everything has converged on supervisord alone.
+The interface, however, remains in place so that different controllers can be added in the future.
+"""
+# pylint: disable=import-outside-toplevel
+
+import asyncio
+import logging
+from typing import List, Optional
+
+from knot_resolver_manager.datamodel.config_schema import KresConfig
+from knot_resolver_manager.kresd_controller.interface import SubprocessController
+
+logger = logging.getLogger(__name__)
+
+"""
+List of all subprocess controllers that are available in order of priority.
+It is filled dynamically based on available modules that do not fail to import.
+"""
+_registered_controllers: List[SubprocessController] = []
+
+
+def try_supervisord():
+ """
+ Attempt to load supervisord controllers.
+ """
+ try:
+ from knot_resolver_manager.kresd_controller.supervisord import SupervisordSubprocessController
+
+ _registered_controllers.append(SupervisordSubprocessController())
+ except ImportError:
+ logger.error("Failed to import modules related to supervisord service manager", exc_info=True)
+
+
+async def get_best_controller_implementation(config: KresConfig) -> SubprocessController:
+ logger.info("Starting service manager auto-selection...")
+
+ if len(_registered_controllers) == 0:
+ logger.error("No controllers are available! Did you install all dependencies?")
+ raise LookupError("No service managers available!")
+
+ # check all controllers concurrently
+ res = await asyncio.gather(*(cont.is_controller_available(config) for cont in _registered_controllers))
+ logger.info(
+ "Available subprocess controllers are %s",
+ str(tuple((str(c) for r, c in zip(res, _registered_controllers) if r))),
+ )
+
+ # take the first one on the list which is available
+ for avail, controller in zip(res, _registered_controllers):
+ if avail:
+ logger.info("Selected controller '%s'", str(controller))
+ return controller
+
+ # or fail
+ raise LookupError("Can't find any available service manager!")
+
+
+def list_controller_names() -> List[str]:
+ """
+ Returns a list of names of registered controllers. The listed controllers are not necessarily functional.
+ """
+
+ return [str(controller) for controller in sorted(_registered_controllers, key=str)]
+
+
+async def get_controller_by_name(config: KresConfig, name: str) -> SubprocessController:
+ logger.debug("Subprocess controller selected manualy by the user, testing feasibility...")
+
+ controller: Optional[SubprocessController] = None
+ for c in sorted(_registered_controllers, key=str):
+ if str(c).startswith(name):
+ if str(c) != name:
+ logger.debug("Assuming '%s' is a shortcut for '%s'", name, str(c))
+ controller = c
+ break
+
+ if controller is None:
+ logger.error("Subprocess controller with name '%s' was not found", name)
+ raise LookupError(f"No subprocess controller named '{name}' found")
+
+ if await controller.is_controller_available(config):
+ logger.info("Selected controller '%s'", str(controller))
+ return controller
+ else:
+ raise LookupError("The selected subprocess controller is not available for use on this system.")
+
+
+# run the imports on module load
+try_supervisord()
diff --git a/manager/knot_resolver_manager/kresd_controller/interface.py b/manager/knot_resolver_manager/kresd_controller/interface.py
new file mode 100644
index 00000000..c0d22a52
--- /dev/null
+++ b/manager/knot_resolver_manager/kresd_controller/interface.py
@@ -0,0 +1,251 @@
+import asyncio
+import itertools
+import logging
+import sys
+from abc import ABC, abstractmethod # pylint: disable=no-name-in-module
+from enum import Enum, auto
+from typing import Dict, Iterable, Optional, Type, TypeVar
+from weakref import WeakValueDictionary
+
+from knot_resolver_manager.constants import kresd_config_file
+from knot_resolver_manager.datamodel.config_schema import KresConfig
+from knot_resolver_manager.exceptions import SubprocessControllerException
+from knot_resolver_manager.statistics import register_resolver_metrics_for, unregister_resolver_metrics_for
+from knot_resolver_manager.utils.async_utils import writefile
+
+logger = logging.getLogger(__name__)
+
+
+class SubprocessType(Enum):
+ KRESD = auto()
+ GC = auto()
+
+
+T = TypeVar("T", bound="KresID")
+
+
+class KresID:
+ """
+ ID object used for identifying subprocesses.
+ """
+
+ _used: "Dict[SubprocessType, WeakValueDictionary[int, KresID]]" = {k: WeakValueDictionary() for k in SubprocessType}
+
+ @classmethod
+ def alloc(cls: Type[T], typ: SubprocessType) -> T:
+ # find free ID closest to zero
+ for i in itertools.count(start=0, step=1):
+ if i not in cls._used[typ]:
+ res = cls.new(typ, i)
+ return res
+
+ raise RuntimeError("Reached an end of an infinite loop. How?")
+
+ @classmethod
+ def new(cls: "Type[T]", typ: SubprocessType, n: int) -> "T":
+ if n in cls._used[typ]:
+ # Ignoring typing here, because I can't find a way to make the _used dict
+ # typed based on the subclass. I am not even sure that it differs between
+ # subclasses; it's probably still the same dict. But we don't really care.
+ return cls._used[typ][n] # type: ignore
+ else:
+ val = cls(typ, n, _i_know_what_i_am_doing=True)
+ cls._used[typ][n] = val
+ return val
+
+ def __init__(self, typ: SubprocessType, n: int, _i_know_what_i_am_doing: bool = False):
+ if not _i_know_what_i_am_doing:
+ raise RuntimeError("Don't do this. You seem to have no idea what it does")
+
+ self._id = n
+ self._type = typ
+
+ @property
+ def subprocess_type(self) -> SubprocessType:
+ return self._type
+
+ def __repr__(self) -> str:
+ return f"KresID({self})"
+
+ def __hash__(self) -> int:
+ return self._id
+
+ def __eq__(self, o: object) -> bool:
+ if isinstance(o, KresID):
+ return self._id == o._id
+ return False
+
+ def __str__(self) -> str:
+ """
+ Returns string representation of the ID usable directly in the underlying service manager
+ """
+ raise NotImplementedError()
+
+ @staticmethod
+ def from_string(val: str) -> "KresID":
+ """
+ Inverse of __str__
+ """
+ raise NotImplementedError()
+
+ def __int__(self) -> int:
+ return self._id
+
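+# Interning sketch: alloc() hands out the lowest free number, while new() returns
+# the existing instance for an already used (type, number) pair, e.g.
+#
+#     a = SupervisordKresID.new(SubprocessType.KRESD, 1)
+#     b = SupervisordKresID.new(SubprocessType.KRESD, 1)
+#     assert a is b  # the same interned object, not merely an equal one
+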
+
+class Subprocess(ABC):
+ """
+ One Subprocess instance corresponds to one subprocess managed by the manager
+ """
+
+ def __init__(self, config: KresConfig, kid: KresID) -> None:
+ self._id = kid
+ self._config = config
+ self._metrics_registered: bool = False
+
+ async def start(self) -> None:
+ # create config file
+ lua_config = self._config.render_lua()
+ await writefile(kresd_config_file(self._config, self.id), lua_config)
+ try:
+ await self._start()
+ register_resolver_metrics_for(self)
+ self._metrics_registered = True
+ except SubprocessControllerException as e:
+ kresd_config_file(self._config, self.id).unlink()
+ raise e
+
+ async def apply_new_config(self, new_config: KresConfig) -> None:
+ self._config = new_config
+ # update config file
+ logger.debug(f"Writing config file for {self.id}")
+ lua_config = new_config.render_lua()
+ await writefile(kresd_config_file(new_config, self.id), lua_config)
+ # update runtime status
+ logger.debug(f"Restarting {self.id}")
+ await self._restart()
+
+ async def stop(self) -> None:
+ if self._metrics_registered:
+ unregister_resolver_metrics_for(self)
+ await self._stop()
+ await self.cleanup()
+
+ async def cleanup(self) -> None:
+ """
+ Remove temporary files and all traces of this instance running. It is NOT SAFE to call this while
+ the kresd is running, because it will break automatic restarts (at the very least).
+ """
+ kresd_config_file(self._config, self.id).unlink()
+
+ def __eq__(self, o: object) -> bool:
+ return isinstance(o, type(self)) and o.type == self.type and o.id == self.id
+
+ def __hash__(self) -> int:
+ return hash(type(self)) ^ hash(self.type) ^ hash(self.id)
+
+ @abstractmethod
+ async def _start(self) -> None:
+ pass
+
+ @abstractmethod
+ async def _stop(self) -> None:
+ pass
+
+ @abstractmethod
+ async def _restart(self) -> None:
+ pass
+
+ @property
+ def type(self) -> SubprocessType:
+ return self.id.subprocess_type
+
+ @property
+ def id(self) -> KresID:
+ return self._id
+
+ async def command(self, cmd: str) -> str:
+ reader: asyncio.StreamReader
+ writer: Optional[asyncio.StreamWriter] = None
+ try:
+ reader, writer = await asyncio.open_unix_connection(f"./control/{int(self.id)}")
+
+ # drop prompt
+ _ = await reader.read(2)
+
+ # write command
+ writer.write(cmd.encode("utf8"))
+ writer.write(b"\n")
+ await writer.drain()
+
+ # read result
+ result_bytes = await reader.readline()
+ return result_bytes.decode("utf8")[:-1] # strip trailing newline
+
+ finally:
+ if writer is not None:
+ writer.close()
+
+ # StreamWriter.wait_closed() is only available since Python 3.7
+ if sys.version_info >= (3, 7):
+ await writer.wait_closed() # type: ignore
+
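+# Usage sketch: the control socket speaks kresd's Lua command protocol, so a
+# caller can run e.g. (assuming a started worker and the ./control/ layout above):
+#
+#     answer = await subprocess.command("cache.clear()")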
+
+class SubprocessStatus(Enum):
+ RUNNING = auto()
+ FAILED = auto()
+ UNKNOWN = auto()
+
+
+class SubprocessController(ABC):
+ """
+ The common Subprocess Controller interface. This is what KresManager requires and what has to be implemented by all
+ controllers.
+ """
+
+ @abstractmethod
+ async def is_controller_available(self, config: KresConfig) -> bool:
+ """
+ Returns whether the controller is available with the given config
+ """
+
+ @abstractmethod
+ async def initialize_controller(self, config: KresConfig) -> None:
+ """
+ Should be called when we want to really start using the controller with a specific configuration
+ """
+
+ @abstractmethod
+ async def get_all_running_instances(self) -> Iterable[Subprocess]:
+ """
+
+ Must NOT be called before initialize_controller()
+ """
+
+ @abstractmethod
+ async def shutdown_controller(self) -> None:
+ """
+ Called when the manager is gracefully shutting down. Allows us to stop
+ the service manager process or simply cleanup, so that we don't reuse
+ the same resources in a new run.
+
+ Must NOT be called before initialize_controller()
+ """
+
+ @abstractmethod
+ async def create_subprocess(self, subprocess_config: KresConfig, subprocess_type: SubprocessType) -> Subprocess:
+ """
+ Return a Subprocess object which can be operated on. The subprocess is not
+ started or in any way active after this call. That has to be performed manually
+ using the returned object itself.
+
+ Must NOT be called before initialize_controller()
+ """
+
+ @abstractmethod
+ async def get_subprocess_status(self) -> Dict[KresID, SubprocessStatus]:
+ """
+ Get the status of running subprocesses as seen by the controller. This method actively polls
+ for information.
+
+ Must NOT be called before initialize_controller()
+ """
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/__init__.py b/manager/knot_resolver_manager/kresd_controller/supervisord/__init__.py
new file mode 100644
index 00000000..f9d3171f
--- /dev/null
+++ b/manager/knot_resolver_manager/kresd_controller/supervisord/__init__.py
@@ -0,0 +1,269 @@
+import logging
+from os import kill # pylint: disable=[no-name-in-module]
+from pathlib import Path
+from typing import Any, Dict, Iterable, NoReturn, Optional, Union, cast
+from xmlrpc.client import Fault, ServerProxy
+
+import supervisor.xmlrpc # type: ignore[import]
+
+from knot_resolver_manager.compat.asyncio import async_in_a_thread
+from knot_resolver_manager.constants import supervisord_config_file, supervisord_pid_file, supervisord_sock_file
+from knot_resolver_manager.datamodel.config_schema import KresConfig
+from knot_resolver_manager.exceptions import CancelStartupExecInsteadException, SubprocessControllerException
+from knot_resolver_manager.kresd_controller.interface import (
+ KresID,
+ Subprocess,
+ SubprocessController,
+ SubprocessStatus,
+ SubprocessType,
+)
+from knot_resolver_manager.kresd_controller.supervisord.config_file import SupervisordKresID, write_config_file
+from knot_resolver_manager.utils import which
+from knot_resolver_manager.utils.async_utils import call, readfile
+
+logger = logging.getLogger(__name__)
+
+
+async def _start_supervisord(config: KresConfig) -> None:
+ logger.debug("Writing supervisord config")
+ await write_config_file(config)
+ logger.debug("Starting supervisord")
+ res = await call(["supervisord", "--configuration", str(supervisord_config_file(config).absolute())])
+ if res != 0:
+ raise SubprocessControllerException(f"Supervisord exited with exit code {res}")
+
+
+async def _exec_supervisord(config: KresConfig) -> NoReturn:
+ logger.debug("Writing supervisord config")
+ await write_config_file(config)
+ logger.debug("Execing supervisord")
+ raise CancelStartupExecInsteadException(
+ [
+ str(which.which("supervisord")),
+ "supervisord",
+ "--configuration",
+ str(supervisord_config_file(config).absolute()),
+ ]
+ )
+
+
+async def _reload_supervisord(config: KresConfig) -> None:
+ await write_config_file(config)
+ try:
+ supervisord = _create_supervisord_proxy(config)
+ supervisord.reloadConfig()
+ except Fault as e:
+ raise SubprocessControllerException("supervisord reload failed") from e
+
+
+@async_in_a_thread
+def _stop_supervisord(config: KresConfig) -> None:
+ supervisord = _create_supervisord_proxy(config)
+ # pid = supervisord.getPID()
+ try:
+ # we might be trying to shut down supervisord at a moment when it's waiting
+ # for us to stop. In that case, this shutdown request for supervisord may
+ # fail, and that's not a problem.
+ supervisord.shutdown()
+ except Fault as e:
+ if e.faultCode == 6 and e.faultString == "SHUTDOWN_STATE":
+ # supervisord is already stopping, so it's fine
+ pass
+ else:
+ # something wrong happened, let's be loud about it
+ raise
+
+ # We could remove the configuration, but there is actually no specific need to do so.
+ # If we leave it behind, someone might find it and use it to start us from scratch again,
+ # which is perfectly fine.
+ # supervisord_config_file(config).unlink()
+
+
+async def _is_supervisord_available() -> bool:
+ # yes, it is! The code in this file wouldn't be running without it due to imports :)
+
+ # so let's just check that we can find supervisord and supervisorctl binaries
+ try:
+ which.which("supervisord")
+ which.which("supervisorctl")
+ except RuntimeError:
+ logger.error("Failed to find supervisord or supervisorctl executables in $PATH")
+ return False
+
+ return True
+
+
+async def _get_supervisord_pid(config: KresConfig) -> Optional[int]:
+ if not Path(supervisord_pid_file(config)).exists():
+ return None
+
+ return int(await readfile(supervisord_pid_file(config)))
+
+
+def _is_process_running(pid: int) -> bool:
+ try:
+ # kill with signal 0 is a safe way to test that a process exists
+ kill(pid, 0)
+ return True
+ except ProcessLookupError:
+ return False
+
+
+async def _is_supervisord_running(config: KresConfig) -> bool:
+ pid = await _get_supervisord_pid(config)
+ if pid is None:
+ return False
+ elif not _is_process_running(pid):
+ supervisord_pid_file(config).unlink()
+ return False
+ else:
+ return True
+
+
+def _create_proxy(config: KresConfig) -> ServerProxy:
+ return ServerProxy(
+ "http://127.0.0.1",
+ transport=supervisor.xmlrpc.SupervisorTransport(
+ None, None, serverurl="unix://" + str(supervisord_sock_file(config))
+ ),
+ )
+
+
+def _create_supervisord_proxy(config: KresConfig) -> Any:
+ proxy = _create_proxy(config)
+ return getattr(proxy, "supervisor")
+
+
+def _create_fast_proxy(config: KresConfig) -> Any:
+ proxy = _create_proxy(config)
+ return getattr(proxy, "fast")
+
+
+def _list_running_subprocesses(config: KresConfig) -> Dict[SupervisordKresID, SubprocessStatus]:
+ supervisord = _create_supervisord_proxy(config)
+ processes: Any = supervisord.getAllProcessInfo()
+
+ def convert(proc: Any) -> SubprocessStatus:
+ conversion_tbl = {
+ # "STOPPED": None, # filtered out elsewhere
+ "STARTING": SubprocessStatus.RUNNING,
+ "RUNNING": SubprocessStatus.RUNNING,
+ "BACKOFF": SubprocessStatus.RUNNING,
+ "STOPPING": SubprocessStatus.RUNNING,
+ "EXITED": SubprocessStatus.FAILED,
+ "FATAL": SubprocessStatus.FAILED,
+ "UNKNOWN": SubprocessStatus.UNKNOWN,
+ }
+
+ if proc["statename"] in conversion_tbl:
+ status = conversion_tbl[proc["statename"]]
+ else:
+ logger.warning(f"Unknown supervisord process state {proc['statename']}")
+ status = SubprocessStatus.UNKNOWN
+ return status
+
+ # there will be a manager process as well, but we don't want to report anything on ourselves
+ processes = [pr for pr in processes if pr["name"] != "manager"]
+
+ # convert all the names
+ return {
+ SupervisordKresID.from_string(f"{pr['group']}:{pr['name']}"): convert(pr)
+ for pr in processes
+ if pr["statename"] != "STOPPED"
+ }
+
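+# An illustrative return value: the kresd workers plus the cache-gc process, e.g.
+#
+#     {kresd:kresd0: SubprocessStatus.RUNNING, cache-gc: SubprocessStatus.RUNNING}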
+
+class SupervisordSubprocess(Subprocess):
+ def __init__(
+ self,
+ config: KresConfig,
+ controller: "SupervisordSubprocessController",
+ base_id: Union[SubprocessType, SupervisordKresID],
+ ):
+ if isinstance(base_id, SubprocessType):
+ super().__init__(config, SupervisordKresID.alloc(base_id))
+ else:
+ super().__init__(config, base_id)
+ self._controller: "SupervisordSubprocessController" = controller
+
+ @property
+ def name(self):
+ return str(self.id)
+
+ @async_in_a_thread
+ def _start(self) -> None:
+ # +1 for canary process (same as in config_file.py)
+ assert int(self.id) <= int(self._config.max_workers) + 1, "trying to spawn more than the allowed limit of workers"
+ try:
+ supervisord = _create_fast_proxy(self._config)
+ supervisord.startProcess(self.name)
+ except Fault as e:
+ raise SubprocessControllerException(f"failed to start '{self.id}'") from e
+
+ @async_in_a_thread
+ def _stop(self) -> None:
+ supervisord = _create_supervisord_proxy(self._config)
+ supervisord.stopProcess(self.name)
+
+ @async_in_a_thread
+ def _restart(self) -> None:
+ supervisord = _create_supervisord_proxy(self._config)
+ supervisord.stopProcess(self.name)
+ fast = _create_fast_proxy(self._config)
+ fast.startProcess(self.name)
+
+ def get_used_config(self) -> KresConfig:
+ return self._config
+
+
+class SupervisordSubprocessController(SubprocessController):
+ def __init__(self): # pylint: disable=super-init-not-called
+ self._controller_config: Optional[KresConfig] = None
+
+ def __str__(self):
+ return "supervisord"
+
+ async def is_controller_available(self, config: KresConfig) -> bool:
+ res = await _is_supervisord_available()
+ if not res:
+ logger.info("Failed to find usable supervisord.")
+ else:
+ logger.debug("Detection - supervisord controller is available for use")
+ return res
+
+ async def get_all_running_instances(self) -> Iterable[Subprocess]:
+ assert self._controller_config is not None
+
+ if await _is_supervisord_running(self._controller_config):
+ states = _list_running_subprocesses(self._controller_config)
+ return [
+ SupervisordSubprocess(self._controller_config, self, id_)
+ for id_ in states
+ if states[id_] == SubprocessStatus.RUNNING
+ ]
+ else:
+ return []
+
+ async def initialize_controller(self, config: KresConfig) -> None:
+ self._controller_config = config
+
+ if not await _is_supervisord_running(config):
+ logger.info(
+ "We want supervisord to restart us when needed, we will therefore exec() it and let it start us again."
+ )
+ await _exec_supervisord(config)
+ else:
+ logger.info("Supervisord is already running, we will just update its config...")
+ await _reload_supervisord(config)
+
+ async def shutdown_controller(self) -> None:
+ assert self._controller_config is not None
+ await _stop_supervisord(self._controller_config)
+
+ async def create_subprocess(self, subprocess_config: KresConfig, subprocess_type: SubprocessType) -> Subprocess:
+ return SupervisordSubprocess(subprocess_config, self, subprocess_type)
+
+ @async_in_a_thread
+ def get_subprocess_status(self) -> Dict[KresID, SubprocessStatus]:
+ assert self._controller_config is not None
+ return cast(Dict[KresID, SubprocessStatus], _list_running_subprocesses(self._controller_config))
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/config_file.py b/manager/knot_resolver_manager/kresd_controller/supervisord/config_file.py
new file mode 100644
index 00000000..08450739
--- /dev/null
+++ b/manager/knot_resolver_manager/kresd_controller/supervisord/config_file.py
@@ -0,0 +1,182 @@
+import logging
+import os
+
+from jinja2 import Template
+from typing_extensions import Literal
+
+from knot_resolver_manager.compat.dataclasses import dataclass
+from knot_resolver_manager.constants import (
+ kres_gc_executable,
+ kresd_cache_dir,
+ kresd_config_file_supervisord_pattern,
+ kresd_executable,
+ supervisord_config_file,
+ supervisord_config_file_tmp,
+ supervisord_pid_file,
+ supervisord_sock_file,
+ supervisord_subprocess_log_dir,
+ user_constants,
+)
+from knot_resolver_manager.datamodel.config_schema import KresConfig
+from knot_resolver_manager.datamodel.logging_schema import LogTargetEnum
+from knot_resolver_manager.kresd_controller.interface import KresID, SubprocessType
+from knot_resolver_manager.utils.async_utils import read_resource, writefile
+
+logger = logging.getLogger(__name__)
+
+
+class SupervisordKresID(KresID):
+ # WARNING: be really careful with renaming. If the naming scheme changes,
+ # we must be able to parse the old one as well; otherwise updating the manager
+ # will cause weird behavior
+
+ @staticmethod
+ def from_string(val: str) -> "SupervisordKresID":
+ if val == "cache-gc" or val == "cache-gc:cache-gc":
+ # the double name is checked because that's how we read it from supervisord
+ return SupervisordKresID.new(SubprocessType.GC, 0)
+ else:
+ val = val.replace("kresd:kresd", "")
+ return SupervisordKresID.new(SubprocessType.KRESD, int(val))
+
+ def __str__(self) -> str:
+ if self.subprocess_type is SubprocessType.GC:
+ return "cache-gc"
+ elif self.subprocess_type is SubprocessType.KRESD:
+ return f"kresd:kresd{self._id}"
+ else:
+ raise RuntimeError(f"Unexpected subprocess type {self.subprocess_type}")
+
+
+def kres_cache_gc_args(config: KresConfig) -> str:
+ args = ""
+
+ if config.logging.level == "debug" or (config.logging.groups and "cache-gc" in config.logging.groups):
+ args += " -v"
+
+ gc_config = config.cache.garbage_collector
+ if gc_config:
+ args += (
+ f" -d {gc_config.interval.millis()}"
+ f" -u {gc_config.threshold}"
+ f" -f {gc_config.release}"
+ f" -l {gc_config.rw_deletes}"
+ f" -L {gc_config.rw_reads}"
+ f" -t {gc_config.temp_keys_space.mbytes()}"
+ f" -m {gc_config.rw_duration.micros()}"
+ f" -w {gc_config.rw_delay.micros()}"
+ )
+ if gc_config.dry_run:
+ args += " -n"
+ return args
+ raise ValueError("missing configuration for the cache garbage collector")
+
+
+@dataclass
+class ProcessTypeConfig:
+ """
+ Data structure holding data for supervisord config template
+ """
+
+ logfile: str
+ workdir: str
+ command: str
+ environment: str
+ max_procs: int = 1
+
+ @staticmethod
+ def create_gc_config(config: KresConfig) -> "ProcessTypeConfig":
+ cwd = str(os.getcwd())
+ return ProcessTypeConfig( # type: ignore[call-arg]
+ logfile=supervisord_subprocess_log_dir(config) / "gc.log",
+ workdir=cwd,
+ command=f"{kres_gc_executable()} -c {kresd_cache_dir(config)}{kres_cache_gc_args(config)}",
+ environment="",
+ )
+
+ @staticmethod
+ def create_kresd_config(config: KresConfig) -> "ProcessTypeConfig":
+ cwd = str(os.getcwd())
+ return ProcessTypeConfig( # type: ignore[call-arg]
+ logfile=supervisord_subprocess_log_dir(config) / "kresd%(process_num)d.log",
+ workdir=cwd,
+ command=f"{kresd_executable()} -c {kresd_config_file_supervisord_pattern(config)} -n",
+ environment='SYSTEMD_INSTANCE="%(process_num)d",X-SUPERVISORD-TYPE=notify',
+ max_procs=int(config.max_workers) + 1, # +1 for the canary process
+ )
+
+ @staticmethod
+ def create_manager_config(_config: KresConfig) -> "ProcessTypeConfig":
+ # read original command from /proc
+ with open("/proc/self/cmdline", "rb") as f:
+ args = [s.decode("utf-8") for s in f.read()[:-1].split(b"\0")]
+
+ # insert debugger when asked
+ if os.environ.get("KRES_DEBUG_MANAGER"):
+ logger.warning("Injecting debugger into the supervisord config")
+ # the args array looks like this:
+ # [PYTHON_PATH, "-m", "knot_resolver_manager", ...]
+ args = args[:1] + ["-m", "debugpy", "--listen", "0.0.0.0:5678", "--wait-for-client"] + args[2:]
+
+ cmd = '"' + '" "'.join(args) + '"'
+
+ return ProcessTypeConfig( # type: ignore[call-arg]
+ workdir=user_constants().working_directory_on_startup,
+ command=cmd,
+ environment="X-SUPERVISORD-TYPE=notify",
+ logfile="", # this will be ignored
+ )
+
+
+@dataclass
+class SupervisordConfig:
+ unix_http_server: str
+ pid_file: str
+ workdir: str
+ logfile: str
+ loglevel: Literal["critical", "error", "warn", "info", "debug", "trace", "blather"]
+ target: LogTargetEnum
+
+ @staticmethod
+ def create(config: KresConfig) -> "SupervisordConfig":
+ # determine the correct logging level
+ if config.logging.groups and "supervisord" in config.logging.groups:
+ loglevel = "info"
+ else:
+ loglevel = {
+ "crit": "critical",
+ "err": "error",
+ "warning": "warn",
+ "notice": "warn",
+ "info": "info",
+ "debug": "debug",
+ }[config.logging.level]
+
+ cwd = str(os.getcwd())
+ return SupervisordConfig( # type: ignore[call-arg]
+ unix_http_server=supervisord_sock_file(config),
+ pid_file=supervisord_pid_file(config),
+ workdir=cwd,
+ logfile="syslog" if config.logging.target == "syslog" else "/dev/null",
+ loglevel=loglevel,
+ target=config.logging.target,
+ )
+
+
+async def write_config_file(config: KresConfig) -> None:
+ if not supervisord_subprocess_log_dir(config).exists():
+ supervisord_subprocess_log_dir(config).mkdir(exist_ok=True)
+
+ template = await read_resource(__package__, "supervisord.conf.j2")
+ assert template is not None
+ template = template.decode("utf8")
+ config_string = Template(template).render( # pyright: reportUnknownMemberType=false
+ gc=ProcessTypeConfig.create_gc_config(config),
+ kresd=ProcessTypeConfig.create_kresd_config(config),
+ manager=ProcessTypeConfig.create_manager_config(config),
+ config=SupervisordConfig.create(config),
+ )
+ await writefile(supervisord_config_file_tmp(config), config_string)
+ # atomically replace (we don't technically need this right now, but better safe than sorry)
+ os.rename(supervisord_config_file_tmp(config), supervisord_config_file(config))
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/fast_rpcinterface.py b/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/fast_rpcinterface.py
new file mode 100644
index 00000000..c3834784
--- /dev/null
+++ b/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/fast_rpcinterface.py
@@ -0,0 +1,173 @@
+# type: ignore
+# pylint: skip-file
+
+"""
+This file is modified version of supervisord's source code:
+https://github.com/Supervisor/supervisor/blob/5d9c39619e2e7e7fca33c890cb2a9f2d3d0ab762/supervisor/rpcinterface.py
+
+The changes made are:
+
+ - removed everything that we do not need, reformatted to fit our code style (2022-06-24)
+ - made startProcess faster by setting delay to 0 (2022-06-24)
+
+
+The original supervisord licence follows:
+--------------------------------------------------------------------
+
+Supervisor is licensed under the following license:
+
+ A copyright notice accompanies this license document that identifies
+ the copyright holders.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ 1. Redistributions in source code must retain the accompanying
+ copyright notice, this list of conditions, and the following
+ disclaimer.
+
+ 2. Redistributions in binary form must reproduce the accompanying
+ copyright notice, this list of conditions, and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ 3. Names of the copyright holders must not be used to endorse or
+ promote products derived from this software without prior
+ written permission from the copyright holders.
+
+ 4. If any files are modified, you must cause the modified files to
+ carry prominent notices stating that you changed the files and
+ the date of any change.
+
+ Disclaimer
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND
+ ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
+ THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ SUCH DAMAGE.
+"""
+
+from supervisor.http import NOT_DONE_YET
+from supervisor.options import BadCommand, NoPermission, NotExecutable, NotFound, split_namespec
+from supervisor.states import RUNNING_STATES, ProcessStates, SupervisorStates
+from supervisor.xmlrpc import Faults, RPCError
+
+
+class SupervisorNamespaceRPCInterface:
+ def __init__(self, supervisord):
+ self.supervisord = supervisord
+
+ def _update(self, text):
+ self.update_text = text # for unit tests, mainly
+ if isinstance(self.supervisord.options.mood, int) and self.supervisord.options.mood < SupervisorStates.RUNNING:
+ raise RPCError(Faults.SHUTDOWN_STATE)
+
+ # RPC API methods
+
+ def _getGroupAndProcess(self, name):
+ # get process to start from name
+ group_name, process_name = split_namespec(name)
+
+ group = self.supervisord.process_groups.get(group_name)
+ if group is None:
+ raise RPCError(Faults.BAD_NAME, name)
+
+ if process_name is None:
+ return group, None
+
+ process = group.processes.get(process_name)
+ if process is None:
+ raise RPCError(Faults.BAD_NAME, name)
+
+ return group, process
+
+ def startProcess(self, name, wait=True):
+ """Start a process
+
+ @param string name Process name (or ``group:name``, or ``group:*``)
+ @param boolean wait Wait for process to be fully started
+ @return boolean result Always true unless error
+
+ """
+ self._update("startProcess")
+ group, process = self._getGroupAndProcess(name)
+ if process is None:
+ group_name, process_name = split_namespec(name)
+ return self.startProcessGroup(group_name, wait)
+
+ # test filespec, don't bother trying to spawn if we know it will
+ # eventually fail
+ try:
+ filename, argv = process.get_execv_args()
+ except NotFound as why:
+ raise RPCError(Faults.NO_FILE, why.args[0])
+ except (BadCommand, NotExecutable, NoPermission) as why:
+ raise RPCError(Faults.NOT_EXECUTABLE, why.args[0])
+
+ if process.get_state() in RUNNING_STATES:
+ raise RPCError(Faults.ALREADY_STARTED, name)
+
+ if process.get_state() == ProcessStates.UNKNOWN:
+ raise RPCError(Faults.FAILED, "%s is in an unknown process state" % name)
+
+ process.spawn()
+
+ # We call reap() in order to more quickly obtain the side effects of
+ # process.finish(), which reap() eventually ends up calling. This
+ # might be the case if the spawn() was successful but then the process
+ # died before its startsecs elapsed or it exited with an unexpected
+ # exit code. In particular, finish() may set spawnerr, which we can
+ # check and immediately raise an RPCError, avoiding the need to
+ # defer by returning a callback.
+
+ self.supervisord.reap()
+
+ if process.spawnerr:
+ raise RPCError(Faults.SPAWN_ERROR, name)
+
+ # We call process.transition() in order to more quickly obtain its
+ # side effects. In particular, it might set the process' state from
+ # STARTING->RUNNING if the process has a startsecs==0.
+ process.transition()
+
+ if wait and process.get_state() != ProcessStates.RUNNING:
+ # by default, this branch will almost always be hit for processes
+ # with default startsecs configurations, because the default number
+ # of startsecs for a process is "1", and the process will not have
+ # entered the RUNNING state yet even though we've called
+ # transition() on it. This is because a process is not considered
+ # RUNNING until it has stayed up > startsecs.
+
+ def onwait():
+ if process.spawnerr:
+ raise RPCError(Faults.SPAWN_ERROR, name)
+
+ state = process.get_state()
+
+ if state not in (ProcessStates.STARTING, ProcessStates.RUNNING):
+ raise RPCError(Faults.ABNORMAL_TERMINATION, name)
+
+ if state == ProcessStates.RUNNING:
+ return True
+
+ return NOT_DONE_YET
+
+ onwait.delay = 0
+ onwait.rpcinterface = self
+ return onwait # deferred
+
+ return True
+
+
+# this is not used in code but referenced via an entry point in the conf file
+def make_main_rpcinterface(supervisord):
+ return SupervisorNamespaceRPCInterface(supervisord)
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/manager_integration.py b/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/manager_integration.py
new file mode 100644
index 00000000..81115617
--- /dev/null
+++ b/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/manager_integration.py
@@ -0,0 +1,85 @@
+# type: ignore
+# pylint: disable=protected-access
+import atexit
+import os
+import signal
+from typing import Any, Optional
+
+from supervisor.compat import as_string
+from supervisor.events import ProcessStateFatalEvent, ProcessStateRunningEvent, ProcessStateStartingEvent, subscribe
+from supervisor.options import ServerOptions
+from supervisor.process import Subprocess
+from supervisor.states import SupervisorStates
+from supervisor.supervisord import Supervisor
+
+from knot_resolver_manager.utils.systemd_notify import systemd_notify
+
+superd: Optional[Supervisor] = None
+
+
+def check_for_fatal_manager(event: ProcessStateFatalEvent) -> None:
+ assert superd is not None
+
+ proc: Subprocess = event.process
+ processname = as_string(proc.config.name)
+ if processname == "manager":
+ # stop the whole supervisord gracefully
+ superd.options.logger.critical("manager process entered FATAL state! Shutting down")
+ superd.options.mood = SupervisorStates.SHUTDOWN
+
+ # force the interpreter to exit with exit code 1
+ atexit.register(lambda: os._exit(1))
+
+
+def check_for_starting_manager(event: ProcessStateStartingEvent) -> None:
+ assert superd is not None
+
+ proc: Subprocess = event.process
+ processname = as_string(proc.config.name)
+ if processname == "manager":
+ # manager is starting, report it upstream
+ systemd_notify(STATUS="Starting services...")
+
+
+def check_for_runnning_manager(event: ProcessStateRunningEvent) -> None:
+ assert superd is not None
+
+ proc: Subprocess = event.process
+ processname = as_string(proc.config.name)
+ if processname == "manager":
+ # manager has successfully started, report it upstream
+ systemd_notify(READY="1", STATUS="Ready")
+
+
+def ServerOptions_get_signal(self):
+ sig = self.signal_receiver.get_signal()
+ if sig == signal.SIGHUP and superd is not None:
+ superd.options.logger.info("received SIGHUP, forwarding to the process 'manager'")
+ manager_pid = superd.process_groups["manager"].processes["manager"].pid
+ os.kill(manager_pid, signal.SIGHUP)
+ return None
+
+ return sig
+
+
+def inject(supervisord: Supervisor, **_config: Any) -> Any: # pylint: disable=useless-return
+ global superd
+ superd = supervisord
+
+ # This status notification here unsets the env variable $NOTIFY_SOCKET provided by systemd
+ # and stores it locally. Therefore, it shouldn't clash with $NOTIFY_SOCKET we are providing
+ # downstream
+ systemd_notify(STATUS="Initializing supervisord...")
+
+ # register events
+ subscribe(ProcessStateFatalEvent, check_for_fatal_manager)
+ subscribe(ProcessStateStartingEvent, check_for_starting_manager)
+ subscribe(ProcessStateRunningEvent, check_for_runnning_manager)
+
+ # forward SIGHUP to manager
+ ServerOptions.get_signal = ServerOptions_get_signal
+
+ # this method is called by supervisord when loading the plugin,
+ # it should return an XML-RPC object, which we don't care about;
+ # that's why we are returning just None
+ return None
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/notifymodule.c b/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/notifymodule.c
new file mode 100644
index 00000000..d56ee7d2
--- /dev/null
+++ b/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/notifymodule.c
@@ -0,0 +1,176 @@
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/socket.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+#define CONTROL_SOCKET_NAME "knot-resolver-control-socket"
+#define NOTIFY_SOCKET_NAME "NOTIFY_SOCKET"
+#define MODULE_NAME "notify"
+#define RECEIVE_BUFFER_SIZE 2048
+
+static PyObject *NotifySocketError;
+
+static PyObject *init_control_socket(PyObject *self, PyObject *args)
+{
+ /* create socket */
+ int controlfd = socket(AF_UNIX, SOCK_DGRAM | SOCK_NONBLOCK, 0);
+ if (controlfd == -1) {
+ PyErr_SetFromErrno(NotifySocketError);
+ return NULL;
+ }
+
+ /* create address */
+ struct sockaddr_un server_addr;
+ bzero(&server_addr, sizeof(server_addr));
+ server_addr.sun_family = AF_UNIX;
+ server_addr.sun_path[0] = '\0'; // mark it as abstract namespace socket
+ strcpy(server_addr.sun_path + 1, CONTROL_SOCKET_NAME);
+ size_t addr_len = offsetof(struct sockaddr_un, sun_path) +
+ strlen(CONTROL_SOCKET_NAME) + 1;
+
+ /* bind to the address */
+ int res = bind(controlfd, (struct sockaddr *)&server_addr, addr_len);
+ if (res < 0) {
+ PyErr_SetFromErrno(NotifySocketError);
+ return NULL;
+ }
+
+ /* make sure that we receive the sender's credentials */
+ int data = (int)true;
+ res = setsockopt(controlfd, SOL_SOCKET, SO_PASSCRED, &data,
+ sizeof(data));
+ if (res < 0) {
+ PyErr_SetFromErrno(NotifySocketError);
+ return NULL;
+ }
+
+ /* store the name of the socket in env to fake systemd */
+ char *old_value = getenv(NOTIFY_SOCKET_NAME);
+ if (old_value != NULL) {
+ printf("[notify_socket] warning, running under systemd and overwriting $%s\n",
+ NOTIFY_SOCKET_NAME);
+ // fixme
+ }
+
+ res = setenv(NOTIFY_SOCKET_NAME, "@" CONTROL_SOCKET_NAME, 1);
+ if (res < 0) {
+ PyErr_SetFromErrno(NotifySocketError);
+ return NULL;
+ }
+
+ return PyLong_FromLong((long)controlfd);
+}
+
+static PyObject *handle_control_socket_connection_event(PyObject *self,
+ PyObject *args)
+{
+ long controlfd;
+ if (!PyArg_ParseTuple(args, "i", &controlfd))
+ return NULL;
+
+ /* read command assuming it fits and it was sent all at once */
+ // prepare space to read filedescriptors
+ struct msghdr msg;
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+
+ // prepare a place to read the actual message
+ char place_for_data[RECEIVE_BUFFER_SIZE];
+ bzero(&place_for_data, sizeof(place_for_data));
+ struct iovec iov = { .iov_base = &place_for_data,
+ .iov_len = sizeof(place_for_data) };
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ char cmsg[CMSG_SPACE(sizeof(struct ucred))];
+ msg.msg_control = cmsg;
+ msg.msg_controllen = sizeof(cmsg);
+
+ /* Receive real plus ancillary data */
+ int len = recvmsg(controlfd, &msg, 0);
+ if (len == -1) {
+ if (errno == EWOULDBLOCK || errno == EAGAIN) {
+ Py_RETURN_NONE;
+ } else {
+ PyErr_SetFromErrno(NotifySocketError);
+ return NULL;
+ }
+ }
+
+ /* read the sender pid */
+ struct cmsghdr *cmsgp = CMSG_FIRSTHDR(&msg);
+ pid_t pid = -1;
+ while (cmsgp != NULL) {
+ if (cmsgp->cmsg_type == SCM_CREDENTIALS) {
+ if (
+ cmsgp->cmsg_len != CMSG_LEN(sizeof(struct ucred)) ||
+ cmsgp->cmsg_level != SOL_SOCKET
+ ) {
+ printf("[notify_socket] invalid cmsg data, ignoring\n");
+ Py_RETURN_NONE;
+ }
+
+ struct ucred cred;
+ memcpy(&cred, CMSG_DATA(cmsgp), sizeof(cred));
+ pid = cred.pid;
+ }
+ cmsgp = CMSG_NXTHDR(&msg, cmsgp);
+ }
+ if (pid == -1) {
+ printf("[notify_socket] ignoring received data without credentials: %s\n",
+ place_for_data);
+ Py_RETURN_NONE;
+ }
+
+ /* return received data as a tuple (pid, data bytes) */
+ return Py_BuildValue("iy", pid, place_for_data);
+}
+
+static PyMethodDef NotifyMethods[] = {
+ { "init_socket", init_control_socket, METH_VARARGS,
+ "Init notify socket. Returns it's file descriptor." },
+ { "read_message", handle_control_socket_connection_event, METH_VARARGS,
+ "Reads datagram from notify socket. Returns tuple of PID and received bytes." },
+ { NULL, NULL, 0, NULL } /* Sentinel */
+};
+
+static struct PyModuleDef notifymodule = {
+ PyModuleDef_HEAD_INIT, MODULE_NAME, /* name of module */
+ NULL, /* module documentation, may be NULL */
+ -1, /* size of per-interpreter state of the module,
+ or -1 if the module keeps state in global variables. */
+ NotifyMethods
+};
+
+PyMODINIT_FUNC PyInit_notify(void)
+{
+ PyObject *m;
+
+ m = PyModule_Create(&notifymodule);
+ if (m == NULL)
+ return NULL;
+
+ NotifySocketError =
+ PyErr_NewException(MODULE_NAME ".error", NULL, NULL);
+ Py_XINCREF(NotifySocketError);
+ if (PyModule_AddObject(m, "error", NotifySocketError) < 0) {
+ Py_XDECREF(NotifySocketError);
+ Py_CLEAR(NotifySocketError);
+ Py_DECREF(m);
+ return NULL;
+ }
+
+ return m;
+}
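+
+/* Usage sketch from Python (this module is loaded as the `notify` extension
+ * consumed by plugin/sd_notify.py):
+ *
+ *     fd = notify.init_socket()      # bind the abstract socket, export $NOTIFY_SOCKET
+ *     msg = notify.read_message(fd)  # -> (sender_pid, data_bytes), or None on junk
+ */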
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/patch_logger.py b/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/patch_logger.py
new file mode 100644
index 00000000..411f232e
--- /dev/null
+++ b/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/patch_logger.py
@@ -0,0 +1,97 @@
+# type: ignore
+# pylint: disable=protected-access
+
+import os
+import sys
+import traceback
+from typing import Any
+
+from supervisor.dispatchers import POutputDispatcher
+from supervisor.loggers import LevelsByName, StreamHandler, SyslogHandler
+from supervisor.supervisord import Supervisor
+from typing_extensions import Literal
+
+FORWARD_LOG_LEVEL = LevelsByName.CRIT # to make sure it's always printed
+
+
+def empty_function(*args, **kwargs):
+ pass
+
+
+FORWARD_MSG_FORMAT: str = "%(name)s[%(pid)d]%(stream)s: %(data)s"
+
+
+def POutputDispatcher_log(self: POutputDispatcher, data: bytearray):
+ if data:
+ # parse the input
+ if not isinstance(data, bytes):
+ text = data
+ else:
+ try:
+ text = data.decode("utf-8")
+ except UnicodeDecodeError:
+ text = "Undecodable: %r" % data
+
+ # print line by line prepending correct prefix to match the style
+ config = self.process.config
+ config.options.logger.handlers = forward_handlers
+ for line in text.splitlines():
+ stream = ""
+ if self.channel == "stderr":
+ stream = " (stderr)"
+ config.options.logger.log(
+ FORWARD_LOG_LEVEL, FORWARD_MSG_FORMAT, name=config.name, stream=stream, data=line, pid=self.process.pid
+ )
+ config.options.logger.handlers = supervisord_handlers
+
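+# The message part of a forwarded line produced by the format above looks roughly
+# like this (values are illustrative):
+#
+#     kresd1[1234] (stderr): error while loading config
+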
+
+def _create_handler(fmt, level, target: Literal["stdout", "stderr", "syslog"]) -> StreamHandler:
+ if target == "syslog":
+ handler = SyslogHandler()
+ else:
+ handler = StreamHandler(sys.stdout if target == "stdout" else sys.stderr)
+ handler.setFormat(fmt)
+ handler.setLevel(level)
+ return handler
+
+
+supervisord_handlers = []
+forward_handlers = []
+
+
+def inject(supervisord: Supervisor, **config: Any) -> Any: # pylint: disable=useless-return
+ try:
+ # reconfigure log handlers
+ supervisord.options.logger.info("reconfiguring log handlers")
+ supervisord_handlers.append(
+ _create_handler(
+ f"%(asctime)s supervisor[{os.getpid()}]: [%(levelname)s] %(message)s\n",
+ supervisord.options.loglevel,
+ config["target"],
+ )
+ )
+ forward_handlers.append(
+ _create_handler("%(asctime)s %(message)s\n", supervisord.options.loglevel, config["target"])
+ )
+ supervisord.options.logger.handlers = supervisord_handlers
+
+ # replace output handler for subprocesses
+ POutputDispatcher._log = POutputDispatcher_log
+
+ # we forward stdio in all cases, even when logging to syslog. This should prevent the unfortunate
+ # case of swallowing an error message and leaving the users confused. To make the forwarded lines
+ # obvious, we just prepend an explanatory string at the beginning of all messages
+ if config["target"] == "syslog":
+ global FORWARD_MSG_FORMAT
+ FORWARD_MSG_FORMAT = "captured stdio output from " + FORWARD_MSG_FORMAT
+
+ # this method is called by supervisord when loading the plugin,
+ # it should return an XML-RPC object, which we don't care about;
+ # that's why we are returning just None
+ return None
+
+ # if we fail to load the module, print some explanation
+ # should not happen when run by end users
+ except BaseException:
+ traceback.print_exc()
+ raise
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/sd_notify.py b/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/sd_notify.py
new file mode 100644
index 00000000..53711c86
--- /dev/null
+++ b/manager/knot_resolver_manager/kresd_controller/supervisord/plugin/sd_notify.py
@@ -0,0 +1,214 @@
+# type: ignore
+# pylint: disable=protected-access
+# pylint: disable=c-extension-no-member
+import os
+import signal
+import time
+from functools import partial
+from typing import Any, Callable, Dict, List, Optional, Tuple, TypeVar
+
+from supervisor.events import ProcessStateEvent, ProcessStateStartingEvent, subscribe
+from supervisor.medusa.asyncore_25 import compact_traceback
+from supervisor.process import Subprocess
+from supervisor.states import ProcessStates
+from supervisor.supervisord import Supervisor
+
+from knot_resolver_manager.kresd_controller.supervisord.plugin import notify
+
+starting_processes: List[Subprocess] = []
+
+
+def is_type_notify(proc: Subprocess) -> bool:
+ return proc.config.environment is not None and proc.config.environment.get("X-SUPERVISORD-TYPE", None) == "notify"
+
+
+class NotifySocketDispatcher:
+ """
+ See supervisor.dispatcher
+ """
+
+ def __init__(self, supervisor: Supervisor, fd: int):
+ self._supervisor = supervisor
+ self.fd = fd
+ self.closed = False # True if close() has been called
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__} with fd={self.fd}>"
+
+ def readable(self):
+ return True
+
+ def writable(self):
+ return False
+
+ def handle_read_event(self):
+ logger: Any = self._supervisor.options.logger
+
+ res: Optional[Tuple[int, bytes]] = notify.read_message(self.fd)
+ if res is None:
+ return None # there was some junk
+ pid, data = res
+
+ if data.startswith(b"READY=1"):
+ # handle case, when some process is really ready
+
+ # pylint: disable=undefined-loop-variable
+ for proc in starting_processes:
+ if proc.pid == pid:
+ break
+ else:
+ logger.warn(f"ignoring ready notification from unregistered PID={pid}")
+ return None
+
+ if is_type_notify(proc):
+ proc._assertInState(ProcessStates.STARTING)
+ proc.change_state(ProcessStates.RUNNING)
+ logger.info(
+ f"success: {proc.config.name} entered RUNNING state, process sent ready notification via $NOTIFY_SOCKET"
+ )
+ else:
+ logger.warn(f"ignoring ready notification from {proc.config.name}, which is not configured to send it")
+
+ else:
+ # handle case, when we got something unexpected
+ logger.warn(f"ignoring unrecognized data on $NOTIFY_SOCKET sent from PID={pid}, data='{data!r}'")
+ return None
+
+ def handle_write_event(self):
+ raise ValueError("this dispatcher is not writable")
+
+ def handle_error(self):
+ _nil, t, v, tbinfo = compact_traceback()
+
+ self._supervisor.options.logger.error(
+ f"uncaptured python exception, closing notify socket {repr(self)} ({t}:{v} {tbinfo})"
+ )
+ self.close()
+
+ def close(self):
+ if not self.closed:
+ os.close(self.fd)
+ self.closed = True
+
+ def flush(self):
+ pass
+
+
+def keep_track_of_starting_processes(event: ProcessStateEvent) -> None:
+ global starting_processes
+
+ proc: Subprocess = event.process
+
+ if isinstance(event, ProcessStateStartingEvent):
+ # process is starting
+ # if proc not in starting_processes:
+ starting_processes.append(proc)
+ else:
+ # not starting
+ starting_processes = [p for p in starting_processes if p.pid != proc.pid]
+
+
+notify_dispatcher: Optional[NotifySocketDispatcher] = None
+
+
+def process_transition(slf: Subprocess) -> Subprocess:
+ if not is_type_notify(slf):
+ return slf
+
+ # modified version of upstream process transition code
+ if slf.state == ProcessStates.STARTING:
+ if time.time() - slf.laststart > slf.config.startsecs:
+ # STARTING -> STOPPING if the process has not sent ready notification
+ # within proc.config.startsecs
+ slf.config.options.logger.warn(
+ f"process '{slf.config.name}' did not send ready notification within {slf.config.startsecs} secs, killing"
+ )
+ slf.kill(signal.SIGKILL)
+ slf.x_notifykilled = True # used in finish() function to set to FATAL state
+ slf.laststart = time.time() + 1 # prevent immediate state transition to RUNNING from happening
+
+ # return self for chaining
+ return slf
+
+
+def subprocess_finish_tail(slf, pid, sts) -> Tuple[Any, Any, Any]:
+ if getattr(slf, "x_notifykilled", False):
+ # we want FATAL, not STOPPED state after timeout waiting for startup notification
+ # why? because it's likely not gonna help to try starting the process up again if
+ # it failed so early
+ slf.change_state(ProcessStates.FATAL)
+
+ # clear the marker value
+ del slf.x_notifykilled
+
+ # return for chaining
+ return slf, pid, sts
+
+
+def supervisord_get_process_map(supervisord: Any, mp: Dict[Any, Any]) -> Dict[Any, Any]:
+ global notify_dispatcher
+ if notify_dispatcher is None:
+ notify_dispatcher = NotifySocketDispatcher(supervisord, notify.init_socket())
+ supervisord.options.logger.info("notify: injected $NOTIFY_SOCKET into event loop")
+
+ # add our dispatcher to the result
+ assert notify_dispatcher.fd not in mp
+ mp[notify_dispatcher.fd] = notify_dispatcher
+
+ return mp
+
+
+def process_spawn_as_child_add_env(slf: Subprocess, *args: Any) -> Tuple[Any, ...]:
+ if is_type_notify(slf):
+ slf.config.environment["NOTIFY_SOCKET"] = "@knot-resolver-control-socket"
+ return (slf, *args)
+
+
+T = TypeVar("T")
+U = TypeVar("U")
+
+
+def chain(first: Callable[..., U], second: Callable[[U], T]) -> Callable[..., T]:
+ def wrapper(*args: Any, **kwargs: Any) -> T:
+ res = first(*args, **kwargs)
+ if isinstance(res, tuple):
+ return second(*res)
+ else:
+ return second(res)
+
+ return wrapper
+
+
+def append(first: Callable[..., T], second: Callable[..., None]) -> Callable[..., T]:
+ def wrapper(*args: Any, **kwargs: Any) -> T:
+ res = first(*args, **kwargs)
+ second(*args, **kwargs)
+ return res
+
+ return wrapper
+
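+# Combinator sketch: chain(f, g) pipes f's result into g (unpacking tuples),
+# while append(f, g) runs g for its side effects and keeps f's return value, e.g.
+#
+#     double_then_str = chain(lambda x: x * 2, str)
+#     double_then_str(21)  # -> "42"
+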
+
+def monkeypatch(supervisord: Supervisor) -> None:
+ """Inject ourselves into supervisord code"""
+
+ # append notify socket handler to event loop
+ supervisord.get_process_map = chain(supervisord.get_process_map, partial(supervisord_get_process_map, supervisord))
+
+ # prepend timeout handler to transition method
+ Subprocess.transition = chain(process_transition, Subprocess.transition)
+ Subprocess.finish = append(Subprocess.finish, subprocess_finish_tail)
+
+ # add environment variable $NOTIFY_SOCKET to starting processes
+ Subprocess._spawn_as_child = chain(process_spawn_as_child_add_env, Subprocess._spawn_as_child)
+
+ # keep references to starting subprocesses
+ subscribe(ProcessStateEvent, keep_track_of_starting_processes)
+
+
+def inject(supervisord: Supervisor, **_config: Any) -> Any: # pylint: disable=useless-return
+ monkeypatch(supervisord)
+
+ # this method is called by supervisord when loading the plugin,
+ # it should return XML-RPC object, which we don't care about
+ # That's why why are returning just None
+ return None
diff --git a/manager/knot_resolver_manager/kresd_controller/supervisord/supervisord.conf.j2 b/manager/knot_resolver_manager/kresd_controller/supervisord/supervisord.conf.j2
new file mode 100644
index 00000000..29116f09
--- /dev/null
+++ b/manager/knot_resolver_manager/kresd_controller/supervisord/supervisord.conf.j2
@@ -0,0 +1,80 @@
+[supervisord]
+pidfile = {{ config.pid_file }}
+directory = {{ config.workdir }}
+nodaemon = true
+
+{# disable initial logging until patch_logger.py takes over #}
+logfile = /dev/null
+logfile_maxbytes = 0
+silent = true
+
+{# config for patch_logger.py #}
+loglevel = {{ config.loglevel }}
+{# there are more options in the plugin section #}
+
+
+[unix_http_server]
+file = {{ config.unix_http_server }}
+
+[supervisorctl]
+serverurl = unix://{{ config.unix_http_server }}
+
+{# Extensions to changing the supervisord behavior #}
+[rpcinterface:patch_logger]
+supervisor.rpcinterface_factory = knot_resolver_manager.kresd_controller.supervisord.plugin.patch_logger:inject
+target = {{ config.target }}
+
+[rpcinterface:manager_integration]
+supervisor.rpcinterface_factory = knot_resolver_manager.kresd_controller.supervisord.plugin.manager_integration:inject
+
+[rpcinterface:sd_notify]
+supervisor.rpcinterface_factory = knot_resolver_manager.kresd_controller.supervisord.plugin.sd_notify:inject
+
+{# Extensions for actual API control #}
+[rpcinterface:supervisor]
+supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
+
+[rpcinterface:fast]
+supervisor.rpcinterface_factory = knot_resolver_manager.kresd_controller.supervisord.plugin.fast_rpcinterface:make_main_rpcinterface
+
+
+
+[program:manager]
+redirect_stderr=false
+directory={{ manager.workdir }}
+command={{ manager.command }}
+stopsignal=SIGINT
+killasgroup=true
+autorestart=true
+autostart=true
+startsecs=5
+environment={{ manager.environment }},KRES_SUPRESS_LOG_PREFIX=true
+stdout_logfile=NONE
+stderr_logfile=NONE
+
+[program:kresd]
+process_name=%(program_name)s%(process_num)d
+numprocs={{ kresd.max_procs }}
+directory={{ kresd.workdir }}
+command={{ kresd.command }}
+autostart=false
+autorestart=true
+stopsignal=TERM
+killasgroup=true
+startsecs=10
+environment={{ kresd.environment }}
+stdout_logfile=NONE
+stderr_logfile=NONE
+
+[program:cache-gc]
+redirect_stderr=false
+directory={{ gc.workdir }}
+command={{ gc.command }}
+autostart=false
+autorestart=true
+stopsignal=TERM
+killasgroup=true
+startsecs=0
+environment={{ gc.environment }}
+stdout_logfile=NONE
+stderr_logfile=NONE \ No newline at end of file
diff --git a/manager/knot_resolver_manager/log.py b/manager/knot_resolver_manager/log.py
new file mode 100644
index 00000000..08da675b
--- /dev/null
+++ b/manager/knot_resolver_manager/log.py
@@ -0,0 +1,105 @@
+import logging
+import logging.handlers
+import os
+import sys
+from typing import Optional
+
+from knot_resolver_manager.config_store import ConfigStore, only_on_real_changes
+from knot_resolver_manager.constants import STARTUP_LOG_LEVEL
+from knot_resolver_manager.datamodel.config_schema import KresConfig
+from knot_resolver_manager.datamodel.logging_schema import LogTargetEnum
+
+logger = logging.getLogger(__name__)
+
+
+def get_log_format(config: KresConfig) -> str:
+ """
+ Based on the environment variable $KRES_SUPRESS_LOG_PREFIX, returns the appropriate format string for the logger.
+ """
+
+ if os.environ.get("KRES_SUPRESS_LOG_PREFIX") == "true":
+ # In this case, we are running under supervisord and it's adding prefixes to our output
+ return "[%(levelname)s] %(name)s: %(message)s"
+ else:
+ # In this case, we are running standalone during initialization and we need to add a prefix to each line
+ # by ourselves to make it consistent
+ assert config.logging.target != "syslog"
+ stream = ""
+ if config.logging.target == "stderr":
+ stream = " (stderr)"
+
+ pid = os.getpid()
+ return f"%(asctime)s manager[{pid}]{stream}: [%(levelname)s] %(name)s: %(message)s"
+
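+# With pid 1234 and target "stderr", a line then comes out roughly as
+# (timestamp illustrative):
+#
+#     2022-06-24 12:00:00,000 manager[1234] (stderr): [INFO] knot_resolver_manager.server: ...
+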
+
+async def _set_log_level(config: KresConfig) -> None:
+ levels_map = {
+ "crit": "CRITICAL",
+ "err": "ERROR",
+ "warning": "WARNING",
+ "notice": "WARNING",
+ "info": "INFO",
+ "debug": "DEBUG",
+ }
+
+ # when logging group is set to make us log with DEBUG
+ if config.logging.groups and "manager" in config.logging.groups:
+ target = "DEBUG"
+ # otherwise, follow the standard log level
+ else:
+ target = levels_map[config.logging.level]
+
+ # expect exactly one existing log handler on the root
+ logger.warning(f"Changing logging level to '{target}'")
+ logging.getLogger().setLevel(target)
+
+
+async def _set_logging_handler(config: KresConfig) -> None:
+ target: Optional[LogTargetEnum] = config.logging.target
+
+ if target is None:
+ target = "stdout"
+
+ handler: logging.Handler
+ if target == "syslog":
+ handler = logging.handlers.SysLogHandler(address="/dev/log")
+ handler.setFormatter(logging.Formatter("%(name)s: %(message)s"))
+ elif target == "stdout":
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(logging.Formatter(get_log_format(config)))
+ elif target == "stderr":
+ handler = logging.StreamHandler(sys.stderr)
+ handler.setFormatter(logging.Formatter(get_log_format(config)))
+ else:
+ raise RuntimeError(f"Unexpected value '{target}' for log target in the config")
+
+ root = logging.getLogger()
+
+ # if we had a MemoryHandler before, we should give it the new handler where we can flush it
+ if isinstance(root.handlers[0], logging.handlers.MemoryHandler):
+ root.handlers[0].setTarget(handler)
+
+ # stop the old handler
+ root.handlers[0].flush()
+ root.handlers[0].close()
+ root.removeHandler(root.handlers[0])
+
+ # configure the new handler
+ root.addHandler(handler)
+
+
+@only_on_real_changes(lambda config: config.logging)
+async def _configure_logger(config: KresConfig) -> None:
+ await _set_logging_handler(config)
+ await _set_log_level(config)
+
+
+async def logger_init(config_store: ConfigStore) -> None:
+ await config_store.register_on_change_callback(_configure_logger)
+
+
+def logger_startup() -> None:
+ logging.getLogger().setLevel(STARTUP_LOG_LEVEL)
+ err_handler = logging.StreamHandler(sys.stderr)
+ err_handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
+ logging.getLogger().addHandler(logging.handlers.MemoryHandler(10_000, logging.ERROR, err_handler))
diff --git a/manager/knot_resolver_manager/main.py b/manager/knot_resolver_manager/main.py
new file mode 100644
index 00000000..f10e1799
--- /dev/null
+++ b/manager/knot_resolver_manager/main.py
@@ -0,0 +1,42 @@
+"""
+Effectively the same as a normal __main__.py. However, we moved its content over to this
+file to allow us to exclude the __main__.py file from black's autoformatting
+"""
+
+import argparse
+import sys
+from pathlib import Path
+from typing import NoReturn
+
+from knot_resolver_manager import compat
+from knot_resolver_manager.constants import DEFAULT_MANAGER_CONFIG_FILE
+from knot_resolver_manager.log import logger_startup
+from knot_resolver_manager.server import start_server
+
+
+def parse_args() -> argparse.Namespace:
+ parser = argparse.ArgumentParser(description="Knot Resolver - caching DNS resolver")
+ parser.add_argument(
+ "-c",
+ "--config",
+ help="Config file to load. Overrides default config location at '" + str(DEFAULT_MANAGER_CONFIG_FILE) + "'",
+ type=str,
+ nargs=1,
+ required=False,
+ default=None,
+ )
+ return parser.parse_args()
+
+
+def main() -> NoReturn:
+ # initial logging is to memory until we read the config
+ logger_startup()
+
+ # parse arguments
+ args = parse_args()
+
+ # where to look for config
+ config_path = DEFAULT_MANAGER_CONFIG_FILE if args.config is None else Path(args.config[0])
+
+ exit_code = compat.asyncio.run(start_server(config=config_path))
+ sys.exit(exit_code)
diff --git a/manager/knot_resolver_manager/server.py b/manager/knot_resolver_manager/server.py
new file mode 100644
index 00000000..58e22e97
--- /dev/null
+++ b/manager/knot_resolver_manager/server.py
@@ -0,0 +1,596 @@
+import asyncio
+import errno
+import json
+import logging
+import os
+import signal
+import sys
+from functools import partial
+from http import HTTPStatus
+from pathlib import Path
+from time import time
+from typing import Any, Dict, List, Optional, Set, Union, cast
+
+from aiohttp import web
+from aiohttp.web import middleware
+from aiohttp.web_app import Application
+from aiohttp.web_response import json_response
+from aiohttp.web_runner import AppRunner, TCPSite, UnixSite
+from typing_extensions import Literal
+
+import knot_resolver_manager.utils.custom_atexit as atexit
+from knot_resolver_manager import log, statistics
+from knot_resolver_manager.compat import asyncio as asyncio_compat
+from knot_resolver_manager.config_store import ConfigStore
+from knot_resolver_manager.constants import DEFAULT_MANAGER_CONFIG_FILE, PID_FILE_NAME, init_user_constants
+from knot_resolver_manager.datamodel.config_schema import KresConfig, get_rundir_without_validation
+from knot_resolver_manager.datamodel.globals import (
+ Context,
+ reset_global_validation_context,
+ set_global_validation_context,
+)
+from knot_resolver_manager.datamodel.management_schema import ManagementSchema
+from knot_resolver_manager.exceptions import CancelStartupExecInsteadException, KresManagerException
+from knot_resolver_manager.kresd_controller import get_best_controller_implementation
+from knot_resolver_manager.utils import ignore_exceptions_optional
+from knot_resolver_manager.utils.async_utils import readfile
+from knot_resolver_manager.utils.etag import structural_etag
+from knot_resolver_manager.utils.functional import Result
+from knot_resolver_manager.utils.modeling.exceptions import DataParsingError, DataValidationError
+from knot_resolver_manager.utils.modeling.parsing import DataFormat, parse_yaml
+from knot_resolver_manager.utils.modeling.query import query
+from knot_resolver_manager.utils.modeling.types import NoneType
+from knot_resolver_manager.utils.systemd_notify import systemd_notify
+
+from .kres_manager import KresManager
+
+logger = logging.getLogger(__name__)
+
+
+@middleware
+async def error_handler(request: web.Request, handler: Any) -> web.Response:
+ """
+ Generic error handler for route handlers.
+
+ If an exception is thrown during request processing, this middleware catches it
+ and responds accordingly.
+ """
+
+ try:
+ return await handler(request)
+ except DataValidationError as e:
+ return web.Response(text=f"validation of configuration failed:\n{e}", status=HTTPStatus.BAD_REQUEST)
+ except DataParsingError as e:
+ return web.Response(text=f"request processing error:\n{e}", status=HTTPStatus.BAD_REQUEST)
+ except KresManagerException as e:
+ return web.Response(text=f"request processing failed:\n{e}", status=HTTPStatus.INTERNAL_SERVER_ERROR)
+
+
+def from_mime_type(mime_type: str) -> DataFormat:
+ formats = {
+ "application/json": DataFormat.JSON,
+ "application/octet-stream": DataFormat.JSON, # default in aiohttp
+ }
+ if mime_type not in formats:
+ raise DataParsingError(f"unsupported MIME type '{mime_type}', expected: {str(formats)[1:-1]}")
+ return formats[mime_type]
+
+
+def parse_from_mime_type(data: str, mime_type: str) -> Any:
+ return from_mime_type(mime_type).parse_to_dict(data)
+
+
+class Server:
+ # pylint: disable=too-many-instance-attributes
+    # This is the top-level class containing pretty much everything. Instead of global
+    # variables, we use instance attributes. That's why there are so many and it's
+    # OK.
+ def __init__(self, store: ConfigStore, config_path: Optional[Path]):
+ # config store & server dynamic reconfiguration
+ self.config_store = store
+
+ # HTTP server
+ self.app = Application(middlewares=[error_handler])
+ self.runner = AppRunner(self.app)
+ self.listen: Optional[ManagementSchema] = None
+ self.site: Union[NoneType, TCPSite, UnixSite] = None
+ self.listen_lock = asyncio.Lock()
+ self._config_path: Optional[Path] = config_path
+ self._exit_code: int = 0
+ self._shutdown_event = asyncio.Event()
+
+ async def _reconfigure(self, config: KresConfig) -> None:
+ await self._reconfigure_listen_address(config)
+
+ async def _deny_management_changes(self, config_old: KresConfig, config_new: KresConfig) -> Result[None, str]:
+ if config_old.management != config_new.management:
+ return Result.err(
+ "/server/management: Changing management API address/unix-socket dynamically is not allowed as it's really dangerous."
+ " If you really need this feature, please contact the developers and explain why. Technically,"
+ " there are no problems in supporting it. We are only blocking the dynamic changes because"
+ " we think the consequences of leaving this footgun unprotected are worse than its usefulness."
+ )
+ return Result.ok(None)
+
+ async def _reload_config(self) -> None:
+ if self._config_path is None:
+ logger.warning("The manager was started with inlined configuration - can't reload")
+ else:
+ try:
+ data = await readfile(self._config_path)
+ config = KresConfig(parse_yaml(data))
+ await self.config_store.update(config)
+ logger.info("Configuration file successfully reloaded")
+ except FileNotFoundError:
+ logger.error(
+ f"Configuration file was not found at '{self._config_path}'."
+ " Something must have happened to it while we were running."
+ )
+                logger.error("Configuration has NOT been changed.")
+ except (DataParsingError, DataValidationError) as e:
+ logger.error(f"Failed to parse the updated configuration file: {e}")
+                logger.error("Configuration has NOT been changed.")
+ except KresManagerException as e:
+ logger.error(f"Reloading of the configuration file failed: {e}")
+                logger.error("Configuration has NOT been changed.")
+
+ async def sigint_handler(self) -> None:
+ logger.info("Received SIGINT, triggering graceful shutdown")
+ self.trigger_shutdown(0)
+
+ async def sigterm_handler(self) -> None:
+ logger.info("Received SIGTERM, triggering graceful shutdown")
+ self.trigger_shutdown(0)
+
+ async def sighup_handler(self) -> None:
+ logger.info("Received SIGHUP, reloading configuration file")
+ systemd_notify(RELOADING="1")
+ await self._reload_config()
+ systemd_notify(READY="1")
+
+ @staticmethod
+ def all_handled_signals() -> Set[signal.Signals]:
+ return {signal.SIGHUP, signal.SIGINT, signal.SIGTERM}
+
+ def bind_signal_handlers(self):
+ asyncio_compat.add_async_signal_handler(signal.SIGTERM, self.sigterm_handler)
+ asyncio_compat.add_async_signal_handler(signal.SIGINT, self.sigint_handler)
+ asyncio_compat.add_async_signal_handler(signal.SIGHUP, self.sighup_handler)
+
+ def unbind_signal_handlers(self):
+ asyncio_compat.remove_signal_handler(signal.SIGTERM)
+ asyncio_compat.remove_signal_handler(signal.SIGINT)
+ asyncio_compat.remove_signal_handler(signal.SIGHUP)
+
+ async def start(self) -> None:
+ self._setup_routes()
+ await self.runner.setup()
+ await self.config_store.register_verifier(self._deny_management_changes)
+ await self.config_store.register_on_change_callback(self._reconfigure)
+
+ async def wait_for_shutdown(self) -> None:
+ await self._shutdown_event.wait()
+
+ def trigger_shutdown(self, exit_code: int) -> None:
+ self._shutdown_event.set()
+ self._exit_code = exit_code
+
+ async def _handler_index(self, _request: web.Request) -> web.Response:
+ """
+ Dummy index handler to indicate that the server is indeed running...
+ """
+ return json_response(
+ {
+ "msg": "Knot Resolver Manager is running! The configuration endpoint is at /config",
+ "status": "RUNNING",
+ }
+ )
+
+ @statistics.async_timing_histogram(statistics.MANAGER_REQUEST_RECONFIGURE_LATENCY)
+ async def _handler_config_query(self, request: web.Request) -> web.Response:
+ """
+ Route handler for changing resolver configuration
+ """
+ # There are a lot of local variables in here, but they are usually immutable (almost SSA form :) )
+ # pylint: disable=too-many-locals
+
+ # parse the incoming data
+ if request.method == "GET":
+ update_with: Optional[Dict[str, Any]] = None
+ else:
+ update_with = parse_from_mime_type(await request.text(), request.content_type)
+ document_path = request.match_info["path"]
+ getheaders = ignore_exceptions_optional(List[str], None, KeyError)(request.headers.getall)
+ etags = getheaders("if-match")
+ not_etags = getheaders("if-none-match")
+ current_config: Dict[str, Any] = self.config_store.get().get_unparsed_data()
+
+        # stop processing early if the ETag preconditions are not met
+ def strip_quotes(s: str) -> str:
+ return s.strip('"')
+
+ # WARNING: this check is prone to race conditions. When changing, make sure that the current config
+ # is really the latest current config (i.e. no await in between obtaining the config and the checks)
+ status = HTTPStatus.NOT_MODIFIED if request.method in ("GET", "HEAD") else HTTPStatus.PRECONDITION_FAILED
+ if etags is not None and structural_etag(current_config) not in map(strip_quotes, etags):
+ return web.Response(status=status)
+ if not_etags is not None and structural_etag(current_config) in map(strip_quotes, not_etags):
+ return web.Response(status=status)
+
+ # run query
+ op = cast(Literal["get", "delete", "patch", "put"], request.method.lower())
+ new_config, to_return = query(current_config, op, document_path, update_with)
+
+ # update the config
+ if request.method != "GET":
+ # validate
+ config_validated = KresConfig(new_config)
+ # apply
+ await self.config_store.update(config_validated)
+
+ # serialize the response (the `to_return` object is a Dict/list/scalar, we want to return json)
+ resp_text: Optional[str] = json.dumps(to_return) if to_return is not None else None
+
+ # create the response and return it
+ res = web.Response(status=HTTPStatus.OK, text=resp_text, content_type="application/json")
+ res.headers.add("ETag", f'"{structural_etag(new_config)}"')
+ return res
+
+ async def _handler_metrics(self, _request: web.Request) -> web.Response:
+ return web.Response(
+ body=await statistics.report_stats(),
+ content_type="text/plain",
+ charset="utf8",
+ )
+
+ async def _handler_schema(self, _request: web.Request) -> web.Response:
+ return web.json_response(
+ KresConfig.json_schema(), headers={"Access-Control-Allow-Origin": "*"}, dumps=partial(json.dumps, indent=4)
+ )
+
+ async def _handle_view_schema(self, _request: web.Request) -> web.Response:
+ """
+        Provides a UI for visualising and understanding the JSON schema.
+
+        Rendering schemas directly in the Knot Resolver Manager is unwanted, as it's completely
+        out of scope. However, it can be convenient. We therefore rely on a public web-based viewer
+        and provide just a redirect. If this feature ever breaks due to the disappearance of the
+        public service, we can fix it. But we are not guaranteeing that this will always work.
+ """
+
+ return web.Response(
+ text="""
+ <html>
+ <head><title>Redirect to schema viewer</title></head>
+ <body>
+ <script>
+ // we are using JS in order to use proper host
+ let protocol = window.location.protocol;
+ let host = window.location.host;
+ let url = encodeURIComponent(`${protocol}//${host}/schema`);
+ window.location.replace(`https://json-schema.app/view/%23?url=${url}`);
+ </script>
+ <h1>JavaScript required for a dynamic redirect...</h1>
+ </body>
+ </html>
+ """,
+ content_type="text/html",
+ )
+
+ async def _handler_stop(self, _request: web.Request) -> web.Response:
+ """
+ Route handler for shutting down the server (and whole manager)
+ """
+
+ self._shutdown_event.set()
+ logger.info("Shutdown event triggered...")
+ return web.Response(text="Shutting down...")
+
+ async def _handler_reload(self, _request: web.Request) -> web.Response:
+ """
+ Route handler for reloading the server
+ """
+
+ logger.info("Reloading event triggered...")
+ await self._reload_config()
+ return web.Response(text="Reloading...")
+
+ def _setup_routes(self) -> None:
+ self.app.add_routes(
+ [
+ web.get("/", self._handler_index),
+ web.get(r"/v1/config{path:.*}", self._handler_config_query),
+ web.put(r"/v1/config{path:.*}", self._handler_config_query),
+ web.delete(r"/v1/config{path:.*}", self._handler_config_query),
+ web.patch(r"/v1/config{path:.*}", self._handler_config_query),
+ web.post("/stop", self._handler_stop),
+ web.post("/reload", self._handler_reload),
+ web.get("/schema", self._handler_schema),
+ web.get("/schema/ui", self._handle_view_schema),
+ web.get("/metrics", self._handler_metrics),
+ ]
+ )
+
+ async def _reconfigure_listen_address(self, config: KresConfig) -> None:
+ async with self.listen_lock:
+ mgn = config.management
+
+ # if the listen address did not change, do nothing
+ if self.listen == mgn:
+ return
+
+ # start the new listen address
+ nsite: Union[web.TCPSite, web.UnixSite]
+ if mgn.unix_socket:
+ nsite = web.UnixSite(self.runner, str(mgn.unix_socket))
+ logger.info(f"Starting API HTTP server on http+unix://{mgn.unix_socket}")
+ elif mgn.interface:
+ nsite = web.TCPSite(self.runner, str(mgn.interface.addr), int(mgn.interface.port))
+ logger.info(f"Starting API HTTP server on http://{mgn.interface.addr}:{mgn.interface.port}")
+ else:
+                raise KresManagerException("Requested API on an unsupported configuration format.")
+ await nsite.start()
+
+ # stop the old listen
+ assert (self.listen is None) == (self.site is None)
+ if self.listen is not None and self.site is not None:
+ if self.listen.unix_socket:
+                    logger.info(f"Stopping API HTTP server on http+unix://{self.listen.unix_socket}")
+ elif self.listen.interface:
+ logger.info(
+ f"Stopping API HTTP server on http://{self.listen.interface.addr}:{self.listen.interface.port}"
+ )
+ await self.site.stop()
+
+ # save new state
+ self.listen = mgn
+ self.site = nsite
+
+ async def shutdown(self) -> None:
+ if self.site is not None:
+ await self.site.stop()
+ await self.runner.cleanup()
+
+ def get_exit_code(self) -> int:
+ return self._exit_code
+
+
+async def _load_raw_config(config: Union[Path, Dict[str, Any]]) -> Dict[str, Any]:
+ # Initial configuration of the manager
+ if isinstance(config, Path):
+ if not config.exists():
+ raise KresManagerException(
+ f"Manager is configured to load config file at {config} on startup, but the file does not exist."
+ )
+ else:
+ logger.info("Loading initial configuration from %s", config)
+ config = parse_yaml(await readfile(config))
+
+ # validate the initial configuration
+ assert isinstance(config, dict)
+ return config
+
+
+async def _load_config(config: Dict[str, Any]) -> KresConfig:
+ logger.info("Validating initial configuration...")
+ config_validated = KresConfig(config)
+ return config_validated
+
+
+async def _init_config_store(config: Dict[str, Any]) -> ConfigStore:
+ config_validated = await _load_config(config)
+ config_store = ConfigStore(config_validated)
+ return config_store
+
+
+async def _init_manager(config_store: ConfigStore, server: Server) -> KresManager:
+ """
+ Called asynchronously when the application initializes.
+ """
+
+ # Instantiate subprocess controller (if we wanted to, we could switch it at this point)
+ controller = await get_best_controller_implementation(config_store.get())
+
+ # Create KresManager. This will perform autodetection of available service managers and
+ # select the most appropriate to use (or use the one configured directly)
+ manager = await KresManager.create(controller, config_store, server.trigger_shutdown)
+
+ logger.info("Initial configuration applied. Process manager initialized...")
+ return manager
+
+
+async def _deny_working_directory_changes(config_old: KresConfig, config_new: KresConfig) -> Result[None, str]:
+ if config_old.rundir != config_new.rundir:
+ return Result.err("Changing manager's `rundir` during runtime is not allowed.")
+
+ return Result.ok(None)
+
+
+def _set_working_directory(config_raw: Dict[str, Any]) -> None:
+ rundir = get_rundir_without_validation(config_raw)
+
+ logger.info("changing working directory to rundir at '%s'", rundir.to_path().absolute())
+ os.chdir(rundir.to_path())
+
+
+def _lock_working_directory(attempt: int = 0) -> None:
+ # the following syscall is atomic, it's essentially the same as acquiring a lock
+ try:
+ pidfile_fd = os.open(PID_FILE_NAME, os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o644)
+ except OSError as e:
+ if e.errno == errno.EEXIST and attempt == 0:
+ # the pid file exists, let's check PID
+ with open(PID_FILE_NAME, "r", encoding="utf-8") as f:
+ pid = int(f.read().strip())
+ try:
+ os.kill(pid, 0)
+ except OSError as e2:
+ if e2.errno == errno.ESRCH:
+ os.unlink(PID_FILE_NAME)
+ _lock_working_directory(attempt=attempt + 1)
+ return
+ raise KresManagerException(
+ "Another manager is running in the same working directory."
+ f" PID file is located at {os.getcwd()}/{PID_FILE_NAME}"
+ )
+ else:
+ raise KresManagerException(
+ "Another manager is running in the same working directory."
+ f" PID file is located at {os.getcwd()}/{PID_FILE_NAME}"
+ )
+
+ # now we know that we are the only manager running in this directory
+
+ # write PID to the pidfile and close it afterwards
+ pidfile = os.fdopen(pidfile_fd, "w")
+ pid = os.getpid()
+ pidfile.write(f"{pid}\n")
+ pidfile.close()
+
+ # make sure that the file is deleted on shutdown
+ atexit.register(lambda: os.unlink(PID_FILE_NAME))
+
+
+async def _sigint_while_shutting_down():
+ logger.warning(
+ "Received SIGINT while already shutting down. Ignoring."
+ " If you want to forcefully stop the manager right now, use SIGTERM."
+ )
+
+
+async def _sigterm_while_shutting_down():
+ logger.warning("Received SIGTERM. Invoking dirty shutdown!")
+ sys.exit(128 + signal.SIGTERM)
+
+
+async def start_server(config: Path = DEFAULT_MANAGER_CONFIG_FILE) -> int:
+    # This function is quite long, but it describes how the manager runs. So let's silence pylint
+ # pylint: disable=too-many-statements
+
+ start_time = time()
+ working_directory_on_startup = os.getcwd()
+ manager: Optional[KresManager] = None
+
+ # Block signals during initialization to force their processing once everything is ready
+ signal.pthread_sigmask(signal.SIG_BLOCK, Server.all_handled_signals())
+
+    # before starting the server, initialize the subprocess controller, config store, etc. Any errors during
+    # initialization are fatal
+ try:
+ # Make sure that the config path does not change meaning when we change working directory
+ config = config.absolute()
+
+ # Preprocess config - load from file or in general take it to the last step before validation.
+ config_raw = await _load_raw_config(config)
+
+ # We want to change cwd as soon as possible. Some parts of the codebase are using os.getcwd() to get the
+ # working directory.
+ #
+ # If we fail to read rundir from unparsed config, the first config validation error comes from here
+ set_global_validation_context(Context(config.parent, False)) # Strict validation for Paths is off.
+ _set_working_directory(config_raw)
+ reset_global_validation_context()
+
+ # We don't want more than one manager in a single working directory. So we lock it with a PID file.
+ # Warning - this does not prevent multiple managers with the same naming of kresd service.
+ _lock_working_directory()
+
+ # before processing any configuration, set validation context
+ # - resolve_root = root against which all relative paths will be resolved
+ set_global_validation_context(Context(config.parent))
+
+ # After the working directory is set, we can initialize proper config store with a newly parsed configuration.
+ config_store = await _init_config_store(config_raw)
+
+ # Some "constants" need to be loaded from the initial config, some need to be stored from the initial run conditions
+ await init_user_constants(config_store, working_directory_on_startup)
+
+        # The behaviour described above with paths means that we MUST NOT allow `rundir` changes after initialization.
+ # It would cause strange problems because every other path configuration depends on it. Therefore, we have to
+ # add a check to the config store, which disallows changes.
+ await config_store.register_verifier(_deny_working_directory_changes)
+
+        # Up to this point, we have been logging to a memory buffer. But now, when we have the configuration loaded, we
+ # can flush the buffer into the proper place
+ await log.logger_init(config_store)
+
+ # With configuration on hand, we can initialize monitoring. We want to do this before any subprocesses are
+ # started, therefore before initializing manager
+ await statistics.init_monitoring(config_store)
+
+ # prepare instance of the server (no side effects)
+ server = Server(config_store, config)
+
+        # After we have loaded the configuration, we can start worrying about subprocess management.
+ manager = await _init_manager(config_store, server)
+
+ except CancelStartupExecInsteadException as e:
+ # if we caught this exception, some component wants to perform a reexec during startup. Most likely, it would
+ # be a subprocess manager like supervisord, which wants to make sure the manager runs under supervisord in
+        # the process tree. So now we stop everything, and exec what we are told to. We are assuming that the thing
+ # we'll exec will invoke us again.
+ logger.info("Exec requested with arguments: %s", str(e.exec_args))
+
+ # unblock signals, this could actually terminate us straight away
+ signal.pthread_sigmask(signal.SIG_UNBLOCK, Server.all_handled_signals())
+
+ # run exit functions
+ atexit.run_callbacks()
+
+ # and finally exec what we were told to exec
+ os.execl(*e.exec_args)
+
+ except KresManagerException as e:
+ # We caught an error with a pretty error message. Just print it and exit.
+ logger.error(e)
+ return 1
+
+ except BaseException:
+        logger.error("Uncaught generic exception during manager initialization...", exc_info=True)
+ return 1
+
+    # At this point, all backend functionality-providing components are initialized. It's therefore safe to start
+    # the API server.
+ try:
+ await server.start()
+ except OSError as e:
+ if e.errno in (errno.EADDRINUSE, errno.EADDRNOTAVAIL):
+ # fancy error reporting of network binding errors
+ logger.error(str(e))
+ await manager.stop()
+ return 1
+ raise
+
+ # At this point, pretty much everything is ready to go. We should just make sure the user can shut
+ # the manager down with signals.
+ server.bind_signal_handlers()
+ signal.pthread_sigmask(signal.SIG_UNBLOCK, Server.all_handled_signals())
+
+ logger.info(f"Manager fully initialized and running in {round(time() - start_time, 3)} seconds")
+
+ # notify systemd/anything compatible that we are ready
+ systemd_notify(READY="1")
+
+ await server.wait_for_shutdown()
+
+ # notify systemd that we are shutting down
+ systemd_notify(STOPPING="1")
+
+ # Ok, now we are tearing everything down.
+
+ # First of all, let's block all unwanted interruptions. We don't want to be reconfiguring kresd's while
+ # shutting down.
+ signal.pthread_sigmask(signal.SIG_BLOCK, Server.all_handled_signals())
+ server.unbind_signal_handlers()
+    # on the other hand, we want to immediately stop when the user really wants us to stop
+ asyncio_compat.add_async_signal_handler(signal.SIGTERM, _sigterm_while_shutting_down)
+ asyncio_compat.add_async_signal_handler(signal.SIGINT, _sigint_while_shutting_down)
+ signal.pthread_sigmask(signal.SIG_UNBLOCK, {signal.SIGTERM, signal.SIGINT})
+
+    # After triggering shutdown, we need to clean everything up
+ logger.info("Stopping API service...")
+ await server.shutdown()
+ logger.info("Stopping kresd manager...")
+ await manager.stop()
+    logger.info(f"The manager ran for {round(time() - start_time)} seconds...")
+ return server.get_exit_code()
diff --git a/manager/knot_resolver_manager/statistics.py b/manager/knot_resolver_manager/statistics.py
new file mode 100644
index 00000000..069f4e75
--- /dev/null
+++ b/manager/knot_resolver_manager/statistics.py
@@ -0,0 +1,413 @@
+import asyncio
+import json
+import logging
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, Generator, List, Optional, Tuple, TypeVar
+
+from prometheus_client import Histogram, exposition # type: ignore
+from prometheus_client.bridge.graphite import GraphiteBridge # type: ignore
+from prometheus_client.core import GaugeMetricFamily # type: ignore
+from prometheus_client.core import REGISTRY, CounterMetricFamily, HistogramMetricFamily, Metric
+
+from knot_resolver_manager import compat
+from knot_resolver_manager.config_store import ConfigStore, only_on_real_changes
+from knot_resolver_manager.datamodel.config_schema import KresConfig
+from knot_resolver_manager.utils.functional import Result
+
+if TYPE_CHECKING:
+ from knot_resolver_manager.kresd_controller.interface import KresID, Subprocess
+
+
+logger = logging.getLogger(__name__)
+
+MANAGER_REQUEST_RECONFIGURE_LATENCY = Histogram(
+ "manager_request_reconfigure_latency", "Time it takes to change configuration"
+)
+
+_REGISTERED_RESOLVERS: "Dict[KresID, Subprocess]" = {}
+
+
+T = TypeVar("T")
+
+
+def async_timing_histogram(metric: Histogram) -> Callable[[Callable[..., Awaitable[T]]], Callable[..., Awaitable[T]]]:
+ """
+    Decorator which can be used to report the duration of async functions
+ """
+
+ def decorator(func: Callable[..., Awaitable[T]]) -> Callable[..., Awaitable[T]]:
+ async def wrapper(*args: Any, **kwargs: Any) -> T:
+ with metric.time():
+ res = await func(*args, **kwargs)
+ return res
+
+ return wrapper
+
+ return decorator
+
+
+async def _command_registered_resolvers(cmd: str) -> "Dict[KresID, str]":
+ async def single_pair(sub: "Subprocess") -> "Tuple[KresID, str]":
+ return sub.id, await sub.command(cmd)
+
+ pairs = await asyncio.gather(*(single_pair(inst) for inst in _REGISTERED_RESOLVERS.values()))
+ return dict(pairs)
+
+
+def _counter(name: str, description: str, label: Tuple[str, str], value: float) -> CounterMetricFamily:
+ c = CounterMetricFamily(name, description, labels=(label[0],))
+ c.add_metric(label[1], value) # type: ignore
+ return c
+
+
+def _gauge(name: str, description: str, label: Tuple[str, str], value: float) -> GaugeMetricFamily:
+ c = GaugeMetricFamily(name, description, labels=(label[0],))
+ c.add_metric(label[1], value) # type: ignore
+ return c
+
+
+def _histogram(
+ name: str, description: str, label: Tuple[str, str], buckets: List[Tuple[str, int]], sum_value: float
+) -> HistogramMetricFamily:
+ c = HistogramMetricFamily(name, description, labels=(label[0],))
+ c.add_metric(label[1], buckets, sum_value=sum_value) # type: ignore
+ return c
+
+
+class ResolverCollector:
+ def __init__(self, config_store: ConfigStore) -> None:
+ self._stats_raw: "Optional[Dict[KresID, str]]" = None
+ self._config_store: ConfigStore = config_store
+ self._collection_task: "Optional[asyncio.Task[None]]" = None
+ self._skip_immediate_collection: bool = False
+
+ async def collect_kresd_stats(self, _triggered_from_prometheus_library: bool = False) -> None:
+ if self._skip_immediate_collection:
+ # this would happen because we are calling this function first manually before stat generation,
+ # and once again immediately afterwards caused by the prometheus library's stat collection
+ #
+            # this is code made to solve the problem of calling async functions from sync methods
+ self._skip_immediate_collection = False
+ return
+
+ config = self._config_store.get()
+
+ if config.monitoring.enabled == "manager-only":
+ logger.debug("Skipping kresd stat collection due to configuration")
+ self._stats_raw = None
+ return
+
+ lazy = config.monitoring.enabled == "lazy"
+ cmd = "collect_lazy_statistics()" if lazy else "collect_statistics()"
+ logger.debug("Collecting kresd stats with method '%s'", cmd)
+ stats_raw = await _command_registered_resolvers(cmd)
+ self._stats_raw = stats_raw
+
+ # if this function was not called by the prometheus library and calling collect() is imminent,
+ # we should block the next collection cycle as it would be useless
+ if not _triggered_from_prometheus_library:
+ self._skip_immediate_collection = True
+
+ def _trigger_stats_collection(self) -> None:
+ # we are running inside an event loop, but in a synchronous function and that sucks a lot
+ # it means that we shouldn't block the event loop by performing a blocking stats collection
+ # but it also means that we can't yield to the event loop as this function is synchronous
+ # therefore we can only start a new task, but we can't wait for it
+ # which causes the metrics to be delayed by one collection pass (not the best, but probably good enough)
+ #
+ # this issue can be prevented by calling the `collect_kresd_stats()` function manually before entering
+ # the Prometheus library. We just have to prevent the library from invoking it again. See the mentioned
+ # function for details
+
+ if compat.asyncio.is_event_loop_running():
+ # when running, we can schedule the new data collection
+ if self._collection_task is not None and not self._collection_task.done():
+ logger.warning("Statistics collection task is still running. Skipping scheduling of a new one!")
+ else:
+ self._collection_task = compat.asyncio.create_task(
+ self.collect_kresd_stats(_triggered_from_prometheus_library=True)
+ )
+
+ else:
+ # when not running, we can start a new loop (we are not in the manager's main thread)
+ compat.asyncio.run(self.collect_kresd_stats(_triggered_from_prometheus_library=True))
+
+ def _create_resolver_metrics_loaded_gauge(self, kid: "KresID", loaded: bool) -> GaugeMetricFamily:
+ return _gauge(
+ "resolver_metrics_loaded",
+ "0 if metrics from resolver instance were not loaded, otherwise 1",
+ label=("instance_id", str(kid)),
+ value=int(loaded),
+ )
+
+ def collect(self) -> Generator[Metric, None, None]:
+ # schedule new stats collection
+ self._trigger_stats_collection()
+
+ # if we have no data, return metrics with information about it and exit
+ if self._stats_raw is None:
+ for kid in _REGISTERED_RESOLVERS:
+ yield self._create_resolver_metrics_loaded_gauge(kid, False)
+ return
+
+ # if we have data, parse them
+ for kid in _REGISTERED_RESOLVERS:
+ success = False
+ try:
+ if kid in self._stats_raw:
+ raw = self._stats_raw[kid]
+ metrics: Dict[str, int] = json.loads(raw[1:-1])
+ yield from self._parse_resolver_metrics(kid, metrics)
+ success = True
+ except json.JSONDecodeError:
+ logger.warning("Failed to load metrics from resolver instance %s: failed to parse statistics", str(kid))
+ except KeyError as e:
+ logger.warning(
+ "Failed to load metrics from resolver instance %s: attempted to read missing statistic %s",
+ str(kid),
+ str(e),
+ )
+
+ yield self._create_resolver_metrics_loaded_gauge(kid, success)
+
+ def describe(self) -> List[Metric]:
+ # this function prevents the collector registry from invoking the collect function on startup
+ return []
+
+ def _parse_resolver_metrics(self, instance_id: "KresID", metrics: Any) -> Generator[Metric, None, None]:
+ sid = str(instance_id)
+
+ # response latency histogram
+ BUCKET_NAMES_IN_RESOLVER = ("1ms", "10ms", "50ms", "100ms", "250ms", "500ms", "1000ms", "1500ms", "slow")
+ BUCKET_NAMES_PROMETHEUS = ("0.001", "0.01", "0.05", "0.1", "0.25", "0.5", "1.0", "1.5", "+Inf")
+ yield _histogram(
+ "resolver_response_latency",
+ "Time it takes to respond to queries in seconds",
+ label=("instance_id", sid),
+ buckets=[
+ (bnp, metrics[f"answer.{duration}"])
+ for bnp, duration in zip(BUCKET_NAMES_PROMETHEUS, BUCKET_NAMES_IN_RESOLVER)
+ ],
+ sum_value=metrics["answer.sum_ms"] / 1_000,
+ )
+
+ yield _counter(
+ "resolver_request_total",
+ "total number of DNS requests (including internal client requests)",
+ label=("instance_id", sid),
+ value=metrics["request.total"],
+ )
+ yield _counter(
+ "resolver_request_internal",
+ "number of internal requests generated by Knot Resolver (e.g. DNSSEC trust anchor updates)",
+ label=("instance_id", sid),
+ value=metrics["request.internal"],
+ )
+ yield _counter(
+ "resolver_request_udp",
+ "number of external requests received over plain UDP (RFC 1035)",
+ label=("instance_id", sid),
+ value=metrics["request.udp"],
+ )
+ yield _counter(
+ "resolver_request_tcp",
+ "number of external requests received over plain TCP (RFC 1035)",
+ label=("instance_id", sid),
+ value=metrics["request.tcp"],
+ )
+ yield _counter(
+ "resolver_request_dot",
+ "number of external requests received over DNS-over-TLS (RFC 7858)",
+ label=("instance_id", sid),
+ value=metrics["request.dot"],
+ )
+ yield _counter(
+ "resolver_request_doh",
+ "number of external requests received over DNS-over-HTTP (RFC 8484)",
+ label=("instance_id", sid),
+ value=metrics["request.doh"],
+ )
+ yield _counter(
+ "resolver_request_xdp",
+ "number of external requests received over plain UDP via an AF_XDP socket",
+ label=("instance_id", sid),
+ value=metrics["request.xdp"],
+ )
+ yield _counter(
+ "resolver_answer_total",
+ "total number of answered queries",
+ label=("instance_id", sid),
+ value=metrics["answer.total"],
+ )
+ yield _counter(
+ "resolver_answer_cached",
+ "number of queries answered from cache",
+ label=("instance_id", sid),
+ value=metrics["answer.cached"],
+ )
+ yield _counter(
+ "resolver_answer_rcode_noerror",
+ "number of NOERROR answers",
+ label=("instance_id", sid),
+ value=metrics["answer.noerror"],
+ )
+ yield _counter(
+ "resolver_answer_rcode_nodata",
+ "number of NOERROR answers without any data",
+ label=("instance_id", sid),
+ value=metrics["answer.nodata"],
+ )
+ yield _counter(
+ "resolver_answer_rcode_nxdomain",
+ "number of NXDOMAIN answers",
+ label=("instance_id", sid),
+ value=metrics["answer.nxdomain"],
+ )
+ yield _counter(
+ "resolver_answer_rcode_servfail",
+ "number of SERVFAIL answers",
+ label=("instance_id", sid),
+ value=metrics["answer.servfail"],
+ )
+ yield _counter(
+ "resolver_answer_flag_aa",
+ "number of authoritative answers",
+ label=("instance_id", sid),
+ value=metrics["answer.aa"],
+ )
+ yield _counter(
+ "resolver_answer_flag_tc",
+ "number of truncated answers",
+ label=("instance_id", sid),
+ value=metrics["answer.tc"],
+ )
+ yield _counter(
+ "resolver_answer_flag_ra",
+ "number of answers with recursion available flag",
+ label=("instance_id", sid),
+ value=metrics["answer.ra"],
+ )
+ yield _counter(
+ "resolver_answer_flags_rd",
+ "number of recursion desired (in answer!)",
+ label=("instance_id", sid),
+ value=metrics["answer.rd"],
+ )
+ yield _counter(
+ "resolver_answer_flag_ad",
+ "number of authentic data (DNSSEC) answers",
+ label=("instance_id", sid),
+ value=metrics["answer.ad"],
+ )
+ yield _counter(
+ "resolver_answer_flag_cd",
+ "number of checking disabled (DNSSEC) answers",
+ label=("instance_id", sid),
+ value=metrics["answer.cd"],
+ )
+ yield _counter(
+ "resolver_answer_flag_do",
+ "number of DNSSEC answer OK",
+ label=("instance_id", sid),
+ value=metrics["answer.do"],
+ )
+ yield _counter(
+ "resolver_answer_flag_edns0",
+ "number of answers with EDNS0 present",
+ label=("instance_id", sid),
+ value=metrics["answer.edns0"],
+ )
+ yield _counter(
+ "resolver_query_edns",
+ "number of queries with EDNS present",
+ label=("instance_id", sid),
+ value=metrics["query.edns"],
+ )
+ yield _counter(
+ "resolver_query_dnssec",
+ "number of queries with DNSSEC DO=1",
+ label=("instance_id", sid),
+ value=metrics["query.dnssec"],
+ )
+
+
+_resolver_collector: Optional[ResolverCollector] = None
+
+
+def unregister_resolver_metrics_for(subprocess: "Subprocess") -> None:
+ """
+ Cancel metric collection from resolver "Subprocess"
+ """
+ del _REGISTERED_RESOLVERS[subprocess.id]
+
+
+def register_resolver_metrics_for(subprocess: "Subprocess") -> None:
+ """
+ Register resolver "Subprocess" for metric collection
+ """
+ _REGISTERED_RESOLVERS[subprocess.id] = subprocess
+
+
+async def report_stats() -> bytes:
+ """
+    Collects metrics from everything and returns a data string in Prometheus format.
+ """
+
+ # manually trigger stat collection so that we do not have to wait for it
+ if _resolver_collector is not None:
+ await _resolver_collector.collect_kresd_stats()
+ else:
+ raise RuntimeError("Function invoked before initializing the module!")
+
+ # generate the report
+ return exposition.generate_latest() # type: ignore
+
+
+async def _deny_turning_off_graphite_bridge(old_config: KresConfig, new_config: KresConfig) -> Result[None, str]:
+ if old_config.monitoring.graphite and not new_config.monitoring.graphite:
+ return Result.err(
+ "You can't turn off graphite monitoring dynamically. If you really want this feature, please let the developers know."
+ )
+
+ if (
+ old_config.monitoring.graphite is not None
+ and new_config.monitoring.graphite is not None
+ and old_config.monitoring.graphite != new_config.monitoring.graphite
+ ):
+        return Result.err("Changing graphite exporter configuration at runtime is not allowed.")
+
+ return Result.ok(None)
+
+
+_graphite_bridge: Optional[GraphiteBridge] = None
+
+
+@only_on_real_changes(lambda c: c.monitoring.graphite)
+async def _configure_graphite_bridge(config: KresConfig) -> None:
+ """
+ Starts graphite bridge if required
+ """
+ global _graphite_bridge
+ if config.monitoring.graphite is not False and _graphite_bridge is None:
+ logger.info(
+ "Starting Graphite metrics exporter for [%s]:%d",
+ str(config.monitoring.graphite.host),
+ int(config.monitoring.graphite.port),
+ )
+ _graphite_bridge = GraphiteBridge((str(config.monitoring.graphite.host), int(config.monitoring.graphite.port)))
+ _graphite_bridge.start( # type: ignore
+ interval=config.monitoring.graphite.interval.seconds(), prefix=config.monitoring.graphite.prefix
+ )
+
+
+async def init_monitoring(config_store: ConfigStore) -> None:
+ """
+ Initialize monitoring. Must be called before any other function from this module.
+ """
+ # register metrics collector
+ global _resolver_collector
+ _resolver_collector = ResolverCollector(config_store)
+ REGISTRY.register(_resolver_collector) # type: ignore
+
+ # register graphite bridge
+ await config_store.register_verifier(_deny_turning_off_graphite_bridge)
+ await config_store.register_on_change_callback(_configure_graphite_bridge)
diff --git a/manager/knot_resolver_manager/utils/__init__.py b/manager/knot_resolver_manager/utils/__init__.py
new file mode 100644
index 00000000..062b740d
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/__init__.py
@@ -0,0 +1,45 @@
+from typing import Any, Callable, Optional, Type, TypeVar
+
+T = TypeVar("T")
+
+
+def ignore_exceptions_optional(
+ _tp: Type[T], default: Optional[T], *exceptions: Type[BaseException]
+) -> Callable[[Callable[..., Optional[T]]], Callable[..., Optional[T]]]:
+ """
+    Decorator that wraps a function, preventing it from raising exceptions
+    and instead returning the configured default value.
+
+    :param Type[T] _tp: Return type of the function. Essentially only a template argument for type-checking
+ :param T default: The value to return as a default
+ :param List[Type[BaseException]] exceptions: The list of exceptions to catch
+ :return: value of the decorated function, or default if exception raised
+ :rtype: T
+ """
+
+ def decorator(func: Callable[..., Optional[T]]) -> Callable[..., Optional[T]]:
+ def f(*nargs: Any, **nkwargs: Any) -> Optional[T]:
+ try:
+ return func(*nargs, **nkwargs)
+ except BaseException as e:
+ if isinstance(e, exceptions): # pyright: reportUnnecessaryIsInstance=false
+ return default
+ else:
+ raise e # pyright: reportGeneralTypeIssues=false
+
+ return f
+
+ return decorator
+
+
+def ignore_exceptions(
+ default: T, *exceptions: Type[BaseException]
+) -> Callable[[Callable[..., Optional[T]]], Callable[..., Optional[T]]]:
+ return ignore_exceptions_optional(type(default), default, *exceptions)
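+
+
+# Usage sketch: build a forgiving parser that returns a default instead of raising:
+#   parse_port = ignore_exceptions(0, ValueError)(int)
+#   parse_port("80")    # -> 80
+#   parse_port("oops")  # -> 0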
+
+
+def phantom_use(var: Any) -> None: # pylint: disable=unused-argument
+ """
+    Function which consumes its argument, doing absolutely nothing with it. Useful
+    for convincing pylint that we need the variable even when it's unused.
+ """
diff --git a/manager/knot_resolver_manager/utils/async_utils.py b/manager/knot_resolver_manager/utils/async_utils.py
new file mode 100644
index 00000000..1cd7303e
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/async_utils.py
@@ -0,0 +1,129 @@
+import asyncio
+import os
+import pkgutil
+import signal
+import sys
+import time
+from asyncio import create_subprocess_exec, create_subprocess_shell
+from pathlib import PurePath
+from threading import Thread
+from typing import Any, Dict, Generic, List, Optional, TypeVar, Union
+
+from knot_resolver_manager.compat.asyncio import to_thread
+
+
+def unblock_signals():
+    if sys.version_info >= (3, 8):
+ signal.pthread_sigmask(signal.SIG_UNBLOCK, signal.valid_signals()) # type: ignore
+ else:
+ # the list of signals is not exhaustive, but it should cover all signals we might ever want to block
+ signal.pthread_sigmask(
+ signal.SIG_UNBLOCK,
+ {
+ signal.SIGHUP,
+ signal.SIGINT,
+ signal.SIGTERM,
+ signal.SIGUSR1,
+ signal.SIGUSR2,
+ },
+ )
+
+
+async def call(
+ cmd: Union[str, bytes, List[str], List[bytes]], shell: bool = False, discard_output: bool = False
+) -> int:
+ """
+ custom async alternative to subprocess.call()
+ """
+ kwargs: Dict[str, Any] = {
+ "preexec_fn": unblock_signals,
+ }
+ if discard_output:
+ kwargs["stdout"] = asyncio.subprocess.DEVNULL
+ kwargs["stderr"] = asyncio.subprocess.DEVNULL
+
+ if shell:
+ if isinstance(cmd, list):
+ raise RuntimeError("can't use list of arguments with shell=True")
+ proc = await create_subprocess_shell(cmd, **kwargs)
+ else:
+ if not isinstance(cmd, list):
+ raise RuntimeError(
+                "Please use a list of arguments, not a single string. It prevents ambiguity when parsing"
+ )
+ proc = await create_subprocess_exec(*cmd, **kwargs)
+
+ return await proc.wait()
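+
+
+# Usage sketch: exit_code = await call(["echo", "hello"], discard_output=True)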
+
+
+async def readfile(path: Union[str, PurePath]) -> str:
+ """
+ asynchronously read whole file and return its content
+ """
+
+ def readfile_sync(path: Union[str, PurePath]) -> str:
+ with open(path, "r", encoding="utf8") as f:
+ return f.read()
+
+ return await to_thread(readfile_sync, path)
+
+
+async def writefile(path: Union[str, PurePath], content: str) -> None:
+ """
+ asynchronously set content of a file to a given string `content`.
+ """
+
+ def writefile_sync(path: Union[str, PurePath], content: str) -> int:
+ with open(path, "w", encoding="utf8") as f:
+ return f.write(content)
+
+ await to_thread(writefile_sync, path, content)
+
+
+async def wait_for_process_termination(pid: int, sleep_sec: float = 0) -> None:
+ """
+    wait for any process (does not have to be a child process), given by its PID, to terminate
+
+    sleep_sec configures the granularity with which we poll for termination
+ """
+
+ def wait_sync(pid: int, sleep_sec: float) -> None:
+ while True:
+ try:
+ os.kill(pid, 0)
+ if sleep_sec == 0:
+ os.sched_yield()
+ else:
+ time.sleep(sleep_sec)
+ except ProcessLookupError:
+ break
+
+ await to_thread(wait_sync, pid, sleep_sec)
+
+
+async def read_resource(package: str, filename: str) -> Optional[bytes]:
+ return await to_thread(pkgutil.get_data, package, filename)
+
+
+T = TypeVar("T")
+
+
+class BlockingEventDispatcher(Thread, Generic[T]):
+ def __init__(self, name: str = "blocking_event_dispatcher") -> None:
+ super().__init__(name=name, daemon=True)
+ # warning: the asyncio queue is not thread safe
+ self._removed_unit_names: "asyncio.Queue[T]" = asyncio.Queue()
+ self._main_event_loop = asyncio.get_event_loop()
+
+ def dispatch_event(self, event: T) -> None:
+ """
+ Method to dispatch events from the blocking thread
+ """
+
+ async def add_to_queue():
+ await self._removed_unit_names.put(event)
+
+ self._main_event_loop.call_soon_threadsafe(add_to_queue)
+
+ async def next_event(self) -> T:
+ return await self._removed_unit_names.get()
diff --git a/manager/knot_resolver_manager/utils/custom_atexit.py b/manager/knot_resolver_manager/utils/custom_atexit.py
new file mode 100644
index 00000000..2fe55433
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/custom_atexit.py
@@ -0,0 +1,20 @@
+"""
+Custom replacement for the standard module `atexit`. We use `atexit` behind the scenes; we just add the option
+to invoke the exit functions manually.
+"""
+
+import atexit
+from typing import Callable, List
+
+_at_exit_functions: List[Callable[[], None]] = []
+
+
+def register(func: Callable[[], None]) -> None:
+ _at_exit_functions.append(func)
+ atexit.register(func)
+
+
+def run_callbacks() -> None:
+ for func in _at_exit_functions:
+ func()
+ atexit.unregister(func)
diff --git a/manager/knot_resolver_manager/utils/etag.py b/manager/knot_resolver_manager/utils/etag.py
new file mode 100644
index 00000000..bb80700b
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/etag.py
@@ -0,0 +1,10 @@
+import base64
+import json
+from hashlib import blake2b
+from typing import Any
+
+
+def structural_etag(obj: Any) -> str:
+ m = blake2b(digest_size=15)
+ m.update(json.dumps(obj, sort_keys=True).encode("utf8"))
+ return base64.urlsafe_b64encode(m.digest()).decode("utf8")
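+
+
+# Usage sketch: structurally equal objects yield the same tag regardless of key order,
+# e.g. structural_etag({"a": 1, "b": 2}) == structural_etag({"b": 2, "a": 1})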
diff --git a/manager/knot_resolver_manager/utils/functional.py b/manager/knot_resolver_manager/utils/functional.py
new file mode 100644
index 00000000..43abd705
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/functional.py
@@ -0,0 +1,72 @@
+from enum import Enum, auto
+from typing import Any, Callable, Generic, Iterable, TypeVar, Union
+
+T = TypeVar("T")
+
+
+def foldl(oper: Callable[[T, T], T], default: T, arr: Iterable[T]) -> T:
+ val = default
+ for x in arr:
+ val = oper(val, x)
+ return val
+
+
+def contains_element_matching(cond: Callable[[T], bool], arr: Iterable[T]) -> bool:
+ return foldl(lambda x, y: x or y, False, map(cond, arr))
+
+
+def all_matches(cond: Callable[[T], bool], arr: Iterable[T]) -> bool:
+ return foldl(lambda x, y: x and y, True, map(cond, arr))
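+
+
+# Examples:
+#   contains_element_matching(lambda x: x > 2, [1, 2, 3])  # -> True
+#   all_matches(lambda x: x > 0, [0, 1, 2])                # -> False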
+
+
+Succ = TypeVar("Succ")
+Err = TypeVar("Err")
+
+
+class _Status(Enum):
+ OK = auto()
+ ERROR = auto()
+
+
+class _ResultSentinel:
+ pass
+
+
+_RESULT_SENTINEL = _ResultSentinel()
+
+
+class Result(Generic[Succ, Err]):
+ @staticmethod
+ def ok(succ: T) -> "Result[T, Any]":
+ return Result(_Status.OK, succ=succ)
+
+ @staticmethod
+ def err(err: T) -> "Result[Any, T]":
+ return Result(_Status.ERROR, err=err)
+
+ def __init__(
+ self,
+ status: _Status,
+ succ: Union[Succ, _ResultSentinel] = _RESULT_SENTINEL,
+ err: Union[Err, _ResultSentinel] = _RESULT_SENTINEL,
+ ) -> None:
+ super().__init__()
+ self._status: _Status = status
+ self._succ: Union[_ResultSentinel, Succ] = succ
+ self._err: Union[_ResultSentinel, Err] = err
+
+ def unwrap(self) -> Succ:
+ assert self._status is _Status.OK
+ assert not isinstance(self._succ, _ResultSentinel)
+ return self._succ
+
+ def unwrap_err(self) -> Err:
+ assert self._status is _Status.ERROR
+ assert not isinstance(self._err, _ResultSentinel)
+ return self._err
+
+ def is_ok(self) -> bool:
+ return self._status is _Status.OK
+
+ def is_err(self) -> bool:
+ return self._status is _Status.ERROR
diff --git a/manager/knot_resolver_manager/utils/modeling/README.md b/manager/knot_resolver_manager/utils/modeling/README.md
new file mode 100644
index 00000000..97c68b54
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/modeling/README.md
@@ -0,0 +1,155 @@
+# Modeling utils
+
+These utilities are used to model schemas for data stored in a Python dictionary or in the YAML and JSON formats.
+The utilities also take care of parsing, validating and creating JSON schemas and basic documentation.
+
+## Creating schema
+
+A schema is created using the `ConfigSchema` class. The schema structure is specified using annotations.
+
+```python
+from .modeling import ConfigSchema
+
+class SimpleSchema(ConfigSchema):
+ integer: int = 5 # a default value can be specified
+ string: str
+ boolean: bool
+```
+More complex types can also be used in a schema, and schemas can be nested.
+Words in multi-word names are separated by an underscore `_` (e.g. `simple_schema`).
+
+```python
+from typing import Dict, List, Optional, Union
+
+class ComplexSchema(ConfigSchema):
+ optional: Optional[str] # this field is optional
+ union: Union[int, str] # integer and string are both valid
+ list: List[int] # list of integers
+ dictionary: Dict[str, bool] = {"key": False}
+ simple_schema: SimpleSchema # nested schema
+```
+
+
+### Additional validation
+
+If some additional validation needs to be done, there is the `_validate()` method for that.
+A `ValueError` exception should be raised in case of a validation error.
+
+```python
+class FieldsSchema(ConfigSchema):
+ field1: int
+ field2: int
+
+ def _validate(self) -> None:
+ if self.field1 > self.field2:
+ raise ValueError("field1 is bigger than field2")
+```
+
+
+### Additional layer, transformation methods
+
+It is possible to add layers to a schema and use a transformation method between layers to process the value.
+The transformation method must be named after the field (`value` in this example) with an underscore (`_`) prefix.
+In this example, `Layer2Schema` is the structure for the input data and `Layer1Schema` for the resulting data.
+
+```python
+class Layer1Schema(ConfigSchema):
+ class Layer2Schema(ConfigSchema):
+ value: Union[str, int]
+
+ _LAYER = Layer2Schema
+
+ value: int
+
+    def _value(self, obj: Layer2Schema) -> int:
+        if isinstance(obj.value, str):
+            return len(obj.value)  # transform str values to int; this is just an example
+        return obj.value
+```
+
+### Documentation and JSON schema
+
+A created schema can be documented using a simple docstring. A JSON schema is created by calling the `json_schema()` method on the schema class. The JSON schema includes descriptions from the docstring, defaults, etc.
+
+```python
+class SimpleSchema(ConfigSchema):
+ """
+ This is description for SimpleSchema itself.
+
+ ---
+ integer: description for integer field
+ string: description for string field
+ boolean: description for boolean field
+ """
+
+ integer: int = 5
+ string: str
+ boolean: bool
+
+json_schema = SimpleSchema.json_schema()
+```
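+
+For illustration, printing the generated schema might produce a fragment roughly like the one sketched in the comment below (the exact set of keys depends on the implementation):
+
+```python
+import json
+
+# Hypothetical output fragment:
+#   {"properties": {"integer": {"type": "integer",
+#    "description": "description for integer field", "default": 5}, ...}}
+print(json.dumps(SimpleSchema.json_schema(), indent=4))
+```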
+
+
+## Creating custom type
+
+Custom types can be made by extending the `BaseValueType` class, which is integrated into the parsing and validating process.
+Use `DataValidationError` to raise an exception during validation. `object_path` is used to track the node in more complex/nested schemas and to create useful log messages.
+
+```python
+from typing import Any
+
+from .modeling import BaseValueType
+from .modeling.exceptions import DataValidationError
+
+class IntNonNegative(BaseValueType):
+ def __init__(self, source_value: Any, object_path: str = "/") -> None:
+ super().__init__(source_value)
+ if isinstance(source_value, int) and not isinstance(source_value, bool):
+ if source_value < 0:
+                raise DataValidationError(f"value {source_value} is a negative number.", object_path)
+ self._value = source_value
+ else:
+ raise DataValidationError(
+ f"expected integer, got '{type(source_value)}'",
+ object_path,
+ )
+```
+
+For the JSON schema, you should implement the `json_schema` method.
+It should return a [JSON schema representation](https://json-schema.org/understanding-json-schema/index.html) of the custom type.
+
+```python
+ @classmethod
+ def json_schema(cls: Type["IntNonNegative"]) -> Dict[Any, Any]:
+ return {"type": "integer", "minimum": 0}
+```
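+
+A usage sketch of the custom type inside a schema (names here are illustrative):
+
+```python
+class WorkersSchema(ConfigSchema):  # hypothetical schema for illustration
+    workers: IntNonNegative
+
+WorkersSchema({"workers": 4})   # validates fine
+WorkersSchema({"workers": -1})  # raises DataValidationError
+```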
+
+
+## Parsing JSON/YAML
+
+For example, YAML data for `ComplexSchema` can look like this.
+Words in multi-word names are separated by a hyphen `-` (e.g. `simple-schema`).
+
+```yaml
+# data.yaml
+union: here could also be a number
+list: [1, 2, 3]
+dictionary:
+  key: false
+simple-schema:
+ integer: 55
+ string: this is string
+ boolean: false
+```
+
+To parse data from the YAML format, use the `parse_yaml` function, or `parse_json` for the JSON format.
+Parsed data is stored in a dict-like object that takes care of the `-`/`_` conversion.
+
+```python
+from .modeling import parse_yaml
+
+# read data from file
+with open("data.yaml") as f:
+ str_data = f.read()
+
+dict_data = parse_yaml(str_data)
+validated_data = ComplexSchema(dict_data)
+```
\ No newline at end of file
diff --git a/manager/knot_resolver_manager/utils/modeling/__init__.py b/manager/knot_resolver_manager/utils/modeling/__init__.py
new file mode 100644
index 00000000..d16f6c12
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/modeling/__init__.py
@@ -0,0 +1,14 @@
+from .base_generic_type_wrapper import BaseGenericTypeWrapper
+from .base_schema import BaseSchema, ConfigSchema
+from .base_value_type import BaseValueType
+from .parsing import parse_json, parse_yaml, try_to_parse
+
+__all__ = [
+ "BaseGenericTypeWrapper",
+ "BaseValueType",
+ "BaseSchema",
+ "ConfigSchema",
+ "parse_yaml",
+ "parse_json",
+ "try_to_parse",
+]
diff --git a/manager/knot_resolver_manager/utils/modeling/base_generic_type_wrapper.py b/manager/knot_resolver_manager/utils/modeling/base_generic_type_wrapper.py
new file mode 100644
index 00000000..1f2c1767
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/modeling/base_generic_type_wrapper.py
@@ -0,0 +1,9 @@
+from typing import Generic, TypeVar
+
+from .base_value_type import BaseTypeABC
+
+T = TypeVar("T")
+
+
+class BaseGenericTypeWrapper(Generic[T], BaseTypeABC): # pylint: disable=abstract-method
+ pass
diff --git a/manager/knot_resolver_manager/utils/modeling/base_schema.py b/manager/knot_resolver_manager/utils/modeling/base_schema.py
new file mode 100644
index 00000000..32388816
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/modeling/base_schema.py
@@ -0,0 +1,808 @@
+import enum
+import inspect
+from abc import ABC, abstractmethod # pylint: disable=[no-name-in-module]
+from typing import Any, Callable, Dict, Generic, List, Optional, Set, Tuple, Type, TypeVar, Union, cast
+
+import yaml
+
+from knot_resolver_manager.utils.functional import all_matches
+
+from .base_generic_type_wrapper import BaseGenericTypeWrapper
+from .base_value_type import BaseValueType
+from .exceptions import AggregateDataValidationError, DataDescriptionError, DataValidationError
+from .renaming import Renamed, renamed
+from .types import (
+ get_generic_type_argument,
+ get_generic_type_arguments,
+ get_generic_type_wrapper_argument,
+ get_optional_inner_type,
+ is_dict,
+ is_enum,
+ is_generic_type_wrapper,
+ is_internal_field_name,
+ is_list,
+ is_literal,
+ is_none_type,
+ is_optional,
+ is_tuple,
+ is_union,
+)
+
+T = TypeVar("T")
+
+
+def is_obj_type(obj: Any, types: Union[type, Tuple[Any, ...], Tuple[type, ...]]) -> bool:
+    # To check for a specific type we are using 'type()' instead of 'isinstance()'
+    # because, for example, 'bool' is a subclass of 'int', so 'isinstance(False, int)' returns True.
+ # pylint: disable=unidiomatic-typecheck
+ if isinstance(types, tuple):
+ return type(obj) in types
+ return type(obj) == types
+
+
+class Serializable(ABC):
+ """
+    An interface for making classes serializable to a dictionary (and in turn into JSON).
+ """
+
+ @abstractmethod
+ def to_dict(self) -> Dict[Any, Any]:
+ raise NotImplementedError(f"...for class {self.__class__.__name__}")
+
+ @staticmethod
+ def is_serializable(typ: Type[Any]) -> bool:
+ return (
+ typ in {str, bool, int, float}
+ or is_none_type(typ)
+ or is_literal(typ)
+ or is_dict(typ)
+ or is_list(typ)
+ or is_generic_type_wrapper(typ)
+ or (inspect.isclass(typ) and issubclass(typ, Serializable))
+ or (inspect.isclass(typ) and issubclass(typ, BaseValueType))
+ or (inspect.isclass(typ) and issubclass(typ, BaseSchema))
+ or (is_optional(typ) and Serializable.is_serializable(get_optional_inner_type(typ)))
+ or (is_union(typ) and all_matches(Serializable.is_serializable, get_generic_type_arguments(typ)))
+ )
+
+ @staticmethod
+ def serialize(obj: Any) -> Any:
+ if isinstance(obj, Serializable):
+ return obj.to_dict()
+
+ elif isinstance(obj, (BaseValueType, BaseGenericTypeWrapper)):
+ o = obj.serialize()
+ # if Serializable.is_serializable(o):
+ return Serializable.serialize(o)
+ # return o
+
+ elif isinstance(obj, list):
+ res: List[Any] = [Serializable.serialize(i) for i in cast(List[Any], obj)]
+ return res
+
+ return obj
+
+
+class _lazy_default(Generic[T], Serializable):
+ """
+    Wrapper for default values of BaseSchema classes which defers their instantiation
+    until the schema itself is being instantiated.
+ """
+
+ def __init__(self, constructor: Callable[..., T], *args: Any, **kwargs: Any) -> None:
+ # pylint: disable=[super-init-not-called]
+ self._func = constructor
+ self._args = args
+ self._kwargs = kwargs
+
+ def instantiate(self) -> T:
+ return self._func(*self._args, **self._kwargs)
+
+ def to_dict(self) -> Dict[Any, Any]:
+ return Serializable.serialize(self.instantiate())
+
+
+def lazy_default(constructor: Callable[..., T], *args: Any, **kwargs: Any) -> T:
+ """We use a factory function because you can't lie about the return type in `__new__`"""
+ return _lazy_default(constructor, *args, **kwargs) # type: ignore
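+
+# Example (hypothetical 'Listen' schema, for illustration): a lazy default avoids
+# sharing one mutable instance across all schema instances and defers its construction:
+#
+#     class Server(BaseSchema):
+#         listen: Listen = lazy_default(Listen, {"port": 53})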
+
+
+def _split_docstring(docstring: str) -> Tuple[str, Optional[str]]:
+ """
+ Splits docstring into description of the class and description of attributes
+ """
+
+ if "---" not in docstring:
+ return ("\n".join([s.strip() for s in docstring.splitlines()]).strip(), None)
+
+ doc, attrs_doc = docstring.split("---", maxsplit=1)
+ return (
+ "\n".join([s.strip() for s in doc.splitlines()]).strip(),
+ attrs_doc,
+ )
+
+
+def _parse_attrs_docstrings(docstring: str) -> Optional[Dict[str, str]]:
+ """
+ Given a docstring of a BaseSchema, return a dict with descriptions of individual attributes.
+ """
+
+ _, attrs_doc = _split_docstring(docstring)
+ if attrs_doc is None:
+ return None
+
+ # try to parse it as yaml:
+ data = yaml.safe_load(attrs_doc)
+ assert isinstance(data, dict), "Invalid format of attribute description"
+ return cast(Dict[str, str], data)
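+
+# For illustration, the expected docstring format is a free-form class description,
+# then a '---' separator, then a YAML mapping of attribute names to descriptions:
+#
+#     """
+#     Example schema.
+#     ---
+#     awesome_number: An integer with no special meaning.
+#     """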
+
+
+def _get_properties_schema(typ: Type[Any]) -> Dict[Any, Any]:
+ schema: Dict[Any, Any] = {}
+ annot: Dict[str, Any] = typ.__dict__.get("__annotations__", {})
+ docstring: str = typ.__dict__.get("__doc__", "") or ""
+ attribute_documentation = _parse_attrs_docstrings(docstring)
+ for field_name, python_type in annot.items():
+ name = field_name.replace("_", "-")
+ schema[name] = _describe_type(python_type)
+
+ # description
+ if attribute_documentation is not None:
+ if field_name not in attribute_documentation:
+ raise DataDescriptionError(f"The docstring does not describe field '{field_name}'", str(typ))
+ schema[name]["description"] = attribute_documentation[field_name]
+ del attribute_documentation[field_name]
+
+ # default value
+ if hasattr(typ, field_name):
+ assert Serializable.is_serializable(
+ python_type
+ ), f"Type '{python_type}' does not appear to be JSON serializable"
+ schema[name]["default"] = Serializable.serialize(getattr(typ, field_name))
+
+ if attribute_documentation is not None and len(attribute_documentation) > 0:
+ raise DataDescriptionError(
+ f"The docstring describes attributes which are not present - {tuple(attribute_documentation.keys())}",
+ str(typ),
+ )
+
+ return schema
+
+
+def _describe_type(typ: Type[Any]) -> Dict[Any, Any]:
+ # pylint: disable=too-many-branches
+
+ if inspect.isclass(typ) and issubclass(typ, BaseSchema):
+ return typ.json_schema(include_schema_definition=False)
+
+ elif inspect.isclass(typ) and issubclass(typ, BaseValueType):
+ return typ.json_schema()
+
+ elif is_generic_type_wrapper(typ):
+ wrapped = get_generic_type_wrapper_argument(typ)
+ return _describe_type(wrapped)
+
+ elif is_none_type(typ):
+ return {"type": "null"}
+
+ elif typ == int:
+ return {"type": "integer"}
+
+ elif typ == bool:
+ return {"type": "boolean"}
+
+ elif typ == str:
+ return {"type": "string"}
+
+ elif is_literal(typ):
+ lit = get_generic_type_arguments(typ)
+ return {"type": "string", "enum": lit}
+
+ elif is_optional(typ):
+ desc = _describe_type(get_optional_inner_type(typ))
+ if "type" in desc:
+ desc["type"] = [desc["type"], "null"]
+ return desc
+ else:
+ return {"anyOf": [{"type": "null"}, desc]}
+
+ elif is_union(typ):
+ variants = get_generic_type_arguments(typ)
+ return {"anyOf": [_describe_type(v) for v in variants]}
+
+ elif is_list(typ):
+ return {"type": "array", "items": _describe_type(get_generic_type_argument(typ))}
+
+ elif is_dict(typ):
+ key, val = get_generic_type_arguments(typ)
+
+ if inspect.isclass(key) and issubclass(key, BaseValueType):
+ assert (
+ key.__str__ is not BaseValueType.__str__
+ ), "To support derived 'BaseValueType', __str__ must be implemented."
+ else:
+            assert key == str, "We currently do not support any keys other than strings"
+
+ return {"type": "object", "additionalProperties": _describe_type(val)}
+
+ elif inspect.isclass(typ) and issubclass(typ, enum.Enum): # same as our is_enum(typ), but inlined for type checker
+ return {"type": "string", "enum": [str(v) for v in typ]}
+
+ raise NotImplementedError(f"Trying to get JSON schema for type '{typ}', which is not implemented")
+
+
+TSource = Union[None, "BaseSchema", Dict[str, Any]]
+
+
+def _create_untouchable(name: str) -> object:
+ class _Untouchable:
+ def __getattribute__(self, item_name: str) -> Any:
+ raise RuntimeError(f"You are not supposed to access object '{name}'.")
+
+ def __setattr__(self, item_name: str, value: Any) -> None:
+ raise RuntimeError(f"You are not supposed to access object '{name}'.")
+
+ return _Untouchable()
+
+
+class ObjectMapper:
+ def _create_tuple(self, tp: Type[Any], obj: Tuple[Any, ...], object_path: str) -> Tuple[Any, ...]:
+ types = get_generic_type_arguments(tp)
+ errs: List[DataValidationError] = []
+ res: List[Any] = []
+ for i, (t, val) in enumerate(zip(types, obj)):
+ try:
+ res.append(self.map_object(t, val, object_path=f"{object_path}[{i}]"))
+ except DataValidationError as e:
+ errs.append(e)
+ if len(errs) == 1:
+ raise errs[0]
+ elif len(errs) > 1:
+ raise AggregateDataValidationError(object_path, child_exceptions=errs)
+ return tuple(res)
+
+ def _create_dict(self, tp: Type[Any], obj: Dict[Any, Any], object_path: str) -> Dict[Any, Any]:
+ key_type, val_type = get_generic_type_arguments(tp)
+ try:
+ errs: List[DataValidationError] = []
+ res: Dict[Any, Any] = {}
+ for key, val in obj.items():
+ try:
+ nkey = self.map_object(key_type, key, object_path=f"{object_path}[{key}]")
+ nval = self.map_object(val_type, val, object_path=f"{object_path}[{key}]")
+ res[nkey] = nval
+ except DataValidationError as e:
+ errs.append(e)
+ if len(errs) == 1:
+ raise errs[0]
+ elif len(errs) > 1:
+ raise AggregateDataValidationError(object_path, child_exceptions=errs)
+ return res
+ except AttributeError as e:
+ raise DataValidationError(
+ f"Expected dict-like object, but failed to access its .items() method. Value was {obj}", object_path
+ ) from e
+
+ def _create_list(self, tp: Type[Any], obj: List[Any], object_path: str) -> List[Any]:
+ if isinstance(obj, str):
+ raise DataValidationError("expected list, got string", object_path)
+
+ inner_type = get_generic_type_argument(tp)
+ errs: List[DataValidationError] = []
+ res: List[Any] = []
+
+ try:
+ for i, val in enumerate(obj):
+ res.append(self.map_object(inner_type, val, object_path=f"{object_path}[{i}]"))
+ except DataValidationError as e:
+ errs.append(e)
+ except TypeError as e:
+ errs.append(DataValidationError(str(e), object_path))
+
+ if len(errs) == 1:
+ raise errs[0]
+ elif len(errs) > 1:
+ raise AggregateDataValidationError(object_path, child_exceptions=errs)
+ return res
+
+ def _create_str(self, obj: Any, object_path: str) -> str:
+ # we are willing to cast any primitive value to string, but no compound values are allowed
+ if is_obj_type(obj, (str, float, int)) or isinstance(obj, BaseValueType):
+ return str(obj)
+ elif is_obj_type(obj, bool):
+ raise DataValidationError(
+ "Expected str, found bool. Be careful, that YAML parsers consider even"
+ ' "no" and "yes" as a bool. Search for the Norway Problem for more'
+ " details. And please use quotes explicitly.",
+ object_path,
+ )
+ else:
+ raise DataValidationError(
+ f"expected str (or number that would be cast to string), but found type {type(obj)}", object_path
+ )
+
+ def _create_int(self, obj: Any, object_path: str) -> int:
+        # we don't want to make an int out of anything other than an int,
+        # except for BaseValueType class instances
+ if is_obj_type(obj, int) or isinstance(obj, BaseValueType):
+ return int(obj)
+ raise DataValidationError(f"expected int, found {type(obj)}", object_path)
+
+ def _create_union(self, tp: Type[T], obj: Any, object_path: str) -> T:
+ variants = get_generic_type_arguments(tp)
+ errs: List[DataValidationError] = []
+ for v in variants:
+ try:
+ return self.map_object(v, obj, object_path=object_path)
+ except DataValidationError as e:
+ errs.append(e)
+
+ raise DataValidationError("could not parse any of the possible variants", object_path, child_exceptions=errs)
+
+ def _create_optional(self, tp: Type[Optional[T]], obj: Any, object_path: str) -> Optional[T]:
+ inner: Type[Any] = get_optional_inner_type(tp)
+ if obj is None:
+ return None
+ else:
+ return self.map_object(inner, obj, object_path=object_path)
+
+ def _create_bool(self, obj: Any, object_path: str) -> bool:
+ if is_obj_type(obj, bool):
+ return obj
+ else:
+ raise DataValidationError(f"expected bool, found {type(obj)}", object_path)
+
+ def _create_literal(self, tp: Type[Any], obj: Any, object_path: str) -> Any:
+ expected = get_generic_type_arguments(tp)
+ if obj in expected:
+ return obj
+ else:
+ raise DataValidationError(f"'{obj}' does not match any of the expected values {expected}", object_path)
+
+ def _create_base_schema_object(self, tp: Type[Any], obj: Any, object_path: str) -> "BaseSchema":
+ if isinstance(obj, (dict, BaseSchema)):
+ return tp(obj, object_path=object_path)
+ raise DataValidationError(f"expected 'dict' or 'NoRenameBaseSchema' object, found '{type(obj)}'", object_path)
+
+ def create_value_type_object(self, tp: Type[Any], obj: Any, object_path: str) -> "BaseValueType":
+ if isinstance(obj, tp):
+ # if we already have a custom value type, just pass it through
+ return obj
+ else:
+            # no validation performed, the implementation does it in the constructor
+ try:
+ return tp(obj, object_path=object_path)
+ except ValueError as e:
+ if len(e.args) > 0 and isinstance(e.args[0], str):
+ msg = e.args[0]
+ else:
+ msg = f"Failed to validate value against {tp} type"
+ raise DataValidationError(msg, object_path) from e
+
+ def _create_default(self, obj: Any) -> Any:
+ if isinstance(obj, _lazy_default):
+ return obj.instantiate() # type: ignore
+ else:
+ return obj
+
+ def map_object(
+ self,
+ tp: Type[Any],
+ obj: Any,
+ default: Any = ...,
+ use_default: bool = False,
+ object_path: str = "/",
+ ) -> Any:
+ """
+        Given an expected type `tp` and a value object `obj`, return a new object of the given type
+        with the fields of `obj` mapped into it. Runtime type checking is performed during the mapping.
+ """
+
+ # Disabling these checks, because I think it's much more readable as a single function
+ # and it's not that large at this point. If it got larger, then we should definitely split it
+ # pylint: disable=too-many-branches,too-many-locals,too-many-statements
+
+ # default values
+ if obj is None and use_default:
+ return self._create_default(default)
+
+ # NoneType
+ elif is_none_type(tp):
+ if obj is None:
+ return None
+ else:
+ raise DataValidationError(f"expected None, found '{obj}'.", object_path)
+
+ # Optional[T] (could be technically handled by Union[*variants], but this way we have better error reporting)
+ elif is_optional(tp):
+ return self._create_optional(tp, obj, object_path)
+
+ # Union[*variants]
+ elif is_union(tp):
+ return self._create_union(tp, obj, object_path)
+
+ # after this, there is no place for a None object
+ elif obj is None:
+ raise DataValidationError(f"unexpected value 'None' for type {tp}", object_path)
+
+ # int
+ elif tp == int:
+ return self._create_int(obj, object_path)
+
+ # str
+ elif tp == str:
+ return self._create_str(obj, object_path)
+
+ # bool
+ elif tp == bool:
+ return self._create_bool(obj, object_path)
+
+ # float
+ elif tp == float:
+ raise NotImplementedError(
+ "Floating point values are not supported in the object mapper."
+ " Please implement them and be careful with type coercions"
+ )
+
+ # Literal[T]
+ elif is_literal(tp):
+ return self._create_literal(tp, obj, object_path)
+
+ # Dict[K,V]
+ elif is_dict(tp):
+ return self._create_dict(tp, obj, object_path)
+
+ # any Enums (probably used only internally in DataValidator)
+ elif is_enum(tp):
+ if isinstance(obj, tp):
+ return obj
+ else:
+ raise DataValidationError(f"unexpected value '{obj}' for enum '{tp}'", object_path)
+
+ # List[T]
+ elif is_list(tp):
+ return self._create_list(tp, obj, object_path)
+
+ # Tuple[A,B,C,D,...]
+ elif is_tuple(tp):
+ return self._create_tuple(tp, obj, object_path)
+
+ # type of obj and cls type match
+ elif is_obj_type(obj, tp):
+ return obj
+
+ # when the specified type is Any, just return the given value
+ # (pylint does something weird on the following line and it happens only on python 3.10)
+ elif tp == Any: # pylint: disable=comparison-with-callable
+ return obj
+
+ # BaseValueType subclasses
+ elif inspect.isclass(tp) and issubclass(tp, BaseValueType):
+ return self.create_value_type_object(tp, obj, object_path)
+
+ # BaseGenericTypeWrapper subclasses
+ elif is_generic_type_wrapper(tp):
+ inner_type = get_generic_type_wrapper_argument(tp)
+        # note: object_path must be passed by keyword, the third positional parameter is 'default'
+        obj_valid = self.map_object(inner_type, obj, object_path=object_path)
+ return tp(obj_valid, object_path=object_path) # type: ignore
+
+ # nested BaseSchema subclasses
+ elif inspect.isclass(tp) and issubclass(tp, BaseSchema):
+ return self._create_base_schema_object(tp, obj, object_path)
+
+ # if the object matches, just pass it through
+ elif inspect.isclass(tp) and isinstance(obj, tp):
+ return obj
+
+ # default error handler
+ else:
+ raise DataValidationError(
+ f"Type {tp} cannot be parsed. This is a implementation error. "
+ "Please fix your types in the class or improve the parser/validator.",
+ object_path,
+ )
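+
+    # A short illustration of map_object, assuming the types above:
+    #
+    #     ObjectMapper().map_object(List[int], [1, 2, 3])   # -> [1, 2, 3]
+    #     ObjectMapper().map_object(Optional[str], None)    # -> None
+    #     ObjectMapper().map_object(int, "1")               # raises DataValidationError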
+
+ def is_obj_type_valid(self, obj: Any, tp: Type[Any]) -> bool:
+ """
+ Runtime type checking. Validate, that a given object is of a given type.
+ """
+
+ try:
+ self.map_object(tp, obj)
+ return True
+ except (DataValidationError, ValueError):
+ return False
+
+ def _assign_default(self, obj: Any, name: str, python_type: Any, object_path: str) -> None:
+ cls = obj.__class__
+ default = self._create_default(getattr(cls, name, None))
+ value = self.map_object(python_type, default, object_path=f"{object_path}/{name}")
+ setattr(obj, name, value)
+
+ def _assign_field(self, obj: Any, name: str, python_type: Any, value: Any, object_path: str) -> None:
+ value = self.map_object(python_type, value, object_path=f"{object_path}/{name}")
+ setattr(obj, name, value)
+
+ def _assign_fields(self, obj: Any, source: Union[Dict[str, Any], "BaseSchema", None], object_path: str) -> Set[str]:
+ """
+ Order of assignment:
+ 1. all direct assignments
+ 2. assignments with conversion method
+ """
+ cls = obj.__class__
+ annot = cls.__dict__.get("__annotations__", {})
+ errs: List[DataValidationError] = []
+
+ used_keys: Set[str] = set()
+ for name, python_type in annot.items():
+ try:
+ if is_internal_field_name(name):
+ continue
+
+ # populate field
+ if source is None:
+ self._assign_default(obj, name, python_type, object_path)
+
+ # check for invalid configuration with both transformation function and default value
+ elif hasattr(obj, f"_{name}") and hasattr(obj, name):
+ raise RuntimeError(
+ f"Field '{obj.__class__.__name__}.{name}' has default value and transformation function at"
+ " the same time. That is now allowed. Store the default in the transformation function."
+ )
+
+ # there is a transformation function to create the value
+ elif hasattr(obj, f"_{name}") and callable(getattr(obj, f"_{name}")):
+ val = self._get_converted_value(obj, name, source, object_path)
+ self._assign_field(obj, name, python_type, val, object_path)
+ used_keys.add(name)
+
+ # source just contains the value
+ elif name in source:
+ val = source[name]
+ self._assign_field(obj, name, python_type, val, object_path)
+ used_keys.add(name)
+
+ # there is a default value, or the type is optional => store the default or null
+ elif hasattr(obj, name) or is_optional(python_type):
+ self._assign_default(obj, name, python_type, object_path)
+
+ # we expected a value but it was not there
+ else:
+ errs.append(DataValidationError(f"missing attribute '{name}'.", object_path))
+ except DataValidationError as e:
+ errs.append(e)
+
+ if len(errs) == 1:
+ raise errs[0]
+ elif len(errs) > 1:
+ raise AggregateDataValidationError(object_path, errs)
+ return used_keys
+
+ def _get_converted_value(self, obj: Any, key: str, source: TSource, object_path: str) -> Any:
+ """
+ Get a value of a field by invoking appropriate transformation function.
+ """
+ try:
+ func = getattr(obj.__class__, f"_{key}")
+ argc = len(inspect.signature(func).parameters)
+ if argc == 1:
+ # it is a static method
+ return func(source)
+ elif argc == 2:
+                # it is an instance method
+ return func(_create_untouchable("obj"), source)
+ else:
+ raise RuntimeError("Transformation function has wrong number of arguments")
+ except ValueError as e:
+ if len(e.args) > 0 and isinstance(e.args[0], str):
+ msg = e.args[0]
+ else:
+ msg = "Failed to validate value type"
+ raise DataValidationError(msg, object_path) from e
+
+ def object_constructor(self, obj: Any, source: Union["BaseSchema", Dict[Any, Any]], object_path: str) -> None:
+ """
+        Delegated constructor for the BaseSchema class.
+
+        This method is delegated to the mapper because of renaming: this way, we don't need
+        a different BaseSchema class when we want to have dynamically renamed fields.
+ """
+ # As this is a delegated constructor, we must ignore protected access warnings
+ # pylint: disable=protected-access
+
+ # sanity check
+ if not isinstance(source, (BaseSchema, dict)): # type: ignore
+ raise DataValidationError(f"expected dict-like object, found '{type(source)}'", object_path)
+
+ # construct lower level schema first if configured to do so
+ if obj._LAYER is not None:
+ source = obj._LAYER(source, object_path=object_path) # pylint: disable=not-callable
+
+ # assign fields
+ used_keys = self._assign_fields(obj, source, object_path)
+
+ # check for unused keys in the source object
+ if source and not isinstance(source, BaseSchema):
+ unused = source.keys() - used_keys
+ if len(unused) > 0:
+ keys = ", ".join((f"'{u}'" for u in unused))
+ raise DataValidationError(
+ f"unexpected extra key(s) {keys}",
+ object_path,
+ )
+
+ # validate the constructed value
+ try:
+ obj._validate()
+ except ValueError as e:
+ raise DataValidationError(e.args[0] if len(e.args) > 0 else "Validation error", object_path) from e
+
+
+class BaseSchema(Serializable):
+ """
+ Base class for modeling configuration schema. It somewhat resembles standard dataclasses with additional
+ functionality:
+
+ * type validation
+ * data conversion
+
+    To create an instance of this class, you have to provide source data in the form of a dict-like object:
+    generally a raw dict or another `BaseSchema` instance. The provided data object is traversed, transformed
+    and validated before being assigned to the appropriate fields (attributes).
+
+ Fields (attributes)
+ ===================
+
+    The fields (or attributes) of the class are defined the same way as in a dataclass: by creating
+    class-level type-annotated fields. An example of that is:
+
+ class A(BaseSchema):
+ awesome_number: int
+
+    If your `BaseSchema` instance has a field whose type is itself a BaseSchema, its value is recursively
+    created from the nested input data. This way, you can specify a complex tree of BaseSchemas and use
+    the root BaseSchema to create an instance of everything.
+
+ Transformation
+ ==============
+
+ You can provide the BaseSchema class with a field and a function with the same name, but starting with
+    underscore ('_'). For example, you could have a field called `awesome_number` and a function called
+ `_awesome_number(self, source)`. The function takes one argument - the source data (optionally with self,
+ but you are not supposed to touch that). It can read any data from the source object and return a value of
+ an appropriate type, which will be assigned to the field `awesome_number`. If you want to report an error
+ during validation, raise a `ValueError` exception.
+
+ Using this, you can convert any input values into any type and field you want. To make the conversion easier
+ to write, you could also specify a special class variable called `_LAYER` pointing to another
+    BaseSchema class. This causes the source object to be first parsed as the specified additional
+    layer of BaseSchema and only after that used as a source for this class, which allows nesting
+    of transformation functions.
+
+ Validation
+ ==========
+
+ All assignments to fields during object construction are checked at runtime for proper types. This means,
+ you are free to use an untrusted source object and turn it into a data structure, where you are sure what
+ is what.
+
+ You can also define a `_validate` method, which will be called once the whole data structure is built. You
+    can validate the data there and raise a `ValueError` if it is invalid.
+
+ Default values
+ ==============
+
+    If you create a field with a value, it will be used as the default whenever the data are not
+    present in the source object. As a special case, the default value for an Optional type is None
+    unless specified otherwise. You are not allowed to have both a default value and a
+    transformation function on the same field.
+
+ Example
+ =======
+
+ See tests/utils/test_modelling.py for example usage.
+ """
+
+ _LAYER: Optional[Type["BaseSchema"]] = None
+ _MAPPER: ObjectMapper = ObjectMapper()
+
+ def __init__(self, source: TSource = None, object_path: str = ""): # pylint: disable=[super-init-not-called]
+ # save source data (and drop information about nullness)
+ source = source or {}
+ self.__source: Union[Dict[str, Any], BaseSchema] = source
+
+ # delegate the rest of the constructor
+ self._MAPPER.object_constructor(self, source, object_path)
+
+ def get_unparsed_data(self) -> Dict[str, Any]:
+ if isinstance(self.__source, BaseSchema):
+ return self.__source.get_unparsed_data()
+ elif isinstance(self.__source, Renamed):
+ return self.__source.original()
+ else:
+ return self.__source
+
+ def __getitem__(self, key: str) -> Any:
+ if not hasattr(self, key):
+ raise RuntimeError(f"Object '{self}' of type '{type(self)}' does not have field named '{key}'")
+ return getattr(self, key)
+
+ def __contains__(self, item: Any) -> bool:
+ return hasattr(self, item)
+
+ def _validate(self) -> None:
+ """
+        Validation procedure called after all fields are assigned. Should raise a ValueError in case of failure.
+ """
+
+ def __eq__(self, o: object) -> bool:
+ cls = self.__class__
+ if not isinstance(o, cls):
+ return False
+
+ annot = cls.__dict__.get("__annotations__", {})
+ for name in annot.keys():
+ if getattr(self, name) != getattr(o, name):
+ return False
+
+ return True
+
+ @classmethod
+ def json_schema(cls: Type["BaseSchema"], include_schema_definition: bool = True) -> Dict[Any, Any]:
+ if cls._LAYER is not None:
+ return cls._LAYER.json_schema(include_schema_definition=include_schema_definition)
+
+ schema: Dict[Any, Any] = {}
+ if include_schema_definition:
+ schema["$schema"] = "https://json-schema.org/draft/2020-12/schema"
+ if cls.__doc__ is not None:
+ schema["description"] = _split_docstring(cls.__doc__)[0]
+ schema["type"] = "object"
+ schema["properties"] = _get_properties_schema(cls)
+
+ return schema
+
+ def to_dict(self) -> Dict[Any, Any]:
+ res: Dict[Any, Any] = {}
+ cls = self.__class__
+ annot = cls.__dict__.get("__annotations__", {})
+
+ for name in annot:
+ res[name] = Serializable.serialize(getattr(self, name))
+ return res
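+
+# A compact sketch of the features described in the docstring above
+# (hypothetical field names, for illustration only):
+#
+#     class Flags(BaseSchema):
+#         verbose: bool = False       # field with a default value
+#         level: int
+#
+#         def _level(self, source):   # transformation function for 'level'
+#             return int(source["level"]) * 10
+#
+#         def _validate(self):
+#             if self.level > 100:
+#                 raise ValueError("level too high")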
+
+
+class RenamingObjectMapper(ObjectMapper):
+ """
+    Same as ObjectMapper, but it uses the collection wrappers from the `renaming` module to perform dynamic field renaming.
+
+ More specifically:
+ - it renames all properties in (nested) objects
+ - it does not rename keys in dictionaries
+ """
+
+ def _create_dict(self, tp: Type[Any], obj: Dict[Any, Any], object_path: str) -> Dict[Any, Any]:
+ if isinstance(obj, Renamed):
+ obj = obj.original()
+ return super()._create_dict(tp, obj, object_path)
+
+ def _create_base_schema_object(self, tp: Type[Any], obj: Any, object_path: str) -> "BaseSchema":
+ if isinstance(obj, dict):
+ obj = renamed(obj)
+ return super()._create_base_schema_object(tp, obj, object_path)
+
+ def object_constructor(self, obj: Any, source: Union["BaseSchema", Dict[Any, Any]], object_path: str) -> None:
+ if isinstance(source, dict):
+ source = renamed(source)
+ return super().object_constructor(obj, source, object_path)
+
+
+# exported as standalone functions for simplicity and compatibility
+is_obj_type_valid = ObjectMapper().is_obj_type_valid
+map_object = ObjectMapper().map_object
+
+
+class ConfigSchema(BaseSchema):
+ """
+    Same as BaseSchema, but maps its input with RenamingObjectMapper.
+ """
+
+ _MAPPER: ObjectMapper = RenamingObjectMapper()
diff --git a/manager/knot_resolver_manager/utils/modeling/base_value_type.py b/manager/knot_resolver_manager/utils/modeling/base_value_type.py
new file mode 100644
index 00000000..dff4a3fe
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/modeling/base_value_type.py
@@ -0,0 +1,45 @@
+from abc import ABC, abstractmethod # pylint: disable=[no-name-in-module]
+from typing import Any, Dict, Type
+
+
+class BaseTypeABC(ABC):
+ @abstractmethod
+ def __init__(self, source_value: Any, object_path: str = "/") -> None:
+ pass
+
+ @abstractmethod
+ def __int__(self) -> int:
+ raise NotImplementedError(f" return 'int()' value for {type(self).__name__} is not implemented.")
+
+ @abstractmethod
+ def __str__(self) -> str:
+ raise NotImplementedError(f"return 'str()' value for {type(self).__name__} is not implemented.")
+
+ @abstractmethod
+ def serialize(self) -> Any:
+ """
+ Used for dumping configuration. Returns a JSON-serializable object from which the object
+ can be recreated again using the constructor.
+
+ It's not necessary to return the same structure that was given as an input. It only has
+ to be the same semantically.
+ """
+ raise NotImplementedError(f"{type(self).__name__}'s' 'serialize()' not implemented.")
+
+
+class BaseValueType(BaseTypeABC):
+ """
+    Subclasses of this class can be used as type annotations in schemas. When a value
+    is being parsed from a serialized format (e.g. JSON/YAML), an object is created by
+    calling the constructor of the annotated type on the field value. The only limitation
+    is that the value MUST NOT be `None`.
+
+    No other validation is performed on the wrapped value. If you want to validate it
+    during creation, raise a `ValueError` in case of errors.
+ """
+
+ @classmethod
+ @abstractmethod
+ def json_schema(cls: Type["BaseValueType"]) -> Dict[Any, Any]:
+ raise NotImplementedError()
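+
+
+# A minimal sketch of a concrete subclass (hypothetical 'Port' type, for illustration):
+#
+#     class Port(BaseValueType):
+#         def __init__(self, source_value: Any, object_path: str = "/") -> None:
+#             if not isinstance(source_value, int) or not 0 < source_value < 65536:
+#                 raise ValueError(f"invalid port number '{source_value}'")
+#             self._value = source_value
+#
+#         def __int__(self) -> int:
+#             return self._value
+#
+#         def __str__(self) -> str:
+#             return str(self._value)
+#
+#         def serialize(self) -> Any:
+#             return self._value
+#
+#         @classmethod
+#         def json_schema(cls) -> Dict[Any, Any]:
+#             return {"type": "integer", "minimum": 1, "maximum": 65535}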
diff --git a/manager/knot_resolver_manager/utils/modeling/exceptions.py b/manager/knot_resolver_manager/utils/modeling/exceptions.py
new file mode 100644
index 00000000..dafb1ee2
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/modeling/exceptions.py
@@ -0,0 +1,56 @@
+from typing import Iterable, List
+
+from knot_resolver_manager.exceptions import KresManagerException
+
+
+class DataModelingBaseException(KresManagerException):
+ """
+    Base class for all exceptions used in modeling.
+ """
+
+
+class DataParsingError(DataModelingBaseException):
+ pass
+
+
+class DataDescriptionError(DataModelingBaseException):
+ pass
+
+
+class DataValidationError(DataModelingBaseException):
+ def __init__(self, msg: str, tree_path: str, child_exceptions: "Iterable[DataValidationError]" = tuple()) -> None:
+ super().__init__(msg)
+ self._tree_path = tree_path
+ self._child_exceptions = child_exceptions
+
+ def where(self) -> str:
+ return self._tree_path
+
+ def msg(self):
+ return f"[{self.where()}] " + super().__str__()
+
+ def recursive_msg(self, indentation_level: int = 0) -> str:
+ INDENT = indentation_level * "\t"
+ msg_parts: List[str] = [f"{INDENT}{self.msg()}"]
+ for c in self._child_exceptions:
+ msg_parts.append(c.recursive_msg(indentation_level + 1))
+ return "\n".join(msg_parts)
+
+ def __str__(self) -> str:
+ return self.recursive_msg()
+
+
+class AggregateDataValidationError(DataValidationError):
+ def __init__(self, object_path: str, child_exceptions: "Iterable[DataValidationError]") -> None:
+ super().__init__("error due to lower level exceptions", object_path, child_exceptions)
+
+ def recursive_msg(self, indentation_level: int = 0) -> str:
+ inc = 0
+ msg_parts: List[str] = []
+ if indentation_level == 0:
+ inc = 1
+ msg_parts.append("multiple configuration errors detected:")
+
+ for c in self._child_exceptions:
+ msg_parts.append(c.recursive_msg(indentation_level + inc))
+ return "\n".join(msg_parts)
diff --git a/manager/knot_resolver_manager/utils/modeling/json_pointer.py b/manager/knot_resolver_manager/utils/modeling/json_pointer.py
new file mode 100644
index 00000000..adbfa36d
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/modeling/json_pointer.py
@@ -0,0 +1,89 @@
+"""
+Implements JSON pointer resolution based on RFC 6901:
+https://www.rfc-editor.org/rfc/rfc6901
+"""
+
+
+from typing import Any, Optional, Tuple, Union
+
+# JSONPtrAddressable = Optional[Union[Dict[str, "JSONPtrAddressable"], List["JSONPtrAddressable"], int, float, bool, str, None]]
+JSONPtrAddressable = Any # the recursive definition above is not valid :(
+
+
+class _JSONPtr:
+ @staticmethod
+ def _decode_token(token: str) -> str:
+ """
+ Resolves escaped characters ~ and /
+ """
+
+ # the order of the replace statements is important, do not change without
+ # consulting the RFC
+ return token.replace("~1", "/").replace("~0", "~")
+
+ @staticmethod
+ def _encode_token(token: str) -> str:
+ return token.replace("~", "~0").replace("/", "~1")
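+
+    # RFC 6901 escaping, for illustration:
+    #     _JSONPtr._decode_token("a~1b") == "a/b"
+    #     _JSONPtr._encode_token("m~n") == "m~0n"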
+
+ def __init__(self, ptr: str):
+ if ptr == "":
+ # pointer to the root
+ self.tokens = []
+
+ else:
+ if ptr[0] != "/":
+ raise SyntaxError(
+ f"JSON pointer '{ptr}' invalid: the first character MUST be '/' or the pointer must be empty"
+ )
+
+ ptr = ptr[1:]
+ self.tokens = [_JSONPtr._decode_token(tok) for tok in ptr.split("/")]
+
+ def resolve(
+ self, obj: JSONPtrAddressable
+ ) -> Tuple[Optional[JSONPtrAddressable], JSONPtrAddressable, Union[str, int, None]]:
+ """
+ Returns (Optional[parent], Optional[direct value], key of value in the parent object)
+ """
+
+ parent: Optional[JSONPtrAddressable] = None
+ current = obj
+ current_ptr = ""
+ token: Union[int, str, None] = None
+
+ for token in self.tokens:
+ if current is None:
+ raise ValueError(
+ f"JSON pointer cannot reference nested non-existent object: object at ptr '{current_ptr}' already points to None, cannot nest deeper with token '{token}'"
+ )
+
+ elif isinstance(current, (bool, int, float, str)):
+ raise ValueError(f"object at '{current_ptr}' is a scalar, JSON pointer cannot point into it")
+
+ else:
+ parent = current
+ if isinstance(current, list):
+ if token == "-":
+ current = None
+ else:
+ try:
+ token = int(token)
+ current = current[token]
+                        except ValueError as e:
+                            raise ValueError(
+                                f"invalid JSON pointer: list '{current_ptr}' requires numbers as keys, instead got '{token}'"
+                            ) from e
+
+ elif isinstance(current, dict):
+ current = current.get(token, None)
+
+ current_ptr += f"/{token}"
+
+ return parent, current, token
+
+
+def json_ptr_resolve(
+ obj: JSONPtrAddressable,
+ ptr: str,
+) -> Tuple[Optional[JSONPtrAddressable], Optional[JSONPtrAddressable], Union[str, int, None]]:
+ return _JSONPtr(ptr).resolve(obj)
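+
+
+# For illustration:
+#
+#     parent, value, token = json_ptr_resolve({"a": [1, 2]}, "/a/1")
+#     # parent == [1, 2], value == 2, token == 1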
diff --git a/manager/knot_resolver_manager/utils/modeling/parsing.py b/manager/knot_resolver_manager/utils/modeling/parsing.py
new file mode 100644
index 00000000..32d2a2ea
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/modeling/parsing.py
@@ -0,0 +1,91 @@
+import json
+from enum import Enum, auto
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import yaml
+from yaml.constructor import ConstructorError
+from yaml.nodes import MappingNode
+
+from .exceptions import DataParsingError
+from .renaming import renamed
+
+
+# custom hook for 'json.loads()' to detect duplicate keys in data
+# source: https://stackoverflow.com/q/14902299/12858520
+def _json_raise_duplicates(pairs: List[Tuple[Any, Any]]) -> Optional[Any]:
+ dict_out: Dict[Any, Any] = {}
+ for key, val in pairs:
+ if key in dict_out:
+ raise DataParsingError(f"Duplicate attribute key detected: {key}")
+ dict_out[key] = val
+ return dict_out
+
+
+# custom loader for 'yaml.load()' to detect duplicate keys in data
+# source: https://gist.github.com/pypt/94d747fe5180851196eb
+class _RaiseDuplicatesLoader(yaml.SafeLoader):
+ def construct_mapping(self, node: Union[MappingNode, Any], deep: bool = False) -> Dict[Any, Any]:
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None, f"expected a mapping node, but found {node.id}", node.start_mark)
+ mapping: Dict[Any, Any] = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep) # type: ignore
+            # we need to check that the key object can be used in a hash table
+ try:
+ _ = hash(key) # type: ignore
+ except TypeError as exc:
+ raise ConstructorError(
+ "while constructing a mapping",
+ node.start_mark,
+ f"found unacceptable key ({exc})",
+ key_node.start_mark,
+ )
+
+ # check for duplicate keys
+ if key in mapping:
+ raise DataParsingError(f"duplicate key detected: {key_node.start_mark}")
+ value = self.construct_object(value_node, deep=deep) # type: ignore
+ mapping[key] = value
+ return mapping
+
+
+class DataFormat(Enum):
+ YAML = auto()
+ JSON = auto()
+
+ def parse_to_dict(self, text: str) -> Any:
+ if self is DataFormat.YAML:
+ # RaiseDuplicatesLoader extends yaml.SafeLoader, so this should be safe
+ # https://python.land/data-processing/python-yaml#PyYAML_safe_load_vs_load
+ return renamed(yaml.load(text, Loader=_RaiseDuplicatesLoader)) # type: ignore
+ elif self is DataFormat.JSON:
+ return renamed(json.loads(text, object_pairs_hook=_json_raise_duplicates))
+ else:
+ raise NotImplementedError(f"Parsing of format '{self}' is not implemented")
+
+ def dict_dump(self, data: Dict[str, Any]) -> str:
+ if self is DataFormat.YAML:
+ return yaml.safe_dump(data) # type: ignore
+ elif self is DataFormat.JSON:
+ return json.dumps(data)
+ else:
+ raise NotImplementedError(f"Exporting to '{self}' format is not implemented")
+
+
+def parse_yaml(data: str) -> Any:
+ return DataFormat.YAML.parse_to_dict(data)
+
+
+def parse_json(data: str) -> Any:
+ return DataFormat.JSON.parse_to_dict(data)
+
+
+def try_to_parse(data: str) -> Any:
+ """Attempt to parse the data as a YAML or JSON string."""
+ try:
+ return parse_yaml(data)
+ except yaml.YAMLError as ye:
+ try:
+ return parse_json(data)
+ except json.JSONDecodeError as je:
+ raise DataParsingError(f"failed to parse data, YAML: {ye}, JSON: {je}")
diff --git a/manager/knot_resolver_manager/utils/modeling/query.py b/manager/knot_resolver_manager/utils/modeling/query.py
new file mode 100644
index 00000000..cfea82f6
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/modeling/query.py
@@ -0,0 +1,183 @@
+import copy
+from abc import ABC, abstractmethod # pylint: disable=[no-name-in-module]
+from typing import Any, List, Optional, Tuple, Union
+
+from typing_extensions import Literal
+
+from knot_resolver_manager.utils.modeling.base_schema import BaseSchema, map_object
+from knot_resolver_manager.utils.modeling.json_pointer import json_ptr_resolve
+
+
+class PatchError(Exception):
+ pass
+
+
+class Op(BaseSchema, ABC):
+ @abstractmethod
+ def eval(self, fakeroot: Any) -> Any:
+ """
+        Modifies the given fakeroot and returns the resulting one.
+ """
+
+ def _resolve_ptr(self, fakeroot: Any, ptr: str) -> Tuple[Any, Any, Union[str, int, None]]:
+ # Lookup tree part based on the given JSON pointer
+ parent, obj, token = json_ptr_resolve(fakeroot["root"], ptr)
+
+        # the pointer referenced the root itself, so use the fakeroot as the parent
+ if parent is None:
+ parent = fakeroot
+ token = "root"
+
+ assert token is not None
+
+ return parent, obj, token
+
+
+class AddOp(Op):
+ op: Literal["add"]
+ path: str
+ value: Any
+
+ def eval(self, fakeroot: Any) -> Any:
+ parent, _obj, token = self._resolve_ptr(fakeroot, self.path)
+
+ if isinstance(parent, dict):
+ parent[token] = self.value
+ elif isinstance(parent, list):
+ if token == "-":
+ parent.append(self.value)
+ else:
+ assert isinstance(token, int)
+ parent.insert(token, self.value)
+ else:
+ assert False, "never happens"
+
+ return fakeroot
+
+
+class RemoveOp(Op):
+ op: Literal["remove"]
+ path: str
+
+ def eval(self, fakeroot: Any) -> Any:
+ parent, _obj, token = self._resolve_ptr(fakeroot, self.path)
+ del parent[token]
+ return fakeroot
+
+
+class ReplaceOp(Op):
+ op: Literal["replace"]
+ path: str
+ value: str
+
+ def eval(self, fakeroot: Any) -> Any:
+ parent, obj, token = self._resolve_ptr(fakeroot, self.path)
+
+ if obj is None:
+ raise PatchError("the value you are trying to replace is null")
+ parent[token] = self.value
+ return fakeroot
+
+
+class MoveOp(Op):
+ op: Literal["move"]
+ source: str
+ path: str
+
+ def _source(self, source):
+ if "from" not in source:
+ raise ValueError("missing property 'from' in 'move' JSON patch operation")
+ return str(source["from"])
+
+ def eval(self, fakeroot: Any) -> Any:
+ if self.path.startswith(self.source):
+ raise PatchError("can't move value into itself")
+
+ _parent, obj, _token = self._resolve_ptr(fakeroot, self.source)
+ newobj = copy.deepcopy(obj)
+
+ fakeroot = RemoveOp({"op": "remove", "path": self.source}).eval(fakeroot)
+ fakeroot = AddOp({"path": self.path, "value": newobj, "op": "add"}).eval(fakeroot)
+ return fakeroot
+
+
+class CopyOp(Op):
+ op: Literal["copy"]
+ source: str
+ path: str
+
+ def _source(self, source):
+ if "from" not in source:
+ raise ValueError("missing property 'from' in 'copy' JSON patch operation")
+ return str(source["from"])
+
+ def eval(self, fakeroot: Any) -> Any:
+ _parent, obj, _token = self._resolve_ptr(fakeroot, self.source)
+ newobj = copy.deepcopy(obj)
+
+ fakeroot = AddOp({"path": self.path, "value": newobj, "op": "add"}).eval(fakeroot)
+ return fakeroot
+
+
+class TestOp(Op):
+ op: Literal["test"]
+ path: str
+ value: Any
+
+ def eval(self, fakeroot: Any) -> Any:
+ _parent, obj, _token = self._resolve_ptr(fakeroot, self.path)
+
+ if obj != self.value:
+ raise PatchError("test failed")
+
+ return fakeroot
+
+
+def query(
+ original: Any, method: Literal["get", "delete", "put", "patch"], ptr: str, payload: Any
+) -> Tuple[Any, Optional[Any]]:
+ ########################################
+ # Prepare data we will be working on
+
+ # First of all, we consider the original data to be immutable. So we need to make a copy
+ # in order to freely mutate them
+ dataroot = copy.deepcopy(original)
+
+ # To simplify referencing the root, create a fake root node
+ fakeroot = {"root": dataroot}
+
+ #########################################
+ # Handle the actual requested operation
+
+ # get = return what the path selector picks
+ if method == "get":
+        _parent, obj, _token = json_ptr_resolve(fakeroot, f"/root{ptr}")
+ return fakeroot["root"], obj
+
+ elif method == "delete":
+ fakeroot = RemoveOp({"op": "remove", "path": ptr}).eval(fakeroot)
+ return fakeroot["root"], None
+
+ elif method == "put":
+ parent, obj, token = json_ptr_resolve(fakeroot, f"/root{ptr}")
+ assert parent is not None # we know this due to the fakeroot
+ if isinstance(parent, list) and token == "-":
+ parent.append(payload)
+ else:
+ parent[token] = payload
+ return fakeroot["root"], None
+
+ elif method == "patch":
+ tp = List[Union[AddOp, RemoveOp, MoveOp, CopyOp, TestOp, ReplaceOp]]
+ transaction: tp = map_object(tp, payload)
+
+ for i, op in enumerate(transaction):
+ try:
+ fakeroot = op.eval(fakeroot)
+ except PatchError as e:
+ raise ValueError(f"json patch transaction failed on step {i}") from e
+
+ return fakeroot["root"], None
+
+ else:
+ assert False, "invalid operation, never happens"
diff --git a/manager/knot_resolver_manager/utils/modeling/renaming.py b/manager/knot_resolver_manager/utils/modeling/renaming.py
new file mode 100644
index 00000000..2420ed04
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/modeling/renaming.py
@@ -0,0 +1,90 @@
+"""
+This module implements a standard dict and list alternatives, which can dynamically rename its keys replacing `-` with `_`.
+They persist in nested data structes, meaning that if you try to obtain a dict from Renamed variant, you will actually
+get RenamedDict back instead.
+
+Usage:
+
+d = dict()
+l = list()
+
+rd = renamed(d)
+rl = renamed(l)
+
+assert isinstance(rd, Renamed) == True
+assert l = rl.original()
+"""
+
+from abc import ABC, abstractmethod # pylint: disable=[no-name-in-module]
+from typing import Any, Dict, List, TypeVar
+
+
+class Renamed(ABC):
+ @abstractmethod
+ def original(self) -> Any:
+ """
+ Returns a data structure, which is the source without dynamic renamings
+ """
+
+ @staticmethod
+ def map_public_to_private(name: Any) -> Any:
+ if isinstance(name, str):
+ return name.replace("_", "-")
+ return name
+
+ @staticmethod
+ def map_private_to_public(name: Any) -> Any:
+ if isinstance(name, str):
+ return name.replace("-", "_")
+ return name
+
+
+K = TypeVar("K")
+V = TypeVar("V")
+
+
+class RenamedDict(Dict[K, V], Renamed):
+ def keys(self) -> Any:
+ keys = super().keys()
+ return {Renamed.map_private_to_public(key) for key in keys}
+
+ def __getitem__(self, key: K) -> V:
+ key = Renamed.map_public_to_private(key)
+ res = super().__getitem__(key)
+ return renamed(res)
+
+ def __setitem__(self, key: K, value: V) -> None:
+ key = Renamed.map_public_to_private(key)
+ return super().__setitem__(key, value)
+
+ def __contains__(self, key: object) -> bool:
+ key = Renamed.map_public_to_private(key)
+ return super().__contains__(key)
+
+ def items(self) -> Any:
+ for k, v in super().items():
+ yield Renamed.map_private_to_public(k), renamed(v)
+
+ def original(self) -> Dict[K, V]:
+ return dict(super().items())
+
+
+class RenamedList(List[V], Renamed): # type: ignore
+ def __getitem__(self, key: Any) -> Any:
+ res = super().__getitem__(key)
+ return renamed(res)
+
+ def original(self) -> Any:
+ return list(super().__iter__())
+
+
+def renamed(obj: Any) -> Any:
+ if isinstance(obj, dict):
+ return RenamedDict(**obj)
+ elif isinstance(obj, list):
+ return RenamedList(obj)
+ else:
+ return obj
+
+
+__all__ = ["renamed", "Renamed"]
diff --git a/manager/knot_resolver_manager/utils/modeling/types.py b/manager/knot_resolver_manager/utils/modeling/types.py
new file mode 100644
index 00000000..4ce9aecc
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/modeling/types.py
@@ -0,0 +1,105 @@
+# pylint: disable=comparison-with-callable
+
+
+import enum
+import inspect
+import sys
+from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union
+
+from typing_extensions import Literal
+
+from .base_generic_type_wrapper import BaseGenericTypeWrapper
+
+NoneType = type(None)
+
+
+def is_optional(tp: Any) -> bool:
+ origin = getattr(tp, "__origin__", None)
+ args = get_generic_type_arguments(tp)
+
+ return origin == Union and len(args) == 2 and args[1] == NoneType # type: ignore
+
+
+def is_dict(tp: Any) -> bool:
+ return getattr(tp, "__origin__", None) in (Dict, dict)
+
+
+def is_enum(tp: Any) -> bool:
+ return inspect.isclass(tp) and issubclass(tp, enum.Enum)
+
+
+def is_list(tp: Any) -> bool:
+ return getattr(tp, "__origin__", None) in (List, list)
+
+
+def is_tuple(tp: Any) -> bool:
+ return getattr(tp, "__origin__", None) in (Tuple, tuple)
+
+
+def is_union(tp: Any) -> bool:
+ """Returns true even for optional types, because they are just a Union[T, NoneType]"""
+ return getattr(tp, "__origin__", None) == Union # type: ignore
+
+
+def is_literal(tp: Any) -> bool:
+ if sys.version_info.minor == 6:
+ return isinstance(tp, type(Literal))
+ else:
+ return getattr(tp, "__origin__", None) == Literal
+
+
+def is_generic_type_wrapper(tp: Any) -> bool:
+ orig = getattr(tp, "__origin__", None)
+ return inspect.isclass(orig) and issubclass(orig, BaseGenericTypeWrapper)
+
+
+def get_generic_type_arguments(tp: Any) -> List[Any]:
+ default: List[Any] = []
+ if sys.version_info.minor == 6 and is_literal(tp):
+ return getattr(tp, "__values__")
+ else:
+ return getattr(tp, "__args__", default)
+
+
+def get_generic_type_argument(tp: Any) -> Any:
+ """same as function get_generic_type_arguments, but expects just one type argument"""
+
+ args = get_generic_type_arguments(tp)
+ assert len(args) == 1
+ return args[0]
+
+
+def get_generic_type_wrapper_argument(tp: Type["BaseGenericTypeWrapper[Any]"]) -> Any:
+ assert hasattr(tp, "__origin__")
+ origin = getattr(tp, "__origin__")
+
+ assert hasattr(origin, "__orig_bases__")
+ orig_base: List[Any] = getattr(origin, "__orig_bases__", [])[0]
+
+ arg = get_generic_type_argument(tp)
+ return get_generic_type_argument(orig_base[arg])
+
+
+def is_none_type(tp: Any) -> bool:
+ return tp is None or tp == NoneType
+
+
+def get_attr_type(obj: Any, attr_name: str) -> Any:
+ assert hasattr(obj, attr_name)
+ assert hasattr(obj, "__annotations__")
+ annot = getattr(type(obj), "__annotations__")
+ assert attr_name in annot
+ return annot[attr_name]
+
+
+T = TypeVar("T")
+
+
+def get_optional_inner_type(optional: Type[Optional[T]]) -> Type[T]:
+ assert is_optional(optional)
+ t: Type[T] = get_generic_type_arguments(optional)[0]
+ return t
+
+
+def is_internal_field_name(field_name: str) -> bool:
+ return field_name.startswith("_")
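+
+
+# For illustration:
+#
+#     is_optional(Optional[int])                   # True
+#     get_optional_inner_type(Optional[int])       # int
+#     is_literal(Literal["a", "b"])                # True
+#     get_generic_type_arguments(Dict[str, int])   # (str, int)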
diff --git a/manager/knot_resolver_manager/utils/requests.py b/manager/knot_resolver_manager/utils/requests.py
new file mode 100644
index 00000000..ab95c5d2
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/requests.py
@@ -0,0 +1,88 @@
+import socket
+import sys
+from http.client import HTTPConnection
+from typing import Any, Optional, Union
+from urllib.error import HTTPError, URLError
+from urllib.request import AbstractHTTPHandler, Request, build_opener, install_opener, urlopen
+
+from typing_extensions import Literal
+
+
+class Response:
+ def __init__(self, status: int, body: str) -> None:
+ self.status = status
+ self.body = body
+
+ def __repr__(self) -> str:
+ return f"status: {self.status}\nbody:\n{self.body}"
+
+
+def request(
+ method: Literal["GET", "POST", "HEAD", "PUT", "DELETE"],
+ url: str,
+ body: Optional[str] = None,
+ content_type: str = "application/json",
+) -> Response:
+ req = Request(
+ url,
+ method=method,
+ data=body.encode("utf8") if body is not None else None,
+ headers={"Content-Type": content_type},
+ )
+ # req.add_header("Authorization", _authorization_header)
+
+ try:
+ with urlopen(req) as response:
+ return Response(response.status, response.read().decode("utf8"))
+ except HTTPError as err:
+ return Response(err.code, err.read().decode("utf8"))
+ except URLError as err:
+ if err.errno == 111 or isinstance(err.reason, ConnectionRefusedError):
+ print("Connection refused.")
+ print(f"\tURL: {url}")
+ print("Is the URL correct?")
+ print("\tUnix socket would start with http+unix:// and URL encoded path.")
+ print("\tInet sockets would start with http:// and domain or ip")
+ else:
+ print(f"{err}: url={url}", file=sys.stderr)
+ sys.exit(1)
+
+
+# Code heavily inspired by requests-unixsocket
+# https://github.com/msabramo/requests-unixsocket/blob/master/requests_unixsocket/adapters.py
+class UnixHTTPConnection(HTTPConnection):
+ def __init__(self, unix_socket_url: str, timeout: Union[int, float] = 60):
+ """Create an HTTP connection to a unix domain socket
+ :param unix_socket_url: A URL with a scheme of 'http+unix' and the
+ netloc is a percent-encoded path to a unix domain socket. E.g.:
+ 'http+unix://%2Ftmp%2Fprofilesvc.sock/status/pid'
+ """
+ super().__init__("localhost", timeout=timeout)
+ self.unix_socket_path = unix_socket_url
+ self.timeout = timeout
+ self.sock: Optional[socket.socket] = None
+
+ def __del__(self): # base class does not have d'tor
+ if self.sock:
+ self.sock.close()
+
+ def connect(self):
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.settimeout(1) # there is something weird stored in self.timeout
+ sock.connect(self.unix_socket_path)
+ self.sock = sock
+
+
+class UnixHTTPHandler(AbstractHTTPHandler):
+ def __init__(self) -> None:
+ super().__init__()
+
+ def open_(self: UnixHTTPHandler, req: Any) -> Any:
+ return self.do_open(UnixHTTPConnection, req) # type: ignore[arg-type]
+
+ setattr(UnixHTTPHandler, "http+unix_open", open_)
+ setattr(UnixHTTPHandler, "http+unix_request", AbstractHTTPHandler.do_request_)
+
+
+opener = build_opener(UnixHTTPHandler())
+install_opener(opener)
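+
+# For illustration (the socket path is hypothetical):
+#
+#     response = request("GET", "http+unix://%2Ftmp%2Fmanager.sock/config")
+#     print(response.status, response.body)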
diff --git a/manager/knot_resolver_manager/utils/systemd_notify.py b/manager/knot_resolver_manager/utils/systemd_notify.py
new file mode 100644
index 00000000..44e8dee1
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/systemd_notify.py
@@ -0,0 +1,54 @@
+import enum
+import logging
+import os
+import socket
+
+logger = logging.getLogger(__name__)
+
+
+class _Status(enum.Enum):
+ NOT_INITIALIZED = 1
+ FUNCTIONAL = 2
+ FAILED = 3
+
+
+_status = _Status.NOT_INITIALIZED
+_socket = None
+
+
+def systemd_notify(**values: str) -> None:
+ global _status
+ global _socket
+
+ if _status is _Status.NOT_INITIALIZED:
+ socket_addr = os.getenv("NOTIFY_SOCKET")
+ os.unsetenv("NOTIFY_SOCKET")
+ if socket_addr is None:
+ _status = _Status.FAILED
+ return
+ if socket_addr.startswith("@"):
+ socket_addr = socket_addr.replace("@", "\0", 1)
+
+ try:
+ _socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
+ _socket.connect(socket_addr)
+ _status = _Status.FUNCTIONAL
+ except Exception:
+ _socket = None
+ _status = _Status.FAILED
+ logger.warning(f"Failed to connect to $NOTIFY_SOCKET at '{socket_addr}'", exc_info=True)
+ return
+
+ elif _status is _Status.FAILED:
+ return
+
+ if _status is _Status.FUNCTIONAL:
+ assert _socket is not None
+ payload = "\n".join((f"{key}={value}" for key, value in values.items()))
+ try:
+ _socket.send(payload.encode("utf8"))
+ except Exception:
+ logger.warning("Failed to send notification to systemd", exc_info=True)
+ _status = _Status.FAILED
+ _socket.close()
+ _socket = None
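+
+
+# For illustration, the usual readiness notification is:
+#
+#     systemd_notify(READY="1")
+#
+# which sends the payload 'READY=1' to the socket given by $NOTIFY_SOCKET.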
diff --git a/manager/knot_resolver_manager/utils/which.py b/manager/knot_resolver_manager/utils/which.py
new file mode 100644
index 00000000..450102f3
--- /dev/null
+++ b/manager/knot_resolver_manager/utils/which.py
@@ -0,0 +1,22 @@
+import functools
+import os
+from pathlib import Path
+
+
+@functools.lru_cache(maxsize=16)
+def which(binary_name: str) -> Path:
+ """
+ Given a name of an executable, search $PATH and return
+ the absolute path of that executable. The results of this function
+ are LRU cached.
+
+    If the executable is not found, raises a RuntimeError.
+ """
+
+ possible_directories = os.get_exec_path()
+ for dr in possible_directories:
+ p = Path(dr, binary_name)
+ if p.exists():
+ return p.absolute()
+
+ raise RuntimeError(f"Executable {binary_name} was not found in $PATH")
diff --git a/manager/meson.build b/manager/meson.build
new file mode 100644
index 00000000..c80301dc
--- /dev/null
+++ b/manager/meson.build
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+build_manager = false
+
+if get_option('manager') != 'disabled'
+ message('--- manager dependencies ---')
+
+ pymod = import('python')
+ py3 = pymod.find_installation('python3')
+ py3_deps = run_command(py3, 'tests/packaging/dependencies.py', 'setup.py', check: false)
+
+ if py3.language_version().version_compare('<3.6')
+ error('At least Python 3.6 is required.')
+ elif py3_deps.returncode() != 0
+ error(py3_deps.stderr().strip())
+ else
+ message('all dependencies found')
+ build_manager = true
+ endif
+
+ message('----------------------------')
+endif
+
+if build_manager
+
+ # shell completion
+ subdir('shell-completion')
+
+ # installation script
+ meson.add_install_script('scripts/install.sh', py3.path())
+
+  # YAML configuration file
+ install_data(
+ sources: 'etc/knot-resolver/config.yml',
+ install_dir: etc_dir,
+ )
+endif \ No newline at end of file
diff --git a/manager/poe b/manager/poe
new file mode 100755
index 00000000..e64b0675
--- /dev/null
+++ b/manager/poe
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+poetry run poe "$@"
diff --git a/manager/poetry.lock b/manager/poetry.lock
new file mode 100644
index 00000000..a4ad595e
--- /dev/null
+++ b/manager/poetry.lock
@@ -0,0 +1,2945 @@
+# This file is automatically @generated by Poetry 1.4.2 and should not be changed by hand.
+
+[[package]]
+name = "aiohttp"
+version = "3.8.4"
+description = "Async http client/server framework (asyncio)"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "aiohttp-3.8.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5ce45967538fb747370308d3145aa68a074bdecb4f3a300869590f725ced69c1"},
+ {file = "aiohttp-3.8.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b744c33b6f14ca26b7544e8d8aadff6b765a80ad6164fb1a430bbadd593dfb1a"},
+ {file = "aiohttp-3.8.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a45865451439eb320784918617ba54b7a377e3501fb70402ab84d38c2cd891b"},
+ {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a86d42d7cba1cec432d47ab13b6637bee393a10f664c425ea7b305d1301ca1a3"},
+ {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee3c36df21b5714d49fc4580247947aa64bcbe2939d1b77b4c8dcb8f6c9faecc"},
+ {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:176a64b24c0935869d5bbc4c96e82f89f643bcdf08ec947701b9dbb3c956b7dd"},
+ {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c844fd628851c0bc309f3c801b3a3d58ce430b2ce5b359cd918a5a76d0b20cb5"},
+ {file = "aiohttp-3.8.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5393fb786a9e23e4799fec788e7e735de18052f83682ce2dfcabaf1c00c2c08e"},
+ {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e4b09863aae0dc965c3ef36500d891a3ff495a2ea9ae9171e4519963c12ceefd"},
+ {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:adfbc22e87365a6e564c804c58fc44ff7727deea782d175c33602737b7feadb6"},
+ {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:147ae376f14b55f4f3c2b118b95be50a369b89b38a971e80a17c3fd623f280c9"},
+ {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:eafb3e874816ebe2a92f5e155f17260034c8c341dad1df25672fb710627c6949"},
+ {file = "aiohttp-3.8.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c6cc15d58053c76eacac5fa9152d7d84b8d67b3fde92709195cb984cfb3475ea"},
+ {file = "aiohttp-3.8.4-cp310-cp310-win32.whl", hash = "sha256:59f029a5f6e2d679296db7bee982bb3d20c088e52a2977e3175faf31d6fb75d1"},
+ {file = "aiohttp-3.8.4-cp310-cp310-win_amd64.whl", hash = "sha256:fe7ba4a51f33ab275515f66b0a236bcde4fb5561498fe8f898d4e549b2e4509f"},
+ {file = "aiohttp-3.8.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3d8ef1a630519a26d6760bc695842579cb09e373c5f227a21b67dc3eb16cfea4"},
+ {file = "aiohttp-3.8.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b3f2e06a512e94722886c0827bee9807c86a9f698fac6b3aee841fab49bbfb4"},
+ {file = "aiohttp-3.8.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a80464982d41b1fbfe3154e440ba4904b71c1a53e9cd584098cd41efdb188ef"},
+ {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b631e26df63e52f7cce0cce6507b7a7f1bc9b0c501fcde69742130b32e8782f"},
+ {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3f43255086fe25e36fd5ed8f2ee47477408a73ef00e804cb2b5cba4bf2ac7f5e"},
+ {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4d347a172f866cd1d93126d9b239fcbe682acb39b48ee0873c73c933dd23bd0f"},
+ {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a3fec6a4cb5551721cdd70473eb009d90935b4063acc5f40905d40ecfea23e05"},
+ {file = "aiohttp-3.8.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80a37fe8f7c1e6ce8f2d9c411676e4bc633a8462844e38f46156d07a7d401654"},
+ {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d1e6a862b76f34395a985b3cd39a0d949ca80a70b6ebdea37d3ab39ceea6698a"},
+ {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cd468460eefef601ece4428d3cf4562459157c0f6523db89365202c31b6daebb"},
+ {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:618c901dd3aad4ace71dfa0f5e82e88b46ef57e3239fc7027773cb6d4ed53531"},
+ {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:652b1bff4f15f6287550b4670546a2947f2a4575b6c6dff7760eafb22eacbf0b"},
+ {file = "aiohttp-3.8.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80575ba9377c5171407a06d0196b2310b679dc752d02a1fcaa2bc20b235dbf24"},
+ {file = "aiohttp-3.8.4-cp311-cp311-win32.whl", hash = "sha256:bbcf1a76cf6f6dacf2c7f4d2ebd411438c275faa1dc0c68e46eb84eebd05dd7d"},
+ {file = "aiohttp-3.8.4-cp311-cp311-win_amd64.whl", hash = "sha256:6e74dd54f7239fcffe07913ff8b964e28b712f09846e20de78676ce2a3dc0bfc"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:880e15bb6dad90549b43f796b391cfffd7af373f4646784795e20d92606b7a51"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb96fa6b56bb536c42d6a4a87dfca570ff8e52de2d63cabebfd6fb67049c34b6"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a6cadebe132e90cefa77e45f2d2f1a4b2ce5c6b1bfc1656c1ddafcfe4ba8131"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f352b62b45dff37b55ddd7b9c0c8672c4dd2eb9c0f9c11d395075a84e2c40f75"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ab43061a0c81198d88f39aaf90dae9a7744620978f7ef3e3708339b8ed2ef01"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9cb1565a7ad52e096a6988e2ee0397f72fe056dadf75d17fa6b5aebaea05622"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:1b3ea7edd2d24538959c1c1abf97c744d879d4e541d38305f9bd7d9b10c9ec41"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:7c7837fe8037e96b6dd5cfcf47263c1620a9d332a87ec06a6ca4564e56bd0f36"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:3b90467ebc3d9fa5b0f9b6489dfb2c304a1db7b9946fa92aa76a831b9d587e99"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:cab9401de3ea52b4b4c6971db5fb5c999bd4260898af972bf23de1c6b5dd9d71"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d1f9282c5f2b5e241034a009779e7b2a1aa045f667ff521e7948ea9b56e0c5ff"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-win32.whl", hash = "sha256:5e14f25765a578a0a634d5f0cd1e2c3f53964553a00347998dfdf96b8137f777"},
+ {file = "aiohttp-3.8.4-cp36-cp36m-win_amd64.whl", hash = "sha256:4c745b109057e7e5f1848c689ee4fb3a016c8d4d92da52b312f8a509f83aa05e"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:aede4df4eeb926c8fa70de46c340a1bc2c6079e1c40ccf7b0eae1313ffd33519"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ddaae3f3d32fc2cb4c53fab020b69a05c8ab1f02e0e59665c6f7a0d3a5be54f"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4eb3b82ca349cf6fadcdc7abcc8b3a50ab74a62e9113ab7a8ebc268aad35bb9"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9bcb89336efa095ea21b30f9e686763f2be4478f1b0a616969551982c4ee4c3b"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c08e8ed6fa3d477e501ec9db169bfac8140e830aa372d77e4a43084d8dd91ab"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c6cd05ea06daca6ad6a4ca3ba7fe7dc5b5de063ff4daec6170ec0f9979f6c332"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7a00a9ed8d6e725b55ef98b1b35c88013245f35f68b1b12c5cd4100dddac333"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:de04b491d0e5007ee1b63a309956eaed959a49f5bb4e84b26c8f5d49de140fa9"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:40653609b3bf50611356e6b6554e3a331f6879fa7116f3959b20e3528783e699"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dbf3a08a06b3f433013c143ebd72c15cac33d2914b8ea4bea7ac2c23578815d6"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:854f422ac44af92bfe172d8e73229c270dc09b96535e8a548f99c84f82dde241"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-win32.whl", hash = "sha256:aeb29c84bb53a84b1a81c6c09d24cf33bb8432cc5c39979021cc0f98c1292a1a"},
+ {file = "aiohttp-3.8.4-cp37-cp37m-win_amd64.whl", hash = "sha256:db3fc6120bce9f446d13b1b834ea5b15341ca9ff3f335e4a951a6ead31105480"},
+ {file = "aiohttp-3.8.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fabb87dd8850ef0f7fe2b366d44b77d7e6fa2ea87861ab3844da99291e81e60f"},
+ {file = "aiohttp-3.8.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:91f6d540163f90bbaef9387e65f18f73ffd7c79f5225ac3d3f61df7b0d01ad15"},
+ {file = "aiohttp-3.8.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d265f09a75a79a788237d7f9054f929ced2e69eb0bb79de3798c468d8a90f945"},
+ {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d89efa095ca7d442a6d0cbc755f9e08190ba40069b235c9886a8763b03785da"},
+ {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4dac314662f4e2aa5009977b652d9b8db7121b46c38f2073bfeed9f4049732cd"},
+ {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fe11310ae1e4cd560035598c3f29d86cef39a83d244c7466f95c27ae04850f10"},
+ {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ddb2a2026c3f6a68c3998a6c47ab6795e4127315d2e35a09997da21865757f8"},
+ {file = "aiohttp-3.8.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e75b89ac3bd27d2d043b234aa7b734c38ba1b0e43f07787130a0ecac1e12228a"},
+ {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6e601588f2b502c93c30cd5a45bfc665faaf37bbe835b7cfd461753068232074"},
+ {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a5d794d1ae64e7753e405ba58e08fcfa73e3fad93ef9b7e31112ef3c9a0efb52"},
+ {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:a1f4689c9a1462f3df0a1f7e797791cd6b124ddbee2b570d34e7f38ade0e2c71"},
+ {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3032dcb1c35bc330134a5b8a5d4f68c1a87252dfc6e1262c65a7e30e62298275"},
+ {file = "aiohttp-3.8.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8189c56eb0ddbb95bfadb8f60ea1b22fcfa659396ea36f6adcc521213cd7b44d"},
+ {file = "aiohttp-3.8.4-cp38-cp38-win32.whl", hash = "sha256:33587f26dcee66efb2fff3c177547bd0449ab7edf1b73a7f5dea1e38609a0c54"},
+ {file = "aiohttp-3.8.4-cp38-cp38-win_amd64.whl", hash = "sha256:e595432ac259af2d4630008bf638873d69346372d38255774c0e286951e8b79f"},
+ {file = "aiohttp-3.8.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5a7bdf9e57126dc345b683c3632e8ba317c31d2a41acd5800c10640387d193ed"},
+ {file = "aiohttp-3.8.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:22f6eab15b6db242499a16de87939a342f5a950ad0abaf1532038e2ce7d31567"},
+ {file = "aiohttp-3.8.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7235604476a76ef249bd64cb8274ed24ccf6995c4a8b51a237005ee7a57e8643"},
+ {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea9eb976ffdd79d0e893869cfe179a8f60f152d42cb64622fca418cd9b18dc2a"},
+ {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92c0cea74a2a81c4c76b62ea1cac163ecb20fb3ba3a75c909b9fa71b4ad493cf"},
+ {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:493f5bc2f8307286b7799c6d899d388bbaa7dfa6c4caf4f97ef7521b9cb13719"},
+ {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a63f03189a6fa7c900226e3ef5ba4d3bd047e18f445e69adbd65af433add5a2"},
+ {file = "aiohttp-3.8.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10c8cefcff98fd9168cdd86c4da8b84baaa90bf2da2269c6161984e6737bf23e"},
+ {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:bca5f24726e2919de94f047739d0a4fc01372801a3672708260546aa2601bf57"},
+ {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:03baa76b730e4e15a45f81dfe29a8d910314143414e528737f8589ec60cf7391"},
+ {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8c29c77cc57e40f84acef9bfb904373a4e89a4e8b74e71aa8075c021ec9078c2"},
+ {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:03543dcf98a6619254b409be2d22b51f21ec66272be4ebda7b04e6412e4b2e14"},
+ {file = "aiohttp-3.8.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:17b79c2963db82086229012cff93ea55196ed31f6493bb1ccd2c62f1724324e4"},
+ {file = "aiohttp-3.8.4-cp39-cp39-win32.whl", hash = "sha256:34ce9f93a4a68d1272d26030655dd1b58ff727b3ed2a33d80ec433561b03d67a"},
+ {file = "aiohttp-3.8.4-cp39-cp39-win_amd64.whl", hash = "sha256:41a86a69bb63bb2fc3dc9ad5ea9f10f1c9c8e282b471931be0268ddd09430b04"},
+ {file = "aiohttp-3.8.4.tar.gz", hash = "sha256:bf2e1a9162c1e441bf805a1fd166e249d574ca04e03b34f97e2928769e91ab5c"},
+]
+
+[package.dependencies]
+aiosignal = ">=1.1.2"
+async-timeout = ">=4.0.0a3,<5.0"
+asynctest = {version = "0.13.0", markers = "python_version < \"3.8\""}
+attrs = ">=17.3.0"
+charset-normalizer = ">=2.0,<4.0"
+frozenlist = ">=1.1.1"
+multidict = ">=4.5,<7.0"
+typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""}
+yarl = ">=1.0,<2.0"
+
+[package.extras]
+speedups = ["Brotli", "aiodns", "cchardet"]
+
+[[package]]
+name = "aiosignal"
+version = "1.3.1"
+description = "aiosignal: a list of registered asynchronous callbacks"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"},
+ {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"},
+]
+
+[package.dependencies]
+frozenlist = ">=1.1.0"
+
+[[package]]
+name = "alabaster"
+version = "0.7.13"
+description = "A configurable sidebar-enabled Sphinx theme"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "alabaster-0.7.13-py3-none-any.whl", hash = "sha256:1ee19aca801bbabb5ba3f5f258e4422dfa86f82f3e9cefb0859b283cdd7f62a3"},
+ {file = "alabaster-0.7.13.tar.gz", hash = "sha256:a27a4a084d5e690e16e01e03ad2b2e552c61a65469419b907243193de1a84ae2"},
+]
+
+[[package]]
+name = "astroid"
+version = "2.11.7"
+description = "An abstract syntax tree for Python with inference support."
+category = "dev"
+optional = false
+python-versions = ">=3.6.2"
+files = [
+ {file = "astroid-2.11.7-py3-none-any.whl", hash = "sha256:86b0a340a512c65abf4368b80252754cda17c02cdbbd3f587dddf98112233e7b"},
+ {file = "astroid-2.11.7.tar.gz", hash = "sha256:bb24615c77f4837c707669d16907331374ae8a964650a66999da3f5ca68dc946"},
+]
+
+[package.dependencies]
+lazy-object-proxy = ">=1.4.0"
+setuptools = ">=20.0"
+typed-ast = {version = ">=1.4.0,<2.0", markers = "implementation_name == \"cpython\" and python_version < \"3.8\""}
+typing-extensions = {version = ">=3.10", markers = "python_version < \"3.10\""}
+wrapt = ">=1.11,<2"
+
+[[package]]
+name = "async-timeout"
+version = "4.0.2"
+description = "Timeout context manager for asyncio programs"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "async-timeout-4.0.2.tar.gz", hash = "sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15"},
+ {file = "async_timeout-4.0.2-py3-none-any.whl", hash = "sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c"},
+]
+
+[package.dependencies]
+typing-extensions = {version = ">=3.6.5", markers = "python_version < \"3.8\""}
+
+[[package]]
+name = "asynctest"
+version = "0.13.0"
+description = "Enhance the standard unittest package with features for testing asyncio libraries"
+category = "main"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "asynctest-0.13.0-py3-none-any.whl", hash = "sha256:5da6118a7e6d6b54d83a8f7197769d046922a44d2a99c21382f0a6e4fadae676"},
+ {file = "asynctest-0.13.0.tar.gz", hash = "sha256:c27862842d15d83e6a34eb0b2866c323880eb3a75e4485b079ea11748fd77fac"},
+]
+
+[[package]]
+name = "attrs"
+version = "23.1.0"
+description = "Classes Without Boilerplate"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
+ {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
+]
+
+[package.dependencies]
+importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
+
+[package.extras]
+cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
+dev = ["attrs[docs,tests]", "pre-commit"]
+docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
+tests = ["attrs[tests-no-zope]", "zope-interface"]
+tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+
+[[package]]
+name = "babel"
+version = "2.12.1"
+description = "Internationalization utilities"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "Babel-2.12.1-py3-none-any.whl", hash = "sha256:b4246fb7677d3b98f501a39d43396d3cafdc8eadb045f4a31be01863f655c610"},
+ {file = "Babel-2.12.1.tar.gz", hash = "sha256:cc2d99999cd01d44420ae725a21c9e3711b3aadc7976d6147f622d8581963455"},
+]
+
+[package.dependencies]
+pytz = {version = ">=2015.7", markers = "python_version < \"3.9\""}
+
+[[package]]
+name = "backports-cached-property"
+version = "1.0.2"
+description = "cached_property() - computed once per instance, cached as attribute"
+category = "dev"
+optional = false
+python-versions = ">=3.6.0"
+files = [
+ {file = "backports.cached-property-1.0.2.tar.gz", hash = "sha256:9306f9eed6ec55fd156ace6bc1094e2c86fae5fb2bf07b6a9c00745c656e75dd"},
+ {file = "backports.cached_property-1.0.2-py3-none-any.whl", hash = "sha256:baeb28e1cd619a3c9ab8941431fe34e8490861fb998c6c4590693d50171db0cc"},
+]
+
+[[package]]
+name = "black"
+version = "23.3.0"
+description = "The uncompromising code formatter."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "black-23.3.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:0945e13506be58bf7db93ee5853243eb368ace1c08a24c65ce108986eac65915"},
+ {file = "black-23.3.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:67de8d0c209eb5b330cce2469503de11bca4085880d62f1628bd9972cc3366b9"},
+ {file = "black-23.3.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:7c3eb7cea23904399866c55826b31c1f55bbcd3890ce22ff70466b907b6775c2"},
+ {file = "black-23.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32daa9783106c28815d05b724238e30718f34155653d4d6e125dc7daec8e260c"},
+ {file = "black-23.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:35d1381d7a22cc5b2be2f72c7dfdae4072a3336060635718cc7e1ede24221d6c"},
+ {file = "black-23.3.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:a8a968125d0a6a404842fa1bf0b349a568634f856aa08ffaff40ae0dfa52e7c6"},
+ {file = "black-23.3.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c7ab5790333c448903c4b721b59c0d80b11fe5e9803d8703e84dcb8da56fec1b"},
+ {file = "black-23.3.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = "sha256:a6f6886c9869d4daae2d1715ce34a19bbc4b95006d20ed785ca00fa03cba312d"},
+ {file = "black-23.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f3c333ea1dd6771b2d3777482429864f8e258899f6ff05826c3a4fcc5ce3f70"},
+ {file = "black-23.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:11c410f71b876f961d1de77b9699ad19f939094c3a677323f43d7a29855fe326"},
+ {file = "black-23.3.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:1d06691f1eb8de91cd1b322f21e3bfc9efe0c7ca1f0e1eb1db44ea367dff656b"},
+ {file = "black-23.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50cb33cac881766a5cd9913e10ff75b1e8eb71babf4c7104f2e9c52da1fb7de2"},
+ {file = "black-23.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e114420bf26b90d4b9daa597351337762b63039752bdf72bf361364c1aa05925"},
+ {file = "black-23.3.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:48f9d345675bb7fbc3dd85821b12487e1b9a75242028adad0333ce36ed2a6d27"},
+ {file = "black-23.3.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:714290490c18fb0126baa0fca0a54ee795f7502b44177e1ce7624ba1c00f2331"},
+ {file = "black-23.3.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:064101748afa12ad2291c2b91c960be28b817c0c7eaa35bec09cc63aa56493c5"},
+ {file = "black-23.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:562bd3a70495facf56814293149e51aa1be9931567474993c7942ff7d3533961"},
+ {file = "black-23.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:e198cf27888ad6f4ff331ca1c48ffc038848ea9f031a3b40ba36aced7e22f2c8"},
+ {file = "black-23.3.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:3238f2aacf827d18d26db07524e44741233ae09a584273aa059066d644ca7b30"},
+ {file = "black-23.3.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:f0bd2f4a58d6666500542b26354978218a9babcdc972722f4bf90779524515f3"},
+ {file = "black-23.3.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:92c543f6854c28a3c7f39f4d9b7694f9a6eb9d3c5e2ece488c327b6e7ea9b266"},
+ {file = "black-23.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a150542a204124ed00683f0db1f5cf1c2aaaa9cc3495b7a3b5976fb136090ab"},
+ {file = "black-23.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:6b39abdfb402002b8a7d030ccc85cf5afff64ee90fa4c5aebc531e3ad0175ddb"},
+ {file = "black-23.3.0-py3-none-any.whl", hash = "sha256:ec751418022185b0c1bb7d7736e6933d40bbb14c14a0abcf9123d1b159f98dd4"},
+ {file = "black-23.3.0.tar.gz", hash = "sha256:1c7b8d606e728a41ea1ccbd7264677e494e87cf630e399262ced92d4a8dac940"},
+]
+
+[package.dependencies]
+click = ">=8.0.0"
+mypy-extensions = ">=0.4.3"
+packaging = ">=22.0"
+pathspec = ">=0.9.0"
+platformdirs = ">=2"
+tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""}
+typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+colorama = ["colorama (>=0.4.3)"]
+d = ["aiohttp (>=3.7.4)"]
+jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
+uvloop = ["uvloop (>=0.15.2)"]
+
+[[package]]
+name = "build"
+version = "0.10.0"
+description = "A simple, correct Python build frontend"
+category = "dev"
+optional = false
+python-versions = ">= 3.7"
+files = [
+ {file = "build-0.10.0-py3-none-any.whl", hash = "sha256:af266720050a66c893a6096a2f410989eeac74ff9a68ba194b3f6473e8e26171"},
+ {file = "build-0.10.0.tar.gz", hash = "sha256:d5b71264afdb5951d6704482aac78de887c80691c52b88a9ad195983ca2c9269"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "os_name == \"nt\""}
+importlib-metadata = {version = ">=0.22", markers = "python_version < \"3.8\""}
+packaging = ">=19.0"
+pyproject_hooks = "*"
+tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+
+[package.extras]
+docs = ["furo (>=2021.08.31)", "sphinx (>=4.0,<5.0)", "sphinx-argparse-cli (>=1.5)", "sphinx-autodoc-typehints (>=1.10)"]
+test = ["filelock (>=3)", "pytest (>=6.2.4)", "pytest-cov (>=2.12)", "pytest-mock (>=2)", "pytest-rerunfailures (>=9.1)", "pytest-xdist (>=1.34)", "setuptools (>=42.0.0)", "setuptools (>=56.0.0)", "toml (>=0.10.0)", "wheel (>=0.36.0)"]
+typing = ["importlib-metadata (>=5.1)", "mypy (==0.991)", "tomli", "typing-extensions (>=3.7.4.3)"]
+virtualenv = ["virtualenv (>=20.0.35)"]
+
+[[package]]
+name = "cachecontrol"
+version = "0.12.14"
+description = "httplib2 caching for requests"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "CacheControl-0.12.14-py2.py3-none-any.whl", hash = "sha256:1c2939be362a70c4e5f02c6249462b3b7a24441e4f1ced5e9ef028172edf356a"},
+ {file = "CacheControl-0.12.14.tar.gz", hash = "sha256:d1087f45781c0e00616479bfd282c78504371ca71da017b49df9f5365a95feba"},
+]
+
+[package.dependencies]
+lockfile = {version = ">=0.9", optional = true, markers = "extra == \"filecache\""}
+msgpack = ">=0.5.2"
+requests = "*"
+
+[package.extras]
+filecache = ["lockfile (>=0.9)"]
+redis = ["redis (>=2.10.5)"]
+
+[[package]]
+name = "cachetools"
+version = "5.3.1"
+description = "Extensible memoizing collections and decorators"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "cachetools-5.3.1-py3-none-any.whl", hash = "sha256:95ef631eeaea14ba2e36f06437f36463aac3a096799e876ee55e5cdccb102590"},
+ {file = "cachetools-5.3.1.tar.gz", hash = "sha256:dce83f2d9b4e1f732a8cd44af8e8fab2dbe46201467fc98b3ef8f269092bf62b"},
+]
+
+[[package]]
+name = "certifi"
+version = "2023.5.7"
+description = "Python package for providing Mozilla's CA Bundle."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "certifi-2023.5.7-py3-none-any.whl", hash = "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716"},
+ {file = "certifi-2023.5.7.tar.gz", hash = "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7"},
+]
+
+[[package]]
+name = "cffi"
+version = "1.15.1"
+description = "Foreign Function Interface for Python calling C code."
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"},
+ {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"},
+ {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"},
+ {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"},
+ {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"},
+ {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"},
+ {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"},
+ {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"},
+ {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"},
+ {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"},
+ {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"},
+ {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"},
+ {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"},
+ {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"},
+ {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"},
+ {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"},
+ {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"},
+ {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"},
+ {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"},
+ {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"},
+ {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"},
+ {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"},
+ {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"},
+ {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"},
+ {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"},
+ {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"},
+ {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"},
+ {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"},
+ {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"},
+ {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"},
+ {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"},
+ {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"},
+ {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"},
+ {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"},
+ {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"},
+]
+
+[package.dependencies]
+pycparser = "*"
+
+[[package]]
+name = "chardet"
+version = "5.1.0"
+description = "Universal encoding detector for Python 3"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "chardet-5.1.0-py3-none-any.whl", hash = "sha256:362777fb014af596ad31334fde1e8c327dfdb076e1960d1694662d46a6917ab9"},
+ {file = "chardet-5.1.0.tar.gz", hash = "sha256:0d62712b956bc154f85fb0a266e2a3c5913c2967e00348701b32411d6def31e5"},
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.1.0"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+category = "main"
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"},
+ {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"},
+ {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"},
+ {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"},
+ {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"},
+ {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"},
+ {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"},
+]
+
+[[package]]
+name = "cleo"
+version = "2.0.1"
+description = "Cleo allows you to create beautiful and testable command-line interfaces."
+category = "dev"
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "cleo-2.0.1-py3-none-any.whl", hash = "sha256:6eb133670a3ed1f3b052d53789017b6e50fca66d1287e6e6696285f4cb8ea448"},
+ {file = "cleo-2.0.1.tar.gz", hash = "sha256:eb4b2e1f3063c11085cebe489a6e9124163c226575a3c3be69b2e51af4a15ec5"},
+]
+
+[package.dependencies]
+crashtest = ">=0.4.1,<0.5.0"
+rapidfuzz = ">=2.2.0,<3.0.0"
+
+[[package]]
+name = "click"
+version = "8.1.3"
+description = "Composable command line interface toolkit"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"},
+ {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+description = "Cross-platform colored terminal text."
+category = "dev"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
+[[package]]
+name = "coverage"
+version = "7.2.7"
+description = "Code coverage measurement for Python"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "coverage-7.2.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d39b5b4f2a66ccae8b7263ac3c8170994b65266797fb96cbbfd3fb5b23921db8"},
+ {file = "coverage-7.2.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6d040ef7c9859bb11dfeb056ff5b3872436e3b5e401817d87a31e1750b9ae2fb"},
+ {file = "coverage-7.2.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ba90a9563ba44a72fda2e85302c3abc71c5589cea608ca16c22b9804262aaeb6"},
+ {file = "coverage-7.2.7-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7d9405291c6928619403db1d10bd07888888ec1abcbd9748fdaa971d7d661b2"},
+ {file = "coverage-7.2.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31563e97dae5598556600466ad9beea39fb04e0229e61c12eaa206e0aa202063"},
+ {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ebba1cd308ef115925421d3e6a586e655ca5a77b5bf41e02eb0e4562a111f2d1"},
+ {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:cb017fd1b2603ef59e374ba2063f593abe0fc45f2ad9abdde5b4d83bd922a353"},
+ {file = "coverage-7.2.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62a5c7dad11015c66fbb9d881bc4caa5b12f16292f857842d9d1871595f4495"},
+ {file = "coverage-7.2.7-cp310-cp310-win32.whl", hash = "sha256:ee57190f24fba796e36bb6d3aa8a8783c643d8fa9760c89f7a98ab5455fbf818"},
+ {file = "coverage-7.2.7-cp310-cp310-win_amd64.whl", hash = "sha256:f75f7168ab25dd93110c8a8117a22450c19976afbc44234cbf71481094c1b850"},
+ {file = "coverage-7.2.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:06a9a2be0b5b576c3f18f1a241f0473575c4a26021b52b2a85263a00f034d51f"},
+ {file = "coverage-7.2.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5baa06420f837184130752b7c5ea0808762083bf3487b5038d68b012e5937dbe"},
+ {file = "coverage-7.2.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdec9e8cbf13a5bf63290fc6013d216a4c7232efb51548594ca3631a7f13c3a3"},
+ {file = "coverage-7.2.7-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:52edc1a60c0d34afa421c9c37078817b2e67a392cab17d97283b64c5833f427f"},
+ {file = "coverage-7.2.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63426706118b7f5cf6bb6c895dc215d8a418d5952544042c8a2d9fe87fcf09cb"},
+ {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:afb17f84d56068a7c29f5fa37bfd38d5aba69e3304af08ee94da8ed5b0865833"},
+ {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:48c19d2159d433ccc99e729ceae7d5293fbffa0bdb94952d3579983d1c8c9d97"},
+ {file = "coverage-7.2.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0e1f928eaf5469c11e886fe0885ad2bf1ec606434e79842a879277895a50942a"},
+ {file = "coverage-7.2.7-cp311-cp311-win32.whl", hash = "sha256:33d6d3ea29d5b3a1a632b3c4e4f4ecae24ef170b0b9ee493883f2df10039959a"},
+ {file = "coverage-7.2.7-cp311-cp311-win_amd64.whl", hash = "sha256:5b7540161790b2f28143191f5f8ec02fb132660ff175b7747b95dcb77ac26562"},
+ {file = "coverage-7.2.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f2f67fe12b22cd130d34d0ef79206061bfb5eda52feb6ce0dba0644e20a03cf4"},
+ {file = "coverage-7.2.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a342242fe22407f3c17f4b499276a02b01e80f861f1682ad1d95b04018e0c0d4"},
+ {file = "coverage-7.2.7-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:171717c7cb6b453aebac9a2ef603699da237f341b38eebfee9be75d27dc38e01"},
+ {file = "coverage-7.2.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49969a9f7ffa086d973d91cec8d2e31080436ef0fb4a359cae927e742abfaaa6"},
+ {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:b46517c02ccd08092f4fa99f24c3b83d8f92f739b4657b0f146246a0ca6a831d"},
+ {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:a3d33a6b3eae87ceaefa91ffdc130b5e8536182cd6dfdbfc1aa56b46ff8c86de"},
+ {file = "coverage-7.2.7-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:976b9c42fb2a43ebf304fa7d4a310e5f16cc99992f33eced91ef6f908bd8f33d"},
+ {file = "coverage-7.2.7-cp312-cp312-win32.whl", hash = "sha256:8de8bb0e5ad103888d65abef8bca41ab93721647590a3f740100cd65c3b00511"},
+ {file = "coverage-7.2.7-cp312-cp312-win_amd64.whl", hash = "sha256:9e31cb64d7de6b6f09702bb27c02d1904b3aebfca610c12772452c4e6c21a0d3"},
+ {file = "coverage-7.2.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:58c2ccc2f00ecb51253cbe5d8d7122a34590fac9646a960d1430d5b15321d95f"},
+ {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d22656368f0e6189e24722214ed8d66b8022db19d182927b9a248a2a8a2f67eb"},
+ {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a895fcc7b15c3fc72beb43cdcbdf0ddb7d2ebc959edac9cef390b0d14f39f8a9"},
+ {file = "coverage-7.2.7-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e84606b74eb7de6ff581a7915e2dab7a28a0517fbe1c9239eb227e1354064dcd"},
+ {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0a5f9e1dbd7fbe30196578ca36f3fba75376fb99888c395c5880b355e2875f8a"},
+ {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:419bfd2caae268623dd469eff96d510a920c90928b60f2073d79f8fe2bbc5959"},
+ {file = "coverage-7.2.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2aee274c46590717f38ae5e4650988d1af340fe06167546cc32fe2f58ed05b02"},
+ {file = "coverage-7.2.7-cp37-cp37m-win32.whl", hash = "sha256:61b9a528fb348373c433e8966535074b802c7a5d7f23c4f421e6c6e2f1697a6f"},
+ {file = "coverage-7.2.7-cp37-cp37m-win_amd64.whl", hash = "sha256:b1c546aca0ca4d028901d825015dc8e4d56aac4b541877690eb76490f1dc8ed0"},
+ {file = "coverage-7.2.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:54b896376ab563bd38453cecb813c295cf347cf5906e8b41d340b0321a5433e5"},
+ {file = "coverage-7.2.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3d376df58cc111dc8e21e3b6e24606b5bb5dee6024f46a5abca99124b2229ef5"},
+ {file = "coverage-7.2.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e330fc79bd7207e46c7d7fd2bb4af2963f5f635703925543a70b99574b0fea9"},
+ {file = "coverage-7.2.7-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e9d683426464e4a252bf70c3498756055016f99ddaec3774bf368e76bbe02b6"},
+ {file = "coverage-7.2.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d13c64ee2d33eccf7437961b6ea7ad8673e2be040b4f7fd4fd4d4d28d9ccb1e"},
+ {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b7aa5f8a41217360e600da646004f878250a0d6738bcdc11a0a39928d7dc2050"},
+ {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8fa03bce9bfbeeef9f3b160a8bed39a221d82308b4152b27d82d8daa7041fee5"},
+ {file = "coverage-7.2.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:245167dd26180ab4c91d5e1496a30be4cd721a5cf2abf52974f965f10f11419f"},
+ {file = "coverage-7.2.7-cp38-cp38-win32.whl", hash = "sha256:d2c2db7fd82e9b72937969bceac4d6ca89660db0a0967614ce2481e81a0b771e"},
+ {file = "coverage-7.2.7-cp38-cp38-win_amd64.whl", hash = "sha256:2e07b54284e381531c87f785f613b833569c14ecacdcb85d56b25c4622c16c3c"},
+ {file = "coverage-7.2.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:537891ae8ce59ef63d0123f7ac9e2ae0fc8b72c7ccbe5296fec45fd68967b6c9"},
+ {file = "coverage-7.2.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:06fb182e69f33f6cd1d39a6c597294cff3143554b64b9825d1dc69d18cc2fff2"},
+ {file = "coverage-7.2.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:201e7389591af40950a6480bd9edfa8ed04346ff80002cec1a66cac4549c1ad7"},
+ {file = "coverage-7.2.7-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f6951407391b639504e3b3be51b7ba5f3528adbf1a8ac3302b687ecababf929e"},
+ {file = "coverage-7.2.7-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f48351d66575f535669306aa7d6d6f71bc43372473b54a832222803eb956fd1"},
+ {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b29019c76039dc3c0fd815c41392a044ce555d9bcdd38b0fb60fb4cd8e475ba9"},
+ {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:81c13a1fc7468c40f13420732805a4c38a105d89848b7c10af65a90beff25250"},
+ {file = "coverage-7.2.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:975d70ab7e3c80a3fe86001d8751f6778905ec723f5b110aed1e450da9d4b7f2"},
+ {file = "coverage-7.2.7-cp39-cp39-win32.whl", hash = "sha256:7ee7d9d4822c8acc74a5e26c50604dff824710bc8de424904c0982e25c39c6cb"},
+ {file = "coverage-7.2.7-cp39-cp39-win_amd64.whl", hash = "sha256:eb393e5ebc85245347950143969b241d08b52b88a3dc39479822e073a1a8eb27"},
+ {file = "coverage-7.2.7-pp37.pp38.pp39-none-any.whl", hash = "sha256:b7b4c971f05e6ae490fef852c218b0e79d4e52f79ef0c8475566584a8fb3e01d"},
+ {file = "coverage-7.2.7.tar.gz", hash = "sha256:924d94291ca674905fe9481f12294eb11f2d3d3fd1adb20314ba89e94f44ed59"},
+]
+
+[package.dependencies]
+tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""}
+
+[package.extras]
+toml = ["tomli"]
+
+[[package]]
+name = "crashtest"
+version = "0.4.1"
+description = "Manage Python errors with ease"
+category = "dev"
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "crashtest-0.4.1-py3-none-any.whl", hash = "sha256:8d23eac5fa660409f57472e3851dab7ac18aba459a8d19cbbba86d3d5aecd2a5"},
+ {file = "crashtest-0.4.1.tar.gz", hash = "sha256:80d7b1f316ebfbd429f648076d6275c877ba30ba48979de4191714a75266f0ce"},
+]
+
+[[package]]
+name = "cryptography"
+version = "41.0.1"
+description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "cryptography-41.0.1-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:f73bff05db2a3e5974a6fd248af2566134d8981fd7ab012e5dd4ddb1d9a70699"},
+ {file = "cryptography-41.0.1-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:1a5472d40c8f8e91ff7a3d8ac6dfa363d8e3138b961529c996f3e2df0c7a411a"},
+ {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fa01527046ca5facdf973eef2535a27fec4cb651e4daec4d043ef63f6ecd4ca"},
+ {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b46e37db3cc267b4dea1f56da7346c9727e1209aa98487179ee8ebed09d21e43"},
+ {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d198820aba55660b4d74f7b5fd1f17db3aa5eb3e6893b0a41b75e84e4f9e0e4b"},
+ {file = "cryptography-41.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:948224d76c4b6457349d47c0c98657557f429b4e93057cf5a2f71d603e2fc3a3"},
+ {file = "cryptography-41.0.1-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:059e348f9a3c1950937e1b5d7ba1f8e968508ab181e75fc32b879452f08356db"},
+ {file = "cryptography-41.0.1-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:b4ceb5324b998ce2003bc17d519080b4ec8d5b7b70794cbd2836101406a9be31"},
+ {file = "cryptography-41.0.1-cp37-abi3-win32.whl", hash = "sha256:8f4ab7021127a9b4323537300a2acfb450124b2def3756f64dc3a3d2160ee4b5"},
+ {file = "cryptography-41.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:1fee5aacc7367487b4e22484d3c7e547992ed726d14864ee33c0176ae43b0d7c"},
+ {file = "cryptography-41.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:9a6c7a3c87d595608a39980ebaa04d5a37f94024c9f24eb7d10262b92f739ddb"},
+ {file = "cryptography-41.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:5d092fdfedaec4cbbffbf98cddc915ba145313a6fdaab83c6e67f4e6c218e6f3"},
+ {file = "cryptography-41.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1a8e6c2de6fbbcc5e14fd27fb24414507cb3333198ea9ab1258d916f00bc3039"},
+ {file = "cryptography-41.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:cb33ccf15e89f7ed89b235cff9d49e2e62c6c981a6061c9c8bb47ed7951190bc"},
+ {file = "cryptography-41.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5f0ff6e18d13a3de56f609dd1fd11470918f770c6bd5d00d632076c727d35485"},
+ {file = "cryptography-41.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:7bfc55a5eae8b86a287747053140ba221afc65eb06207bedf6e019b8934b477c"},
+ {file = "cryptography-41.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:eb8163f5e549a22888c18b0d53d6bb62a20510060a22fd5a995ec8a05268df8a"},
+ {file = "cryptography-41.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8dde71c4169ec5ccc1087bb7521d54251c016f126f922ab2dfe6649170a3b8c5"},
+ {file = "cryptography-41.0.1.tar.gz", hash = "sha256:d34579085401d3f49762d2f7d6634d6b6c2ae1242202e860f4d26b046e3a1006"},
+]
+
+[package.dependencies]
+cffi = ">=1.12"
+
+[package.extras]
+docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"]
+docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"]
+nox = ["nox"]
+pep8test = ["black", "check-sdist", "mypy", "ruff"]
+sdist = ["build"]
+ssh = ["bcrypt (>=3.1.5)"]
+test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"]
+test-randomorder = ["pytest-randomly"]
+
+[[package]]
+name = "debugpy"
+version = "1.6.7"
+description = "An implementation of the Debug Adapter Protocol for Python"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "debugpy-1.6.7-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b3e7ac809b991006ad7f857f016fa92014445085711ef111fdc3f74f66144096"},
+ {file = "debugpy-1.6.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3876611d114a18aafef6383695dfc3f1217c98a9168c1aaf1a02b01ec7d8d1e"},
+ {file = "debugpy-1.6.7-cp310-cp310-win32.whl", hash = "sha256:33edb4afa85c098c24cc361d72ba7c21bb92f501104514d4ffec1fb36e09c01a"},
+ {file = "debugpy-1.6.7-cp310-cp310-win_amd64.whl", hash = "sha256:ed6d5413474e209ba50b1a75b2d9eecf64d41e6e4501977991cdc755dc83ab0f"},
+ {file = "debugpy-1.6.7-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:38ed626353e7c63f4b11efad659be04c23de2b0d15efff77b60e4740ea685d07"},
+ {file = "debugpy-1.6.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:279d64c408c60431c8ee832dfd9ace7c396984fd7341fa3116aee414e7dcd88d"},
+ {file = "debugpy-1.6.7-cp37-cp37m-win32.whl", hash = "sha256:dbe04e7568aa69361a5b4c47b4493d5680bfa3a911d1e105fbea1b1f23f3eb45"},
+ {file = "debugpy-1.6.7-cp37-cp37m-win_amd64.whl", hash = "sha256:f90a2d4ad9a035cee7331c06a4cf2245e38bd7c89554fe3b616d90ab8aab89cc"},
+ {file = "debugpy-1.6.7-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:5224eabbbeddcf1943d4e2821876f3e5d7d383f27390b82da5d9558fd4eb30a9"},
+ {file = "debugpy-1.6.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bae1123dff5bfe548ba1683eb972329ba6d646c3a80e6b4c06cd1b1dd0205e9b"},
+ {file = "debugpy-1.6.7-cp38-cp38-win32.whl", hash = "sha256:9cd10cf338e0907fdcf9eac9087faa30f150ef5445af5a545d307055141dd7a4"},
+ {file = "debugpy-1.6.7-cp38-cp38-win_amd64.whl", hash = "sha256:aaf6da50377ff4056c8ed470da24632b42e4087bc826845daad7af211e00faad"},
+ {file = "debugpy-1.6.7-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:0679b7e1e3523bd7d7869447ec67b59728675aadfc038550a63a362b63029d2c"},
+ {file = "debugpy-1.6.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de86029696e1b3b4d0d49076b9eba606c226e33ae312a57a46dca14ff370894d"},
+ {file = "debugpy-1.6.7-cp39-cp39-win32.whl", hash = "sha256:d71b31117779d9a90b745720c0eab54ae1da76d5b38c8026c654f4a066b0130a"},
+ {file = "debugpy-1.6.7-cp39-cp39-win_amd64.whl", hash = "sha256:c0ff93ae90a03b06d85b2c529eca51ab15457868a377c4cc40a23ab0e4e552a3"},
+ {file = "debugpy-1.6.7-py2.py3-none-any.whl", hash = "sha256:53f7a456bc50706a0eaabecf2d3ce44c4d5010e46dfc65b6b81a518b42866267"},
+ {file = "debugpy-1.6.7.zip", hash = "sha256:c4c2f0810fa25323abfdfa36cbbbb24e5c3b1a42cb762782de64439c575d67f2"},
+]
+
+[[package]]
+name = "dill"
+version = "0.3.6"
+description = "serialize all of python"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "dill-0.3.6-py3-none-any.whl", hash = "sha256:a07ffd2351b8c678dfc4a856a3005f8067aea51d6ba6c700796a4d9e280f39f0"},
+ {file = "dill-0.3.6.tar.gz", hash = "sha256:e5db55f3687856d8fbdab002ed78544e1c4559a130302693d839dfe8f93f2373"},
+]
+
+[package.extras]
+graph = ["objgraph (>=1.7.2)"]
+
+[[package]]
+name = "distlib"
+version = "0.3.6"
+description = "Distribution utilities"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "distlib-0.3.6-py2.py3-none-any.whl", hash = "sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e"},
+ {file = "distlib-0.3.6.tar.gz", hash = "sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46"},
+]
+
+[[package]]
+name = "docutils"
+version = "0.19"
+description = "Docutils -- Python Documentation Utilities"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "docutils-0.19-py3-none-any.whl", hash = "sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc"},
+ {file = "docutils-0.19.tar.gz", hash = "sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6"},
+]
+
+[[package]]
+name = "dulwich"
+version = "0.21.5"
+description = "Python Git Library"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "dulwich-0.21.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8864719bc176cdd27847332a2059127e2f7bab7db2ff99a999873cb7fff54116"},
+ {file = "dulwich-0.21.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3800cdc17d144c1f7e114972293bd6c46688f5bcc2c9228ed0537ded72394082"},
+ {file = "dulwich-0.21.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e2f676bfed8146966fe934ee734969d7d81548fbd250a8308582973670a9dab1"},
+ {file = "dulwich-0.21.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4db330fb59fe3b9d253bdf0e49a521739db83689520c4921ab1c5242aaf77b82"},
+ {file = "dulwich-0.21.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e8f6d4f4f4d01dd1d3c968e486d4cd77f96f772da7265941bc506de0944ddb9"},
+ {file = "dulwich-0.21.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1cc0c9ba19ac1b2372598802bc9201a9c45e5d6f1f7a80ec40deeb10acc4e9ae"},
+ {file = "dulwich-0.21.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:61e10242b5a7a82faa8996b2c76239cfb633620b02cdd2946e8af6e7eb31d651"},
+ {file = "dulwich-0.21.5-cp310-cp310-win32.whl", hash = "sha256:7f357639b56146a396f48e5e0bc9bbaca3d6d51c8340bd825299272b588fff5f"},
+ {file = "dulwich-0.21.5-cp310-cp310-win_amd64.whl", hash = "sha256:891d5c73e2b66d05dbb502e44f027dc0dbbd8f6198bc90dae348152e69d0befc"},
+ {file = "dulwich-0.21.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:45d6198e804b539708b73a003419e48fb42ff2c3c6dd93f63f3b134dff6dd259"},
+ {file = "dulwich-0.21.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c2a565d4e704d7f784cdf9637097141f6d47129c8fffc2fac699d57cb075a169"},
+ {file = "dulwich-0.21.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:823091d6b6a1ea07dc4839c9752198fb39193213d103ac189c7669736be2eaff"},
+ {file = "dulwich-0.21.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2c9931b657f2206abec0964ec2355ee2c1e04d05f8864e823ffa23c548c4548"},
+ {file = "dulwich-0.21.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7dc358c2ee727322a09b7c6da43d47a1026049dbd3ad8d612eddca1f9074b298"},
+ {file = "dulwich-0.21.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6155ab7388ee01c670f7c5d8003d4e133eebebc7085a856c007989f0ba921b36"},
+ {file = "dulwich-0.21.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a605e10d72f90a39ea2e634fbfd80f866fc4df29a02ea6db52ae92e5fd4a2003"},
+ {file = "dulwich-0.21.5-cp311-cp311-win32.whl", hash = "sha256:daa607370722c3dce99a0022397c141caefb5ed32032a4f72506f4817ea6405b"},
+ {file = "dulwich-0.21.5-cp311-cp311-win_amd64.whl", hash = "sha256:5e56b2c1911c344527edb2bf1a4356e2fb7e086b1ba309666e1e5c2224cdca8a"},
+ {file = "dulwich-0.21.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:85d3401d08b1ec78c7d58ae987c4bb7b768a438f3daa74aeb8372bebc7fb16fa"},
+ {file = "dulwich-0.21.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90479608e49db93d8c9e4323bc0ec5496678b535446e29d8fd67dc5bbb5d51bf"},
+ {file = "dulwich-0.21.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9a6bf99f57bcac4c77fc60a58f1b322c91cc4d8c65dc341f76bf402622f89cb"},
+ {file = "dulwich-0.21.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3e68b162af2aae995355e7920f89d50d72b53d56021e5ac0a546d493b17cbf7e"},
+ {file = "dulwich-0.21.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0ab86d6d42e385bf3438e70f3c9b16de68018bd88929379e3484c0ef7990bd3c"},
+ {file = "dulwich-0.21.5-cp37-cp37m-win32.whl", hash = "sha256:f2eeca6d61366cf5ee8aef45bed4245a67d4c0f0d731dc2383eabb80fa695683"},
+ {file = "dulwich-0.21.5-cp37-cp37m-win_amd64.whl", hash = "sha256:1b20a3656b48c941d49c536824e1e5278a695560e8de1a83b53a630143c4552e"},
+ {file = "dulwich-0.21.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3932b5e17503b265a85f1eda77ede647681c3bab53bc9572955b6b282abd26ea"},
+ {file = "dulwich-0.21.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6616132d219234580de88ceb85dd51480dc43b1bdc05887214b8dd9cfd4a9d40"},
+ {file = "dulwich-0.21.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:eaf6c7fb6b13495c19c9aace88821c2ade3c8c55b4e216cd7cc55d3e3807d7fa"},
+ {file = "dulwich-0.21.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be12a46f73023970125808a4a78f610c055373096c1ecea3280edee41613eba8"},
+ {file = "dulwich-0.21.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baecef0d8b9199822c7912876a03a1af17833f6c0d461efb62decebd45897e49"},
+ {file = "dulwich-0.21.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:82f632afb9c7c341a875d46aaa3e6c5e586c7a64ce36c9544fa400f7e4f29754"},
+ {file = "dulwich-0.21.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82cdf482f8f51fcc965ffad66180b54a9abaea9b1e985a32e1acbfedf6e0e363"},
+ {file = "dulwich-0.21.5-cp38-cp38-win32.whl", hash = "sha256:c8ded43dc0bd2e65420eb01e778034be5ca7f72e397a839167eda7dcb87c4248"},
+ {file = "dulwich-0.21.5-cp38-cp38-win_amd64.whl", hash = "sha256:2aba0fdad2a19bd5bb3aad6882580cb33359c67b48412ccd4cfccd932012b35e"},
+ {file = "dulwich-0.21.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fd4ad079758514375f11469e081723ba8831ce4eaa1a64b41f06a3a866d5ac34"},
+ {file = "dulwich-0.21.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7fe62685bf356bfb4d0738f84a3fcf0d1fc9e11fee152e488a20b8c66a52429e"},
+ {file = "dulwich-0.21.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:aae448da7d80306dda4fc46292fed7efaa466294571ab3448be16714305076f1"},
+ {file = "dulwich-0.21.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b24cb1fad0525dba4872e9381bc576ea2a6dcdf06b0ed98f8e953e3b1d719b89"},
+ {file = "dulwich-0.21.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1e39b7c2c9bda6acae83b25054650a8bb7e373e886e2334721d384e1479bf04b"},
+ {file = "dulwich-0.21.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:26456dba39d1209fca17187db06967130e27eeecad2b3c2bbbe63467b0bf09d6"},
+ {file = "dulwich-0.21.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:281310644e02e3aa6d76bcaffe2063b9031213c4916b5f1a6e68c25bdecfaba4"},
+ {file = "dulwich-0.21.5-cp39-cp39-win32.whl", hash = "sha256:4814ca3209dabe0fe7719e9545fbdad7f8bb250c5a225964fe2a31069940c4cf"},
+ {file = "dulwich-0.21.5-cp39-cp39-win_amd64.whl", hash = "sha256:c922a4573267486be0ef85216f2da103fb38075b8465dc0e90457843884e4860"},
+ {file = "dulwich-0.21.5-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e52b20c4368171b7d32bd3ab0f1d2402e76ad4f2ea915ff9aa73bc9fa2b54d6d"},
+ {file = "dulwich-0.21.5-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeb736d777ee21f2117a90fc453ee181aa7eedb9e255b5ef07c51733f3fe5cb6"},
+ {file = "dulwich-0.21.5-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e8a79c1ed7166f32ad21974fa98d11bf6fd74e94a47e754c777c320e01257c6"},
+ {file = "dulwich-0.21.5-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:b943517e30bd651fbc275a892bb96774f3893d95fe5a4dedd84496a98eaaa8ab"},
+ {file = "dulwich-0.21.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:32493a456358a3a6c15bbda07106fc3d4cc50834ee18bc7717968d18be59b223"},
+ {file = "dulwich-0.21.5-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0aa44b812d978fc22a04531f5090c3c369d5facd03fa6e0501d460a661800c7f"},
+ {file = "dulwich-0.21.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f46bcb6777e5f9f4af24a2bd029e88b77316269d24ce66be590e546a0d8f7b7"},
+ {file = "dulwich-0.21.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a917fd3b4493db3716da2260f16f6b18f68d46fbe491d851d154fc0c2d984ae4"},
+ {file = "dulwich-0.21.5-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:684c52cff867d10c75a7238151ca307582b3d251bbcd6db9e9cffbc998ef804e"},
+ {file = "dulwich-0.21.5-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9019189d7a8f7394df6a22cd5b484238c5776e42282ad5d6d6c626b4c5f43597"},
+ {file = "dulwich-0.21.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:494024f74c2eef9988adb4352b3651ac1b6c0466176ec62b69d3d3672167ba68"},
+ {file = "dulwich-0.21.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:f9b6ac1b1c67fc6083c42b7b6cd3b211292c8a6517216c733caf23e8b103ab6d"},
+ {file = "dulwich-0.21.5.tar.gz", hash = "sha256:70955e4e249ddda6e34a4636b90f74e931e558f993b17c52570fa6144b993103"},
+]
+
+[package.dependencies]
+typing-extensions = {version = "*", markers = "python_version <= \"3.7\""}
+urllib3 = ">=1.25"
+
+[package.extras]
+fastimport = ["fastimport"]
+https = ["urllib3 (>=1.24.1)"]
+paramiko = ["paramiko"]
+pgp = ["gpg"]
+
+[[package]]
+name = "exceptiongroup"
+version = "1.1.1"
+description = "Backport of PEP 654 (exception groups)"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "exceptiongroup-1.1.1-py3-none-any.whl", hash = "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e"},
+ {file = "exceptiongroup-1.1.1.tar.gz", hash = "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"},
+]
+
+[package.extras]
+test = ["pytest (>=6)"]
+
+[[package]]
+name = "filelock"
+version = "3.12.2"
+description = "A platform independent file lock."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "filelock-3.12.2-py3-none-any.whl", hash = "sha256:cbb791cdea2a72f23da6ac5b5269ab0a0d161e9ef0100e653b69049a7706d1ec"},
+ {file = "filelock-3.12.2.tar.gz", hash = "sha256:002740518d8aa59a26b0c76e10fb8c6e15eae825d34b6fdf670333fd7b938d81"},
+]
+
+[package.extras]
+docs = ["furo (>=2023.5.20)", "sphinx (>=7.0.1)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)"]
+testing = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "diff-cover (>=7.5)", "pytest (>=7.3.1)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)", "pytest-timeout (>=2.1)"]
+
+[[package]]
+name = "flake8"
+version = "6.0.0"
+description = "the modular source code checker: pep8 pyflakes and co"
+category = "dev"
+optional = false
+python-versions = ">=3.8.1"
+files = [
+ {file = "flake8-6.0.0-py2.py3-none-any.whl", hash = "sha256:3833794e27ff64ea4e9cf5d410082a8b97ff1a06c16aa3d2027339cd0f1195c7"},
+ {file = "flake8-6.0.0.tar.gz", hash = "sha256:c61007e76655af75e6785a931f452915b371dc48f56efd765247c8fe68f2b181"},
+]
+
+[package.dependencies]
+mccabe = ">=0.7.0,<0.8.0"
+pycodestyle = ">=2.10.0,<2.11.0"
+pyflakes = ">=3.0.0,<3.1.0"
+
+[[package]]
+name = "frozenlist"
+version = "1.3.3"
+description = "A list-like structure which implements collections.abc.MutableSequence"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff8bf625fe85e119553b5383ba0fb6aa3d0ec2ae980295aaefa552374926b3f4"},
+ {file = "frozenlist-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dfbac4c2dfcc082fcf8d942d1e49b6aa0766c19d3358bd86e2000bf0fa4a9cf0"},
+ {file = "frozenlist-1.3.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b1c63e8d377d039ac769cd0926558bb7068a1f7abb0f003e3717ee003ad85530"},
+ {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7fdfc24dcfce5b48109867c13b4cb15e4660e7bd7661741a391f821f23dfdca7"},
+ {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2c926450857408e42f0bbc295e84395722ce74bae69a3b2aa2a65fe22cb14b99"},
+ {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1841e200fdafc3d51f974d9d377c079a0694a8f06de2e67b48150328d66d5483"},
+ {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f470c92737afa7d4c3aacc001e335062d582053d4dbe73cda126f2d7031068dd"},
+ {file = "frozenlist-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:783263a4eaad7c49983fe4b2e7b53fa9770c136c270d2d4bbb6d2192bf4d9caf"},
+ {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:924620eef691990dfb56dc4709f280f40baee568c794b5c1885800c3ecc69816"},
+ {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ae4dc05c465a08a866b7a1baf360747078b362e6a6dbeb0c57f234db0ef88ae0"},
+ {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:bed331fe18f58d844d39ceb398b77d6ac0b010d571cba8267c2e7165806b00ce"},
+ {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:02c9ac843e3390826a265e331105efeab489ffaf4dd86384595ee8ce6d35ae7f"},
+ {file = "frozenlist-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9545a33965d0d377b0bc823dcabf26980e77f1b6a7caa368a365a9497fb09420"},
+ {file = "frozenlist-1.3.3-cp310-cp310-win32.whl", hash = "sha256:d5cd3ab21acbdb414bb6c31958d7b06b85eeb40f66463c264a9b343a4e238642"},
+ {file = "frozenlist-1.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:b756072364347cb6aa5b60f9bc18e94b2f79632de3b0190253ad770c5df17db1"},
+ {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b4395e2f8d83fbe0c627b2b696acce67868793d7d9750e90e39592b3626691b7"},
+ {file = "frozenlist-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14143ae966a6229350021384870458e4777d1eae4c28d1a7aa47f24d030e6678"},
+ {file = "frozenlist-1.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5d8860749e813a6f65bad8285a0520607c9500caa23fea6ee407e63debcdbef6"},
+ {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23d16d9f477bb55b6154654e0e74557040575d9d19fe78a161bd33d7d76808e8"},
+ {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb82dbba47a8318e75f679690190c10a5e1f447fbf9df41cbc4c3afd726d88cb"},
+ {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9309869032abb23d196cb4e4db574232abe8b8be1339026f489eeb34a4acfd91"},
+ {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a97b4fe50b5890d36300820abd305694cb865ddb7885049587a5678215782a6b"},
+ {file = "frozenlist-1.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c188512b43542b1e91cadc3c6c915a82a5eb95929134faf7fd109f14f9892ce4"},
+ {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:303e04d422e9b911a09ad499b0368dc551e8c3cd15293c99160c7f1f07b59a48"},
+ {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:0771aed7f596c7d73444c847a1c16288937ef988dc04fb9f7be4b2aa91db609d"},
+ {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:66080ec69883597e4d026f2f71a231a1ee9887835902dbe6b6467d5a89216cf6"},
+ {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:41fe21dc74ad3a779c3d73a2786bdf622ea81234bdd4faf90b8b03cad0c2c0b4"},
+ {file = "frozenlist-1.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f20380df709d91525e4bee04746ba612a4df0972c1b8f8e1e8af997e678c7b81"},
+ {file = "frozenlist-1.3.3-cp311-cp311-win32.whl", hash = "sha256:f30f1928162e189091cf4d9da2eac617bfe78ef907a761614ff577ef4edfb3c8"},
+ {file = "frozenlist-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:a6394d7dadd3cfe3f4b3b186e54d5d8504d44f2d58dcc89d693698e8b7132b32"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8df3de3a9ab8325f94f646609a66cbeeede263910c5c0de0101079ad541af332"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0693c609e9742c66ba4870bcee1ad5ff35462d5ffec18710b4ac89337ff16e27"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd4210baef299717db0a600d7a3cac81d46ef0e007f88c9335db79f8979c0d3d"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:394c9c242113bfb4b9aa36e2b80a05ffa163a30691c7b5a29eba82e937895d5e"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6327eb8e419f7d9c38f333cde41b9ae348bec26d840927332f17e887a8dcb70d"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e24900aa13212e75e5b366cb9065e78bbf3893d4baab6052d1aca10d46d944c"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3843f84a6c465a36559161e6c59dce2f2ac10943040c2fd021cfb70d58c4ad56"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:84610c1502b2461255b4c9b7d5e9c48052601a8957cd0aea6ec7a7a1e1fb9420"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:c21b9aa40e08e4f63a2f92ff3748e6b6c84d717d033c7b3438dd3123ee18f70e"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:efce6ae830831ab6a22b9b4091d411698145cb9b8fc869e1397ccf4b4b6455cb"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:40de71985e9042ca00b7953c4f41eabc3dc514a2d1ff534027f091bc74416401"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-win32.whl", hash = "sha256:180c00c66bde6146a860cbb81b54ee0df350d2daf13ca85b275123bbf85de18a"},
+ {file = "frozenlist-1.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:9bbbcedd75acdfecf2159663b87f1bb5cfc80e7cd99f7ddd9d66eb98b14a8411"},
+ {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:034a5c08d36649591be1cbb10e09da9f531034acfe29275fc5454a3b101ce41a"},
+ {file = "frozenlist-1.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ba64dc2b3b7b158c6660d49cdb1d872d1d0bf4e42043ad8d5006099479a194e5"},
+ {file = "frozenlist-1.3.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:47df36a9fe24054b950bbc2db630d508cca3aa27ed0566c0baf661225e52c18e"},
+ {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:008a054b75d77c995ea26629ab3a0c0d7281341f2fa7e1e85fa6153ae29ae99c"},
+ {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:841ea19b43d438a80b4de62ac6ab21cfe6827bb8a9dc62b896acc88eaf9cecba"},
+ {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e235688f42b36be2b6b06fc37ac2126a73b75fb8d6bc66dd632aa35286238703"},
+ {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca713d4af15bae6e5d79b15c10c8522859a9a89d3b361a50b817c98c2fb402a2"},
+ {file = "frozenlist-1.3.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ac5995f2b408017b0be26d4a1d7c61bce106ff3d9e3324374d66b5964325448"},
+ {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:a4ae8135b11652b08a8baf07631d3ebfe65a4c87909dbef5fa0cdde440444ee4"},
+ {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4ea42116ceb6bb16dbb7d526e242cb6747b08b7710d9782aa3d6732bd8d27649"},
+ {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:810860bb4bdce7557bc0febb84bbd88198b9dbc2022d8eebe5b3590b2ad6c842"},
+ {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ee78feb9d293c323b59a6f2dd441b63339a30edf35abcb51187d2fc26e696d13"},
+ {file = "frozenlist-1.3.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0af2e7c87d35b38732e810befb9d797a99279cbb85374d42ea61c1e9d23094b3"},
+ {file = "frozenlist-1.3.3-cp38-cp38-win32.whl", hash = "sha256:899c5e1928eec13fd6f6d8dc51be23f0d09c5281e40d9cf4273d188d9feeaf9b"},
+ {file = "frozenlist-1.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:7f44e24fa70f6fbc74aeec3e971f60a14dde85da364aa87f15d1be94ae75aeef"},
+ {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2b07ae0c1edaa0a36339ec6cce700f51b14a3fc6545fdd32930d2c83917332cf"},
+ {file = "frozenlist-1.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ebb86518203e12e96af765ee89034a1dbb0c3c65052d1b0c19bbbd6af8a145e1"},
+ {file = "frozenlist-1.3.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5cf820485f1b4c91e0417ea0afd41ce5cf5965011b3c22c400f6d144296ccbc0"},
+ {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c11e43016b9024240212d2a65043b70ed8dfd3b52678a1271972702d990ac6d"},
+ {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8fa3c6e3305aa1146b59a09b32b2e04074945ffcfb2f0931836d103a2c38f936"},
+ {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:352bd4c8c72d508778cf05ab491f6ef36149f4d0cb3c56b1b4302852255d05d5"},
+ {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65a5e4d3aa679610ac6e3569e865425b23b372277f89b5ef06cf2cdaf1ebf22b"},
+ {file = "frozenlist-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e2c1185858d7e10ff045c496bbf90ae752c28b365fef2c09cf0fa309291669"},
+ {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f163d2fd041c630fed01bc48d28c3ed4a3b003c00acd396900e11ee5316b56bb"},
+ {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:05cdb16d09a0832eedf770cb7bd1fe57d8cf4eaf5aced29c4e41e3f20b30a784"},
+ {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:8bae29d60768bfa8fb92244b74502b18fae55a80eac13c88eb0b496d4268fd2d"},
+ {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:eedab4c310c0299961ac285591acd53dc6723a1ebd90a57207c71f6e0c2153ab"},
+ {file = "frozenlist-1.3.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3bbdf44855ed8f0fbcd102ef05ec3012d6a4fd7c7562403f76ce6a52aeffb2b1"},
+ {file = "frozenlist-1.3.3-cp39-cp39-win32.whl", hash = "sha256:efa568b885bca461f7c7b9e032655c0c143d305bf01c30caf6db2854a4532b38"},
+ {file = "frozenlist-1.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:cfe33efc9cb900a4c46f91a5ceba26d6df370ffddd9ca386eb1d4f0ad97b9ea9"},
+ {file = "frozenlist-1.3.3.tar.gz", hash = "sha256:58bcc55721e8a90b88332d6cd441261ebb22342e238296bb330968952fbb3a6a"},
+]
+
+[[package]]
+name = "html5lib"
+version = "1.1"
+description = "HTML parser based on the WHATWG HTML specification"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+files = [
+ {file = "html5lib-1.1-py2.py3-none-any.whl", hash = "sha256:0d78f8fde1c230e99fe37986a60526d7049ed4bf8a9fadbad5f00e22e58e041d"},
+ {file = "html5lib-1.1.tar.gz", hash = "sha256:b2e5b40261e20f354d198eae92afc10d750afb487ed5e50f9c4eaf07c184146f"},
+]
+
+[package.dependencies]
+six = ">=1.9"
+webencodings = "*"
+
+[package.extras]
+all = ["chardet (>=2.2)", "genshi", "lxml"]
+chardet = ["chardet (>=2.2)"]
+genshi = ["genshi"]
+lxml = ["lxml"]
+
+[[package]]
+name = "idna"
+version = "3.4"
+description = "Internationalized Domain Names in Applications (IDNA)"
+category = "main"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
+ {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
+]
+
+[[package]]
+name = "imagesize"
+version = "1.4.1"
+description = "Getting image size from png/jpeg/jpeg2000/gif file"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"},
+ {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"},
+]
+
+[[package]]
+name = "importlib-metadata"
+version = "6.6.0"
+description = "Read metadata from Python packages"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "importlib_metadata-6.6.0-py3-none-any.whl", hash = "sha256:43dd286a2cd8995d5eaef7fee2066340423b818ed3fd70adf0bad5f1fac53fed"},
+ {file = "importlib_metadata-6.6.0.tar.gz", hash = "sha256:92501cdf9cc66ebd3e612f1b4f0c0765dfa42f0fa38ffb319b6bd84dd675d705"},
+]
+
+[package.dependencies]
+typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""}
+zipp = ">=0.5"
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+perf = ["ipython"]
+testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"]
+
+[[package]]
+name = "importlib-resources"
+version = "5.12.0"
+description = "Read resources from Python packages"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"},
+ {file = "importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"},
+]
+
+[package.dependencies]
+zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
+
+[[package]]
+name = "iniconfig"
+version = "2.0.0"
+description = "brain-dead simple config-ini parsing"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
+ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+]
+
+[[package]]
+name = "installer"
+version = "0.7.0"
+description = "A library for installing Python wheels."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "installer-0.7.0-py3-none-any.whl", hash = "sha256:05d1933f0a5ba7d8d6296bb6d5018e7c94fa473ceb10cf198a92ccea19c27b53"},
+ {file = "installer-0.7.0.tar.gz", hash = "sha256:a26d3e3116289bb08216e0d0f7d925fcef0b0194eedfa0c944bcaaa106c4b631"},
+]
+
+[[package]]
+name = "isort"
+version = "4.3.21"
+description = "A Python utility / library to sort Python imports."
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "isort-4.3.21-py2.py3-none-any.whl", hash = "sha256:6e811fcb295968434526407adb8796944f1988c5b65e8139058f2014cbe100fd"},
+ {file = "isort-4.3.21.tar.gz", hash = "sha256:54da7e92468955c4fceacd0c86bd0ec997b0e1ee80d97f67c35a78b719dccab1"},
+]
+
+[package.extras]
+pipfile = ["pipreqs", "requirementslib"]
+pyproject = ["toml"]
+requirements = ["pip-api", "pipreqs"]
+xdg-home = ["appdirs (>=1.4.0)"]
+
+[[package]]
+name = "jaraco-classes"
+version = "3.2.3"
+description = "Utility functions for Python class constructs"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "jaraco.classes-3.2.3-py3-none-any.whl", hash = "sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158"},
+ {file = "jaraco.classes-3.2.3.tar.gz", hash = "sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a"},
+]
+
+[package.dependencies]
+more-itertools = "*"
+
+[package.extras]
+docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"]
+testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
+
+[[package]]
+name = "jeepney"
+version = "0.8.0"
+description = "Low-level, pure Python DBus protocol wrapper."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "jeepney-0.8.0-py3-none-any.whl", hash = "sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755"},
+ {file = "jeepney-0.8.0.tar.gz", hash = "sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806"},
+]
+
+[package.extras]
+test = ["async-timeout", "pytest", "pytest-asyncio (>=0.17)", "pytest-trio", "testpath", "trio"]
+trio = ["async_generator", "trio"]
+
+[[package]]
+name = "jinja2"
+version = "3.1.2"
+description = "A very fast and expressive template engine."
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"},
+ {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
+]
+
+[package.dependencies]
+MarkupSafe = ">=2.0"
+
+[package.extras]
+i18n = ["Babel (>=2.7)"]
+
+[[package]]
+name = "jsonschema"
+version = "4.17.3"
+description = "An implementation of JSON Schema validation for Python"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"},
+ {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"},
+]
+
+[package.dependencies]
+attrs = ">=17.4.0"
+importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
+importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
+pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
+pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2"
+typing-extensions = {version = "*", markers = "python_version < \"3.8\""}
+
+[package.extras]
+format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
+format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
+
+[[package]]
+name = "keyring"
+version = "23.13.1"
+description = "Store and access your passwords safely."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "keyring-23.13.1-py3-none-any.whl", hash = "sha256:771ed2a91909389ed6148631de678f82ddc73737d85a927f382a8a1b157898cd"},
+ {file = "keyring-23.13.1.tar.gz", hash = "sha256:ba2e15a9b35e21908d0aaf4e0a47acc52d6ae33444df0da2b49d41a46ef6d678"},
+]
+
+[package.dependencies]
+importlib-metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""}
+importlib-resources = {version = "*", markers = "python_version < \"3.9\""}
+"jaraco.classes" = "*"
+jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""}
+pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""}
+SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""}
+
+[package.extras]
+completion = ["shtab"]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"]
+testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
+
+[[package]]
+name = "lazy-object-proxy"
+version = "1.9.0"
+description = "A fast and thorough lazy object proxy."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "lazy-object-proxy-1.9.0.tar.gz", hash = "sha256:659fb5809fa4629b8a1ac5106f669cfc7bef26fbb389dda53b3e010d1ac4ebae"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b40387277b0ed2d0602b8293b94d7257e17d1479e257b4de114ea11a8cb7f2d7"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8c6cfb338b133fbdbc5cfaa10fe3c6aeea827db80c978dbd13bc9dd8526b7d4"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:721532711daa7db0d8b779b0bb0318fa87af1c10d7fe5e52ef30f8eff254d0cd"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:66a3de4a3ec06cd8af3f61b8e1ec67614fbb7c995d02fa224813cb7afefee701"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1aa3de4088c89a1b69f8ec0dcc169aa725b0ff017899ac568fe44ddc1396df46"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-win32.whl", hash = "sha256:f0705c376533ed2a9e5e97aacdbfe04cecd71e0aa84c7c0595d02ef93b6e4455"},
+ {file = "lazy_object_proxy-1.9.0-cp310-cp310-win_amd64.whl", hash = "sha256:ea806fd4c37bf7e7ad82537b0757999264d5f70c45468447bb2b91afdbe73a6e"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:946d27deaff6cf8452ed0dba83ba38839a87f4f7a9732e8f9fd4107b21e6ff07"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79a31b086e7e68b24b99b23d57723ef7e2c6d81ed21007b6281ebcd1688acb0a"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f699ac1c768270c9e384e4cbd268d6e67aebcfae6cd623b4d7c3bfde5a35db59"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bfb38f9ffb53b942f2b5954e0f610f1e721ccebe9cce9025a38c8ccf4a5183a4"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:189bbd5d41ae7a498397287c408617fe5c48633e7755287b21d741f7db2706a9"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-win32.whl", hash = "sha256:81fc4d08b062b535d95c9ea70dbe8a335c45c04029878e62d744bdced5141586"},
+ {file = "lazy_object_proxy-1.9.0-cp311-cp311-win_amd64.whl", hash = "sha256:f2457189d8257dd41ae9b434ba33298aec198e30adf2dcdaaa3a28b9994f6adb"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:d9e25ef10a39e8afe59a5c348a4dbf29b4868ab76269f81ce1674494e2565a6e"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cbf9b082426036e19c6924a9ce90c740a9861e2bdc27a4834fd0a910742ac1e8"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f5fa4a61ce2438267163891961cfd5e32ec97a2c444e5b842d574251ade27d2"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:8fa02eaab317b1e9e03f69aab1f91e120e7899b392c4fc19807a8278a07a97e8"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e7c21c95cae3c05c14aafffe2865bbd5e377cfc1348c4f7751d9dc9a48ca4bda"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-win32.whl", hash = "sha256:f12ad7126ae0c98d601a7ee504c1122bcef553d1d5e0c3bfa77b16b3968d2734"},
+ {file = "lazy_object_proxy-1.9.0-cp37-cp37m-win_amd64.whl", hash = "sha256:edd20c5a55acb67c7ed471fa2b5fb66cb17f61430b7a6b9c3b4a1e40293b1671"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2d0daa332786cf3bb49e10dc6a17a52f6a8f9601b4cf5c295a4f85854d61de63"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cd077f3d04a58e83d04b20e334f678c2b0ff9879b9375ed107d5d07ff160171"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:660c94ea760b3ce47d1855a30984c78327500493d396eac4dfd8bd82041b22be"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:212774e4dfa851e74d393a2370871e174d7ff0ebc980907723bb67d25c8a7c30"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f0117049dd1d5635bbff65444496c90e0baa48ea405125c088e93d9cf4525b11"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-win32.whl", hash = "sha256:0a891e4e41b54fd5b8313b96399f8b0e173bbbfc03c7631f01efbe29bb0bcf82"},
+ {file = "lazy_object_proxy-1.9.0-cp38-cp38-win_amd64.whl", hash = "sha256:9990d8e71b9f6488e91ad25f322898c136b008d87bf852ff65391b004da5e17b"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9e7551208b2aded9c1447453ee366f1c4070602b3d932ace044715d89666899b"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f83ac4d83ef0ab017683d715ed356e30dd48a93746309c8f3517e1287523ef4"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7322c3d6f1766d4ef1e51a465f47955f1e8123caee67dd641e67d539a534d006"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:18b78ec83edbbeb69efdc0e9c1cb41a3b1b1ed11ddd8ded602464c3fc6020494"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:09763491ce220c0299688940f8dc2c5d05fd1f45af1e42e636b2e8b2303e4382"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-win32.whl", hash = "sha256:9090d8e53235aa280fc9239a86ae3ea8ac58eff66a705fa6aa2ec4968b95c821"},
+ {file = "lazy_object_proxy-1.9.0-cp39-cp39-win_amd64.whl", hash = "sha256:db1c1722726f47e10e0b5fdbf15ac3b8adb58c091d12b3ab713965795036985f"},
+]
+
+[[package]]
+name = "lockfile"
+version = "0.12.2"
+description = "Platform-independent file locking module"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "lockfile-0.12.2-py2.py3-none-any.whl", hash = "sha256:6c3cb24f344923d30b2785d5ad75182c8ea7ac1b6171b08657258ec7429d50fa"},
+ {file = "lockfile-0.12.2.tar.gz", hash = "sha256:6aed02de03cba24efabcd600b30540140634fc06cfa603822d508d5361e9f799"},
+]
+
+[[package]]
+name = "markupsafe"
+version = "2.1.3"
+description = "Safely add untrusted strings to HTML/XML markup."
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"},
+ {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"},
+]
+
+[[package]]
+name = "mccabe"
+version = "0.7.0"
+description = "McCabe checker, plugin for flake8"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
+ {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
+]
+
+[[package]]
+name = "more-itertools"
+version = "9.1.0"
+description = "More routines for operating on iterables, beyond itertools"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "more-itertools-9.1.0.tar.gz", hash = "sha256:cabaa341ad0389ea83c17a94566a53ae4c9d07349861ecb14dc6d0345cf9ac5d"},
+ {file = "more_itertools-9.1.0-py3-none-any.whl", hash = "sha256:d2bc7f02446e86a68911e58ded76d6561eea00cddfb2a91e7019bbb586c799f3"},
+]
+
+[[package]]
+name = "msgpack"
+version = "1.0.5"
+description = "MessagePack serializer"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "msgpack-1.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:525228efd79bb831cf6830a732e2e80bc1b05436b086d4264814b4b2955b2fa9"},
+ {file = "msgpack-1.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4f8d8b3bf1ff2672567d6b5c725a1b347fe838b912772aa8ae2bf70338d5a198"},
+ {file = "msgpack-1.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cdc793c50be3f01106245a61b739328f7dccc2c648b501e237f0699fe1395b81"},
+ {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cb47c21a8a65b165ce29f2bec852790cbc04936f502966768e4aae9fa763cb7"},
+ {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e42b9594cc3bf4d838d67d6ed62b9e59e201862a25e9a157019e171fbe672dd3"},
+ {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:55b56a24893105dc52c1253649b60f475f36b3aa0fc66115bffafb624d7cb30b"},
+ {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1967f6129fc50a43bfe0951c35acbb729be89a55d849fab7686004da85103f1c"},
+ {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20a97bf595a232c3ee6d57ddaadd5453d174a52594bf9c21d10407e2a2d9b3bd"},
+ {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d25dd59bbbbb996eacf7be6b4ad082ed7eacc4e8f3d2df1ba43822da9bfa122a"},
+ {file = "msgpack-1.0.5-cp310-cp310-win32.whl", hash = "sha256:382b2c77589331f2cb80b67cc058c00f225e19827dbc818d700f61513ab47bea"},
+ {file = "msgpack-1.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:4867aa2df9e2a5fa5f76d7d5565d25ec76e84c106b55509e78c1ede0f152659a"},
+ {file = "msgpack-1.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9f5ae84c5c8a857ec44dc180a8b0cc08238e021f57abdf51a8182e915e6299f0"},
+ {file = "msgpack-1.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e6ca5d5699bcd89ae605c150aee83b5321f2115695e741b99618f4856c50898"},
+ {file = "msgpack-1.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5494ea30d517a3576749cad32fa27f7585c65f5f38309c88c6d137877fa28a5a"},
+ {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ab2f3331cb1b54165976a9d976cb251a83183631c88076613c6c780f0d6e45a"},
+ {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28592e20bbb1620848256ebc105fc420436af59515793ed27d5c77a217477705"},
+ {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe5c63197c55bce6385d9aee16c4d0641684628f63ace85f73571e65ad1c1e8d"},
+ {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ed40e926fa2f297e8a653c954b732f125ef97bdd4c889f243182299de27e2aa9"},
+ {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b2de4c1c0538dcb7010902a2b97f4e00fc4ddf2c8cda9749af0e594d3b7fa3d7"},
+ {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bf22a83f973b50f9d38e55c6aade04c41ddda19b00c4ebc558930d78eecc64ed"},
+ {file = "msgpack-1.0.5-cp311-cp311-win32.whl", hash = "sha256:c396e2cc213d12ce017b686e0f53497f94f8ba2b24799c25d913d46c08ec422c"},
+ {file = "msgpack-1.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c4c68d87497f66f96d50142a2b73b97972130d93677ce930718f68828b382e2"},
+ {file = "msgpack-1.0.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a2b031c2e9b9af485d5e3c4520f4220d74f4d222a5b8dc8c1a3ab9448ca79c57"},
+ {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f837b93669ce4336e24d08286c38761132bc7ab29782727f8557e1eb21b2080"},
+ {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1d46dfe3832660f53b13b925d4e0fa1432b00f5f7210eb3ad3bb9a13c6204a6"},
+ {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:366c9a7b9057e1547f4ad51d8facad8b406bab69c7d72c0eb6f529cf76d4b85f"},
+ {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4c075728a1095efd0634a7dccb06204919a2f67d1893b6aa8e00497258bf926c"},
+ {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:f933bbda5a3ee63b8834179096923b094b76f0c7a73c1cfe8f07ad608c58844b"},
+ {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:36961b0568c36027c76e2ae3ca1132e35123dcec0706c4b7992683cc26c1320c"},
+ {file = "msgpack-1.0.5-cp36-cp36m-win32.whl", hash = "sha256:b5ef2f015b95f912c2fcab19c36814963b5463f1fb9049846994b007962743e9"},
+ {file = "msgpack-1.0.5-cp36-cp36m-win_amd64.whl", hash = "sha256:288e32b47e67f7b171f86b030e527e302c91bd3f40fd9033483f2cacc37f327a"},
+ {file = "msgpack-1.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:137850656634abddfb88236008339fdaba3178f4751b28f270d2ebe77a563b6c"},
+ {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c05a4a96585525916b109bb85f8cb6511db1c6f5b9d9cbcbc940dc6b4be944b"},
+ {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56a62ec00b636583e5cb6ad313bbed36bb7ead5fa3a3e38938503142c72cba4f"},
+ {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef8108f8dedf204bb7b42994abf93882da1159728a2d4c5e82012edd92c9da9f"},
+ {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1835c84d65f46900920b3708f5ba829fb19b1096c1800ad60bae8418652a951d"},
+ {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e57916ef1bd0fee4f21c4600e9d1da352d8816b52a599c46460e93a6e9f17086"},
+ {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:17358523b85973e5f242ad74aa4712b7ee560715562554aa2134d96e7aa4cbbf"},
+ {file = "msgpack-1.0.5-cp37-cp37m-win32.whl", hash = "sha256:cb5aaa8c17760909ec6cb15e744c3ebc2ca8918e727216e79607b7bbce9c8f77"},
+ {file = "msgpack-1.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:ab31e908d8424d55601ad7075e471b7d0140d4d3dd3272daf39c5c19d936bd82"},
+ {file = "msgpack-1.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b72d0698f86e8d9ddf9442bdedec15b71df3598199ba33322d9711a19f08145c"},
+ {file = "msgpack-1.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:379026812e49258016dd84ad79ac8446922234d498058ae1d415f04b522d5b2d"},
+ {file = "msgpack-1.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:332360ff25469c346a1c5e47cbe2a725517919892eda5cfaffe6046656f0b7bb"},
+ {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:476a8fe8fae289fdf273d6d2a6cb6e35b5a58541693e8f9f019bfe990a51e4ba"},
+ {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9985b214f33311df47e274eb788a5893a761d025e2b92c723ba4c63936b69b1"},
+ {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48296af57cdb1d885843afd73c4656be5c76c0c6328db3440c9601a98f303d87"},
+ {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:addab7e2e1fcc04bd08e4eb631c2a90960c340e40dfc4a5e24d2ff0d5a3b3edb"},
+ {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:916723458c25dfb77ff07f4c66aed34e47503b2eb3188b3adbec8d8aa6e00f48"},
+ {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:821c7e677cc6acf0fd3f7ac664c98803827ae6de594a9f99563e48c5a2f27eb0"},
+ {file = "msgpack-1.0.5-cp38-cp38-win32.whl", hash = "sha256:1c0f7c47f0087ffda62961d425e4407961a7ffd2aa004c81b9c07d9269512f6e"},
+ {file = "msgpack-1.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:bae7de2026cbfe3782c8b78b0db9cbfc5455e079f1937cb0ab8d133496ac55e1"},
+ {file = "msgpack-1.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:20c784e66b613c7f16f632e7b5e8a1651aa5702463d61394671ba07b2fc9e025"},
+ {file = "msgpack-1.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:266fa4202c0eb94d26822d9bfd7af25d1e2c088927fe8de9033d929dd5ba24c5"},
+ {file = "msgpack-1.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18334484eafc2b1aa47a6d42427da7fa8f2ab3d60b674120bce7a895a0a85bdd"},
+ {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57e1f3528bd95cc44684beda696f74d3aaa8a5e58c816214b9046512240ef437"},
+ {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:586d0d636f9a628ddc6a17bfd45aa5b5efaf1606d2b60fa5d87b8986326e933f"},
+ {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a740fa0e4087a734455f0fc3abf5e746004c9da72fbd541e9b113013c8dc3282"},
+ {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3055b0455e45810820db1f29d900bf39466df96ddca11dfa6d074fa47054376d"},
+ {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a61215eac016f391129a013c9e46f3ab308db5f5ec9f25811e811f96962599a8"},
+ {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:362d9655cd369b08fda06b6657a303eb7172d5279997abe094512e919cf74b11"},
+ {file = "msgpack-1.0.5-cp39-cp39-win32.whl", hash = "sha256:ac9dd47af78cae935901a9a500104e2dea2e253207c924cc95de149606dc43cc"},
+ {file = "msgpack-1.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:06f5174b5f8ed0ed919da0e62cbd4ffde676a374aba4020034da05fab67b9164"},
+ {file = "msgpack-1.0.5.tar.gz", hash = "sha256:c075544284eadc5cddc70f4757331d99dcbc16b2bbd4849d15f8aae4cf36d31c"},
+]
+
+[[package]]
+name = "multidict"
+version = "6.0.4"
+description = "multidict implementation"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"},
+ {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"},
+ {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"},
+ {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"},
+ {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"},
+ {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"},
+ {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"},
+ {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"},
+ {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"},
+ {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"},
+ {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"},
+ {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"},
+ {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"},
+ {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"},
+ {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"},
+ {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"},
+ {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"},
+ {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"},
+ {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"},
+ {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"},
+ {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"},
+ {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"},
+ {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"},
+ {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"},
+]
+
+[[package]]
+name = "mypy"
+version = "1.3.0"
+description = "Optional static typing for Python"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "mypy-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c1eb485cea53f4f5284e5baf92902cd0088b24984f4209e25981cc359d64448d"},
+ {file = "mypy-1.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4c99c3ecf223cf2952638da9cd82793d8f3c0c5fa8b6ae2b2d9ed1e1ff51ba85"},
+ {file = "mypy-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:550a8b3a19bb6589679a7c3c31f64312e7ff482a816c96e0cecec9ad3a7564dd"},
+ {file = "mypy-1.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cbc07246253b9e3d7d74c9ff948cd0fd7a71afcc2b77c7f0a59c26e9395cb152"},
+ {file = "mypy-1.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:a22435632710a4fcf8acf86cbd0d69f68ac389a3892cb23fbad176d1cddaf228"},
+ {file = "mypy-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6e33bb8b2613614a33dff70565f4c803f889ebd2f859466e42b46e1df76018dd"},
+ {file = "mypy-1.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7d23370d2a6b7a71dc65d1266f9a34e4cde9e8e21511322415db4b26f46f6b8c"},
+ {file = "mypy-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:658fe7b674769a0770d4b26cb4d6f005e88a442fe82446f020be8e5f5efb2fae"},
+ {file = "mypy-1.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6e42d29e324cdda61daaec2336c42512e59c7c375340bd202efa1fe0f7b8f8ca"},
+ {file = "mypy-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:d0b6c62206e04061e27009481cb0ec966f7d6172b5b936f3ead3d74f29fe3dcf"},
+ {file = "mypy-1.3.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:76ec771e2342f1b558c36d49900dfe81d140361dd0d2df6cd71b3db1be155409"},
+ {file = "mypy-1.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebc95f8386314272bbc817026f8ce8f4f0d2ef7ae44f947c4664efac9adec929"},
+ {file = "mypy-1.3.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:faff86aa10c1aa4a10e1a301de160f3d8fc8703b88c7e98de46b531ff1276a9a"},
+ {file = "mypy-1.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:8c5979d0deb27e0f4479bee18ea0f83732a893e81b78e62e2dda3e7e518c92ee"},
+ {file = "mypy-1.3.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c5d2cc54175bab47011b09688b418db71403aefad07cbcd62d44010543fc143f"},
+ {file = "mypy-1.3.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:87df44954c31d86df96c8bd6e80dfcd773473e877ac6176a8e29898bfb3501cb"},
+ {file = "mypy-1.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:473117e310febe632ddf10e745a355714e771ffe534f06db40702775056614c4"},
+ {file = "mypy-1.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:74bc9b6e0e79808bf8678d7678b2ae3736ea72d56eede3820bd3849823e7f305"},
+ {file = "mypy-1.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:44797d031a41516fcf5cbfa652265bb994e53e51994c1bd649ffcd0c3a7eccbf"},
+ {file = "mypy-1.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ddae0f39ca146972ff6bb4399f3b2943884a774b8771ea0a8f50e971f5ea5ba8"},
+ {file = "mypy-1.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1c4c42c60a8103ead4c1c060ac3cdd3ff01e18fddce6f1016e08939647a0e703"},
+ {file = "mypy-1.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e86c2c6852f62f8f2b24cb7a613ebe8e0c7dc1402c61d36a609174f63e0ff017"},
+ {file = "mypy-1.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:f9dca1e257d4cc129517779226753dbefb4f2266c4eaad610fc15c6a7e14283e"},
+ {file = "mypy-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:95d8d31a7713510685b05fbb18d6ac287a56c8f6554d88c19e73f724a445448a"},
+ {file = "mypy-1.3.0-py3-none-any.whl", hash = "sha256:a8763e72d5d9574d45ce5881962bc8e9046bf7b375b0abf031f3e6811732a897"},
+ {file = "mypy-1.3.0.tar.gz", hash = "sha256:e1f4d16e296f5135624b34e8fb741eb0eadedca90862405b1f1fde2040b9bd11"},
+]
+
+[package.dependencies]
+mypy-extensions = ">=1.0.0"
+tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""}
+typing-extensions = ">=3.10"
+
+[package.extras]
+dmypy = ["psutil (>=4.0)"]
+install-types = ["pip"]
+python2 = ["typed-ast (>=1.4.0,<2)"]
+reports = ["lxml"]
+
+[[package]]
+name = "mypy-extensions"
+version = "1.0.0"
+description = "Type system extensions for programs checked with the mypy type checker."
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
+ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
+]
+
+[[package]]
+name = "packaging"
+version = "23.1"
+description = "Core utilities for Python packages"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"},
+ {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"},
+]
+
+[[package]]
+name = "pastel"
+version = "0.2.1"
+description = "Bring colors to your terminal."
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "pastel-0.2.1-py2.py3-none-any.whl", hash = "sha256:4349225fcdf6c2bb34d483e523475de5bb04a5c10ef711263452cb37d7dd4364"},
+ {file = "pastel-0.2.1.tar.gz", hash = "sha256:e6581ac04e973cac858828c6202c1e1e81fee1dc7de7683f3e1ffe0bfd8a573d"},
+]
+
+[[package]]
+name = "pathspec"
+version = "0.11.1"
+description = "Utility library for gitignore style pattern matching of file paths."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pathspec-0.11.1-py3-none-any.whl", hash = "sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293"},
+ {file = "pathspec-0.11.1.tar.gz", hash = "sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687"},
+]
+
+[[package]]
+name = "pexpect"
+version = "4.8.0"
+description = "Pexpect allows easy control of interactive console applications."
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"},
+ {file = "pexpect-4.8.0.tar.gz", hash = "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"},
+]
+
+[package.dependencies]
+ptyprocess = ">=0.5"
+
+[[package]]
+name = "pkginfo"
+version = "1.9.6"
+description = "Query metadata from sdists / bdists / installed packages."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pkginfo-1.9.6-py3-none-any.whl", hash = "sha256:4b7a555a6d5a22169fcc9cf7bfd78d296b0361adad412a346c1226849af5e546"},
+ {file = "pkginfo-1.9.6.tar.gz", hash = "sha256:8fd5896e8718a4372f0ea9cc9d96f6417c9b986e23a4d116dda26b62cc29d046"},
+]
+
+[package.extras]
+testing = ["pytest", "pytest-cov"]
+
+[[package]]
+name = "pkgutil-resolve-name"
+version = "1.3.10"
+description = "Resolve a name to an object."
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"},
+ {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"},
+]
+
+[[package]]
+name = "platformdirs"
+version = "3.5.3"
+description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "platformdirs-3.5.3-py3-none-any.whl", hash = "sha256:0ade98a4895e87dc51d47151f7d2ec290365a585151d97b4d8d6312ed6132fed"},
+ {file = "platformdirs-3.5.3.tar.gz", hash = "sha256:e48fabd87db8f3a7df7150a4a5ea22c546ee8bc39bc2473244730d4b56d2cc4e"},
+]
+
+[package.dependencies]
+typing-extensions = {version = ">=4.6.3", markers = "python_version < \"3.8\""}
+
+[package.extras]
+docs = ["furo (>=2023.5.20)", "proselint (>=0.13)", "sphinx (>=7.0.1)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)"]
+test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.3.1)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)"]
+
+[[package]]
+name = "pluggy"
+version = "1.0.0"
+description = "plugin and hook calling mechanisms for python"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"},
+ {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"},
+]
+
+[package.dependencies]
+importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""}
+
+[package.extras]
+dev = ["pre-commit", "tox"]
+testing = ["pytest", "pytest-benchmark"]
+
+[[package]]
+name = "poethepoet"
+version = "0.18.1"
+description = "A task runner that works well with poetry."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "poethepoet-0.18.1-py3-none-any.whl", hash = "sha256:e85727bf6f4a10bf6c1a43026bdeb40df689bea3c4682d03cbe531cabc8f2ba6"},
+ {file = "poethepoet-0.18.1.tar.gz", hash = "sha256:5f3566b14c2f5dccdfbc3bb26f0096006b38dc0b9c74bd4f8dd1eba7b0e29f6a"},
+]
+
+[package.dependencies]
+pastel = ">=0.2.1,<0.3.0"
+tomli = ">=1.2.2"
+
+[package.extras]
+poetry-plugin = ["poetry (>=1.0,<2.0)"]
+
+[[package]]
+name = "poetry"
+version = "1.5.1"
+description = "Python dependency management and packaging made easy."
+category = "dev"
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "poetry-1.5.1-py3-none-any.whl", hash = "sha256:dfc7ce3a38ae216c0465694e2e674bef6eb1a2ba81aa47a26f9dc03362fe2f5f"},
+ {file = "poetry-1.5.1.tar.gz", hash = "sha256:cc7ea4524d1a11558006224bfe8ba8ed071417d4eb5ef6c89decc6a37d437eeb"},
+]
+
+[package.dependencies]
+"backports.cached-property" = {version = ">=1.0.2,<2.0.0", markers = "python_version < \"3.8\""}
+build = ">=0.10.0,<0.11.0"
+cachecontrol = {version = ">=0.12.9,<0.13.0", extras = ["filecache"]}
+cleo = ">=2.0.0,<3.0.0"
+crashtest = ">=0.4.1,<0.5.0"
+dulwich = ">=0.21.2,<0.22.0"
+filelock = ">=3.8.0,<4.0.0"
+html5lib = ">=1.0,<2.0"
+importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""}
+installer = ">=0.7.0,<0.8.0"
+jsonschema = ">=4.10.0,<5.0.0"
+keyring = ">=23.9.0,<24.0.0"
+lockfile = ">=0.12.2,<0.13.0"
+packaging = ">=20.4"
+pexpect = ">=4.7.0,<5.0.0"
+pkginfo = ">=1.9.4,<2.0.0"
+platformdirs = ">=3.0.0,<4.0.0"
+poetry-core = "1.6.1"
+poetry-plugin-export = ">=1.4.0,<2.0.0"
+pyproject-hooks = ">=1.0.0,<2.0.0"
+requests = ">=2.18,<3.0"
+requests-toolbelt = ">=0.9.1,<2"
+shellingham = ">=1.5,<2.0"
+tomli = {version = ">=2.0.1,<3.0.0", markers = "python_version < \"3.11\""}
+tomlkit = ">=0.11.4,<1.0.0"
+trove-classifiers = ">=2022.5.19"
+urllib3 = ">=1.26.0,<2.0.0"
+virtualenv = ">=20.22.0,<21.0.0"
+xattr = {version = ">=0.10.0,<0.11.0", markers = "sys_platform == \"darwin\""}
+
+[[package]]
+name = "poetry-core"
+version = "1.6.1"
+description = "Poetry PEP 517 Build Backend"
+category = "dev"
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "poetry_core-1.6.1-py3-none-any.whl", hash = "sha256:70707340447dee0e7f334f9495ae652481c67b32d8d218f296a376ac2ed73573"},
+ {file = "poetry_core-1.6.1.tar.gz", hash = "sha256:0f9b0de39665f36d6594657e7d57b6f463cc10f30c28e6d1c3b9ff54c26c9ac3"},
+]
+
+[package.dependencies]
+importlib-metadata = {version = ">=1.7.0", markers = "python_version < \"3.8\""}
+
+[[package]]
+name = "poetry-plugin-export"
+version = "1.4.0"
+description = "Poetry plugin to export the dependencies to various formats"
+category = "dev"
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "poetry_plugin_export-1.4.0-py3-none-any.whl", hash = "sha256:5d9186d6f77cf2bf35fc96bd11fe650cc7656e515b17d99cb65018d50ba22589"},
+ {file = "poetry_plugin_export-1.4.0.tar.gz", hash = "sha256:f16974cd9f222d4ef640fa97a8d661b04d4fb339e51da93973f1bc9d578e183f"},
+]
+
+[package.dependencies]
+poetry = ">=1.5.0,<2.0.0"
+poetry-core = ">=1.6.0,<2.0.0"
+
+[[package]]
+name = "prometheus-client"
+version = "0.17.0"
+description = "Python client for the Prometheus monitoring system."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "prometheus_client-0.17.0-py3-none-any.whl", hash = "sha256:a77b708cf083f4d1a3fb3ce5c95b4afa32b9c521ae363354a4a910204ea095ce"},
+ {file = "prometheus_client-0.17.0.tar.gz", hash = "sha256:9c3b26f1535945e85b8934fb374678d263137b78ef85f305b1156c7c881cd11b"},
+]
+
+[package.extras]
+twisted = ["twisted"]
+
+[[package]]
+name = "ptyprocess"
+version = "0.7.0"
+description = "Run a subprocess in a pseudo terminal"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"},
+ {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"},
+]
+
+[[package]]
+name = "pycodestyle"
+version = "2.10.0"
+description = "Python style guide checker"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pycodestyle-2.10.0-py2.py3-none-any.whl", hash = "sha256:8a4eaf0d0495c7395bdab3589ac2db602797d76207242c17d470186815706610"},
+ {file = "pycodestyle-2.10.0.tar.gz", hash = "sha256:347187bdb476329d98f695c213d7295a846d1152ff4fe9bacb8a9590b8ee7053"},
+]
+
+[[package]]
+name = "pycparser"
+version = "2.21"
+description = "C parser in Python"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
+ {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
+]
+
+[[package]]
+name = "pyflakes"
+version = "3.0.1"
+description = "passive checker of Python programs"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pyflakes-3.0.1-py2.py3-none-any.whl", hash = "sha256:ec55bf7fe21fff7f1ad2f7da62363d749e2a470500eab1b555334b67aa1ef8cf"},
+ {file = "pyflakes-3.0.1.tar.gz", hash = "sha256:ec8b276a6b60bd80defed25add7e439881c19e64850afd9b346283d4165fd0fd"},
+]
+
+[[package]]
+name = "pygments"
+version = "2.15.1"
+description = "Pygments is a syntax highlighting package written in Python."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"},
+ {file = "Pygments-2.15.1.tar.gz", hash = "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c"},
+]
+
+[package.extras]
+plugins = ["importlib-metadata"]
+
+[[package]]
+name = "pylint"
+version = "2.13.9"
+description = "python code static checker"
+category = "dev"
+optional = false
+python-versions = ">=3.6.2"
+files = [
+ {file = "pylint-2.13.9-py3-none-any.whl", hash = "sha256:705c620d388035bdd9ff8b44c5bcdd235bfb49d276d488dd2c8ff1736aa42526"},
+ {file = "pylint-2.13.9.tar.gz", hash = "sha256:095567c96e19e6f57b5b907e67d265ff535e588fe26b12b5ebe1fc5645b2c731"},
+]
+
+[package.dependencies]
+astroid = ">=2.11.5,<=2.12.0-dev0"
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+dill = ">=0.2"
+isort = ">=4.2.5,<6"
+mccabe = ">=0.6,<0.8"
+platformdirs = ">=2.2.0"
+tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+testutil = ["gitpython (>3)"]
+
+[[package]]
+name = "pyparsing"
+version = "3.0.9"
+description = "pyparsing module - Classes and methods to define and execute parsing grammars"
+category = "dev"
+optional = false
+python-versions = ">=3.6.8"
+files = [
+ {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"},
+ {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"},
+]
+
+[package.extras]
+diagrams = ["jinja2", "railroad-diagrams"]
+
+[[package]]
+name = "pyproject-api"
+version = "1.5.1"
+description = "API to interact with the python pyproject.toml based projects"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pyproject_api-1.5.1-py3-none-any.whl", hash = "sha256:4698a3777c2e0f6b624f8a4599131e2a25376d90fe8d146d7ac74c67c6f97c43"},
+ {file = "pyproject_api-1.5.1.tar.gz", hash = "sha256:435f46547a9ff22cf4208ee274fca3e2869aeb062a4834adfc99a4dd64af3cf9"},
+]
+
+[package.dependencies]
+packaging = ">=23"
+tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""}
+
+[package.extras]
+docs = ["furo (>=2022.12.7)", "sphinx (>=6.1.3)", "sphinx-autodoc-typehints (>=1.22,!=1.23.4)"]
+testing = ["covdefaults (>=2.2.2)", "importlib-metadata (>=6)", "pytest (>=7.2.1)", "pytest-cov (>=4)", "pytest-mock (>=3.10)", "virtualenv (>=20.17.1)", "wheel (>=0.38.4)"]
+
+[[package]]
+name = "pyproject-hooks"
+version = "1.0.0"
+description = "Wrappers to call pyproject.toml-based build backend hooks."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pyproject_hooks-1.0.0-py3-none-any.whl", hash = "sha256:283c11acd6b928d2f6a7c73fa0d01cb2bdc5f07c57a2eeb6e83d5e56b97976f8"},
+ {file = "pyproject_hooks-1.0.0.tar.gz", hash = "sha256:f271b298b97f5955d53fb12b72c1fb1948c22c1a6b70b315c54cedaca0264ef5"},
+]
+
+[package.dependencies]
+tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+
+[[package]]
+name = "pyrsistent"
+version = "0.19.3"
+description = "Persistent/Functional/Immutable data structures"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pyrsistent-0.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a"},
+ {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64"},
+ {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf"},
+ {file = "pyrsistent-0.19.3-cp310-cp310-win32.whl", hash = "sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a"},
+ {file = "pyrsistent-0.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da"},
+ {file = "pyrsistent-0.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9"},
+ {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393"},
+ {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19"},
+ {file = "pyrsistent-0.19.3-cp311-cp311-win32.whl", hash = "sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3"},
+ {file = "pyrsistent-0.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8"},
+ {file = "pyrsistent-0.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28"},
+ {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf"},
+ {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9"},
+ {file = "pyrsistent-0.19.3-cp37-cp37m-win32.whl", hash = "sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1"},
+ {file = "pyrsistent-0.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b"},
+ {file = "pyrsistent-0.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8"},
+ {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a"},
+ {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c"},
+ {file = "pyrsistent-0.19.3-cp38-cp38-win32.whl", hash = "sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c"},
+ {file = "pyrsistent-0.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7"},
+ {file = "pyrsistent-0.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc"},
+ {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2"},
+ {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3"},
+ {file = "pyrsistent-0.19.3-cp39-cp39-win32.whl", hash = "sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2"},
+ {file = "pyrsistent-0.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98"},
+ {file = "pyrsistent-0.19.3-py3-none-any.whl", hash = "sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64"},
+ {file = "pyrsistent-0.19.3.tar.gz", hash = "sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440"},
+]
+
+[[package]]
+name = "pytest"
+version = "7.3.2"
+description = "pytest: simple powerful testing with Python"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest-7.3.2-py3-none-any.whl", hash = "sha256:cdcbd012c9312258922f8cd3f1b62a6580fdced17db6014896053d47cddf9295"},
+ {file = "pytest-7.3.2.tar.gz", hash = "sha256:ee990a3cc55ba808b80795a79944756f315c67c12b56abd3ac993a7b8c17030b"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
+importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""}
+iniconfig = "*"
+packaging = "*"
+pluggy = ">=0.12,<2.0"
+tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
+
+[package.extras]
+testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
+
+[[package]]
+name = "pytest-asyncio"
+version = "0.21.0"
+description = "Pytest support for asyncio"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest-asyncio-0.21.0.tar.gz", hash = "sha256:2b38a496aef56f56b0e87557ec313e11e1ab9276fc3863f6a7be0f1d0e415e1b"},
+ {file = "pytest_asyncio-0.21.0-py3-none-any.whl", hash = "sha256:f2b3366b7cd501a4056858bd39349d5af19742aed2d81660b7998b6341c7eb9c"},
+]
+
+[package.dependencies]
+pytest = ">=7.0.0"
+typing-extensions = {version = ">=3.7.2", markers = "python_version < \"3.8\""}
+
+[package.extras]
+docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
+testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"]
+
+[[package]]
+name = "pytest-cov"
+version = "4.1.0"
+description = "Pytest plugin for measuring coverage."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"},
+ {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"},
+]
+
+[package.dependencies]
+coverage = {version = ">=5.2.1", extras = ["toml"]}
+pytest = ">=4.6"
+
+[package.extras]
+testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"]
+
+[[package]]
+name = "pytz"
+version = "2023.3"
+description = "World timezone definitions, modern and historical"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pytz-2023.3-py2.py3-none-any.whl", hash = "sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb"},
+ {file = "pytz-2023.3.tar.gz", hash = "sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588"},
+]
+
+[[package]]
+name = "pywin32-ctypes"
+version = "0.2.0"
+description = ""
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pywin32-ctypes-0.2.0.tar.gz", hash = "sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942"},
+ {file = "pywin32_ctypes-0.2.0-py2.py3-none-any.whl", hash = "sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98"},
+]
+
+[[package]]
+name = "pyyaml"
+version = "6.0"
+description = "YAML parser and emitter for Python"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"},
+ {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"},
+ {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"},
+ {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"},
+ {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"},
+ {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"},
+ {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"},
+ {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"},
+ {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"},
+ {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"},
+ {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"},
+ {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"},
+ {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"},
+ {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"},
+ {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"},
+ {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"},
+ {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"},
+ {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"},
+ {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"},
+ {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"},
+ {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"},
+ {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"},
+ {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"},
+ {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"},
+ {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"},
+ {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"},
+ {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"},
+ {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"},
+ {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"},
+ {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"},
+ {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"},
+ {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"},
+ {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"},
+ {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"},
+ {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"},
+ {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"},
+ {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"},
+ {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"},
+ {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"},
+ {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"},
+]
+
+[[package]]
+name = "rapidfuzz"
+version = "2.15.1"
+description = "rapid fuzzy string matching"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "rapidfuzz-2.15.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fc0bc259ebe3b93e7ce9df50b3d00e7345335d35acbd735163b7c4b1957074d3"},
+ {file = "rapidfuzz-2.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d59fb3a410d253f50099d7063855c2b95df1ef20ad93ea3a6b84115590899f25"},
+ {file = "rapidfuzz-2.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c525a3da17b6d79d61613096c8683da86e3573e807dfaecf422eea09e82b5ba6"},
+ {file = "rapidfuzz-2.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4deae6a918ecc260d0c4612257be8ba321d8e913ccb43155403842758c46fbe"},
+ {file = "rapidfuzz-2.15.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2577463d10811386e704a3ab58b903eb4e2a31b24dfd9886d789b0084d614b01"},
+ {file = "rapidfuzz-2.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f67d5f56aa48c0da9de4ab81bffb310683cf7815f05ea38e5aa64f3ba4368339"},
+ {file = "rapidfuzz-2.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d7927722ff43690e52b3145b5bd3089151d841d350c6f8378c3cfac91f67573a"},
+ {file = "rapidfuzz-2.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6534afc787e32c4104f65cdeb55f6abe4d803a2d0553221d00ef9ce12788dcde"},
+ {file = "rapidfuzz-2.15.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d0ae6ec79a1931929bb9dd57bc173eb5ba4c7197461bf69e3a34b6dd314feed2"},
+ {file = "rapidfuzz-2.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:be7ccc45c4d1a7dfb595f260e8022a90c6cb380c2a346ee5aae93f85c96d362b"},
+ {file = "rapidfuzz-2.15.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:8ba013500a2b68c64b2aecc5fb56a2dad6c2872cf545a0308fd044827b6e5f6a"},
+ {file = "rapidfuzz-2.15.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4d9f7d10065f657f960b48699e7dddfce14ab91af4bab37a215f0722daf0d716"},
+ {file = "rapidfuzz-2.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7e24a1b802cea04160b3fccd75d2d0905065783ebc9de157d83c14fb9e1c6ce2"},
+ {file = "rapidfuzz-2.15.1-cp310-cp310-win32.whl", hash = "sha256:dffdf03499e0a5b3442951bb82b556333b069e0661e80568752786c79c5b32de"},
+ {file = "rapidfuzz-2.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d150d90a7c6caae7962f29f857a4e61d42038cfd82c9df38508daf30c648ae7"},
+ {file = "rapidfuzz-2.15.1-cp310-cp310-win_arm64.whl", hash = "sha256:87c30e9184998ff6eb0fa9221f94282ce7c908fd0da96a1ef66ecadfaaa4cdb7"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6986413cb37035eb796e32f049cbc8c13d8630a4ac1e0484e3e268bb3662bd1b"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a72f26e010d4774b676f36e43c0fc8a2c26659efef4b3be3fd7714d3491e9957"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b5cd54c98a387cca111b3b784fc97a4f141244bbc28a92d4bde53f164464112e"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da7fac7c3da39f93e6b2ebe386ed0ffe1cefec91509b91857f6e1204509e931f"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f976e76ac72f650790b3a5402431612175b2ac0363179446285cb3c901136ca9"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:abde47e1595902a490ed14d4338d21c3509156abb2042a99e6da51f928e0c117"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca8f1747007a3ce919739a60fa95c5325f7667cccf6f1c1ef18ae799af119f5e"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c35da09ab9797b020d0d4f07a66871dfc70ea6566363811090353ea971748b5a"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a3a769ca7580686a66046b77df33851b3c2d796dc1eb60c269b68f690f3e1b65"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d50622efefdb03a640a51a6123748cd151d305c1f0431af762e833d6ffef71f0"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b7461b0a7651d68bc23f0896bffceea40f62887e5ab8397bf7caa883592ef5cb"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:074ee9e17912e025c72a5780ee4c7c413ea35cd26449719cc399b852d4e42533"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7025fb105a11f503943f17718cdb8241ea3bb4d812c710c609e69bead40e2ff0"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-win32.whl", hash = "sha256:2084d36b95139413cef25e9487257a1cc892b93bd1481acd2a9656f7a1d9930c"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:5a738fcd24e34bce4b19126b92fdae15482d6d3a90bd687fd3d24ce9d28ce82d"},
+ {file = "rapidfuzz-2.15.1-cp311-cp311-win_arm64.whl", hash = "sha256:dc3cafa68cfa54638632bdcadf9aab89a3d182b4a3f04d2cad7585ed58ea8731"},
+ {file = "rapidfuzz-2.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3c53d57ba7a88f7bf304d4ea5a14a0ca112db0e0178fff745d9005acf2879f7d"},
+ {file = "rapidfuzz-2.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6ee758eec4cf2215dc8d8eafafcea0d1f48ad4b0135767db1b0f7c5c40a17dd"},
+ {file = "rapidfuzz-2.15.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d93ba3ae59275e7a3a116dac4ffdb05e9598bf3ee0861fecc5b60fb042d539e"},
+ {file = "rapidfuzz-2.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7c3ff75e647908ddbe9aa917fbe39a112d5631171f3fcea5809e2363e525a59d"},
+ {file = "rapidfuzz-2.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6d89c421702474c6361245b6b199e6e9783febacdbfb6b002669e6cb3ef17a09"},
+ {file = "rapidfuzz-2.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f69e6199fec0f58f9a89afbbaea78d637c7ce77f656a03a1d6ea6abdc1d44f8"},
+ {file = "rapidfuzz-2.15.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:41dfea282844d0628279b4db2929da0dacb8ac317ddc5dcccc30093cf16357c1"},
+ {file = "rapidfuzz-2.15.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2dd03477feefeccda07b7659dd614f6738cfc4f9b6779dd61b262a73b0a9a178"},
+ {file = "rapidfuzz-2.15.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:5efe035aa76ff37d1b5fa661de3c4b4944de9ff227a6c0b2e390a95c101814c0"},
+ {file = "rapidfuzz-2.15.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:ed2cf7c69102c7a0a06926d747ed855bc836f52e8d59a5d1e3adfd980d1bd165"},
+ {file = "rapidfuzz-2.15.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a0e441d4c2025110ec3eba5d54f11f78183269a10152b3a757a739ffd1bb12bf"},
+ {file = "rapidfuzz-2.15.1-cp37-cp37m-win32.whl", hash = "sha256:a4a54efe17cc9f53589c748b53f28776dfdfb9bc83619685740cb7c37985ac2f"},
+ {file = "rapidfuzz-2.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:bb8318116ecac4dfb84841d8b9b461f9bb0c3be5b616418387d104f72d2a16d1"},
+ {file = "rapidfuzz-2.15.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e9296c530e544f68858c3416ad1d982a1854f71e9d2d3dcedb5b216e6d54f067"},
+ {file = "rapidfuzz-2.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:49c4bcdb9238f11f8c4eba1b898937f09b92280d6f900023a8216008f299b41a"},
+ {file = "rapidfuzz-2.15.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ebb40a279e134bb3fef099a8b58ed5beefb201033d29bdac005bddcdb004ef71"},
+ {file = "rapidfuzz-2.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7381c11cb590bbd4e6f2d8779a0b34fdd2234dfa13d0211f6aee8ca166d9d05"},
+ {file = "rapidfuzz-2.15.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfdcdedfd12a0077193f2cf3626ff6722c5a184adf0d2d51f1ec984bf21c23c3"},
+ {file = "rapidfuzz-2.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f85bece1ec59bda8b982bd719507d468d4df746dfb1988df11d916b5e9fe19e8"},
+ {file = "rapidfuzz-2.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b1b393f4a1eaa6867ffac6aef58cfb04bab2b3d7d8e40b9fe2cf40dd1d384601"},
+ {file = "rapidfuzz-2.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53de456ef020a77bf9d7c6c54860a48e2e902584d55d3001766140ac45c54bc7"},
+ {file = "rapidfuzz-2.15.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2492330bc38b76ed967eab7bdaea63a89b6ceb254489e2c65c3824efcbf72993"},
+ {file = "rapidfuzz-2.15.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:099e4c6befaa8957a816bdb67ce664871f10aaec9bebf2f61368cf7e0869a7a1"},
+ {file = "rapidfuzz-2.15.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:46599b2ad4045dd3f794a24a6db1e753d23304699d4984462cf1ead02a51ddf3"},
+ {file = "rapidfuzz-2.15.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:591f19d16758a3c55c9d7a0b786b40d95599a5b244d6eaef79c7a74fcf5104d8"},
+ {file = "rapidfuzz-2.15.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ed17359061840eb249f8d833cb213942e8299ffc4f67251a6ed61833a9f2ea20"},
+ {file = "rapidfuzz-2.15.1-cp38-cp38-win32.whl", hash = "sha256:aa1e5aad325168e29bf8e17006479b97024aa9d2fdbe12062bd2f8f09080acf8"},
+ {file = "rapidfuzz-2.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:c2bb68832b140c551dbed691290bef4ee6719d4e8ce1b7226a3736f61a9d1a83"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3fac40972cf7b6c14dded88ae2331eb50dfbc278aa9195473ef6fc6bfe49f686"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f0e456cbdc0abf39352800309dab82fd3251179fa0ff6573fa117f51f4e84be8"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:22b9d22022b9d09fd4ece15102270ab9b6a5cfea8b6f6d1965c1df7e3783f5ff"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46754fe404a9a6f5cbf7abe02d74af390038d94c9b8c923b3f362467606bfa28"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:91abb8bf7610efe326394adc1d45e1baca8f360e74187f3fa0ef3df80cdd3ba6"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e40a2f60024f9d3c15401e668f732800114a023f3f8d8c40f1521a62081ff054"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a48ee83916401ac73938526d7bd804e01d2a8fe61809df7f1577b0b3b31049a3"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c71580052f9dbac443c02f60484e5a2e5f72ad4351b84b2009fbe345b1f38422"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:82b86d5b8c1b9bcbc65236d75f81023c78d06a721c3e0229889ff4ed5c858169"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:fc4528b7736e5c30bc954022c2cf410889abc19504a023abadbc59cdf9f37cae"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e1e0e569108a5760d8f01d0f2148dd08cc9a39ead79fbefefca9e7c7723c7e88"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:94e1c97f0ad45b05003806f8a13efc1fc78983e52fa2ddb00629003acf4676ef"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47e81767a962e41477a85ad7ac937e34d19a7d2a80be65614f008a5ead671c56"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-win32.whl", hash = "sha256:79fc574aaf2d7c27ec1022e29c9c18f83cdaf790c71c05779528901e0caad89b"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:f3dd4bcef2d600e0aa121e19e6e62f6f06f22a89f82ef62755e205ce14727874"},
+ {file = "rapidfuzz-2.15.1-cp39-cp39-win_arm64.whl", hash = "sha256:cac095cbdf44bc286339a77214bbca6d4d228c9ebae3da5ff6a80aaeb7c35634"},
+ {file = "rapidfuzz-2.15.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b89d1126be65c85763d56e3b47d75f1a9b7c5529857b4d572079b9a636eaa8a7"},
+ {file = "rapidfuzz-2.15.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:19b7460e91168229768be882ea365ba0ac7da43e57f9416e2cfadc396a7df3c2"},
+ {file = "rapidfuzz-2.15.1-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93c33c03e7092642c38f8a15ca2d8fc38da366f2526ec3b46adf19d5c7aa48ba"},
+ {file = "rapidfuzz-2.15.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:040faca2e26d9dab5541b45ce72b3f6c0e36786234703fc2ac8c6f53bb576743"},
+ {file = "rapidfuzz-2.15.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:6e2a3b23e1e9aa13474b3c710bba770d0dcc34d517d3dd6f97435a32873e3f28"},
+ {file = "rapidfuzz-2.15.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2e597b9dfd6dd180982684840975c458c50d447e46928efe3e0120e4ec6f6686"},
+ {file = "rapidfuzz-2.15.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d14752c9dd2036c5f36ebe8db5f027275fa7d6b3ec6484158f83efb674bab84e"},
+ {file = "rapidfuzz-2.15.1-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:558224b6fc6124d13fa32d57876f626a7d6188ba2a97cbaea33a6ee38a867e31"},
+ {file = "rapidfuzz-2.15.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3c89cfa88dc16fd8c9bcc0c7f0b0073f7ef1e27cceb246c9f5a3f7004fa97c4d"},
+ {file = "rapidfuzz-2.15.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:509c5b631cd64df69f0f011893983eb15b8be087a55bad72f3d616b6ae6a0f96"},
+ {file = "rapidfuzz-2.15.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0f73a04135a03a6e40393ecd5d46a7a1049d353fc5c24b82849830d09817991f"},
+ {file = "rapidfuzz-2.15.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c99d53138a2dfe8ada67cb2855719f934af2733d726fbf73247844ce4dd6dd5"},
+ {file = "rapidfuzz-2.15.1-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f01fa757f0fb332a1f045168d29b0d005de6c39ee5ce5d6c51f2563bb53c601b"},
+ {file = "rapidfuzz-2.15.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60368e1add6e550faae65614844c43f8a96e37bf99404643b648bf2dba92c0fb"},
+ {file = "rapidfuzz-2.15.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:785744f1270828cc632c5a3660409dee9bcaac6931a081bae57542c93e4d46c4"},
+ {file = "rapidfuzz-2.15.1.tar.gz", hash = "sha256:d62137c2ca37aea90a11003ad7dc109c8f1739bfbe5a9a217f3cdb07d7ac00f6"},
+]
+
+[package.extras]
+full = ["numpy"]
+
+[[package]]
+name = "requests"
+version = "2.31.0"
+description = "Python HTTP for Humans."
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
+]
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = ">=2,<4"
+idna = ">=2.5,<4"
+urllib3 = ">=1.21.1,<3"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
+
+[[package]]
+name = "requests-toolbelt"
+version = "1.0.0"
+description = "A utility belt for advanced users of python-requests"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"},
+ {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"},
+]
+
+[package.dependencies]
+requests = ">=2.0.1,<3.0.0"
+
+[[package]]
+name = "secretstorage"
+version = "3.3.3"
+description = "Python bindings to FreeDesktop.org Secret Service API"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "SecretStorage-3.3.3-py3-none-any.whl", hash = "sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99"},
+ {file = "SecretStorage-3.3.3.tar.gz", hash = "sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77"},
+]
+
+[package.dependencies]
+cryptography = ">=2.0"
+jeepney = ">=0.6"
+
+[[package]]
+name = "setuptools"
+version = "67.8.0"
+description = "Easily download, build, install, upgrade, and uninstall Python packages"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "setuptools-67.8.0-py3-none-any.whl", hash = "sha256:5df61bf30bb10c6f756eb19e7c9f3b473051f48db77fddbe06ff2ca307df9a6f"},
+ {file = "setuptools-67.8.0.tar.gz", hash = "sha256:62642358adc77ffa87233bc4d2354c4b2682d214048f500964dbe760ccedf102"},
+]
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
+
+[[package]]
+name = "shellingham"
+version = "1.5.0.post1"
+description = "Tool to Detect Surrounding Shell"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "shellingham-1.5.0.post1-py2.py3-none-any.whl", hash = "sha256:368bf8c00754fd4f55afb7bbb86e272df77e4dc76ac29dbcbb81a59e9fc15744"},
+ {file = "shellingham-1.5.0.post1.tar.gz", hash = "sha256:823bc5fb5c34d60f285b624e7264f4dda254bc803a3774a147bf99c0e3004a28"},
+]
+
+[[package]]
+name = "six"
+version = "1.16.0"
+description = "Python 2 and 3 compatibility utilities"
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
+ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+]
+
+[[package]]
+name = "snowballstemmer"
+version = "2.2.0"
+description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms."
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"},
+ {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"},
+]
+
+[[package]]
+name = "sphinx"
+version = "5.3.0"
+description = "Python documentation generator"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "Sphinx-5.3.0.tar.gz", hash = "sha256:51026de0a9ff9fc13c05d74913ad66047e104f56a129ff73e174eb5c3ee794b5"},
+ {file = "sphinx-5.3.0-py3-none-any.whl", hash = "sha256:060ca5c9f7ba57a08a1219e547b269fadf125ae25b06b9fa7f66768efb652d6d"},
+]
+
+[package.dependencies]
+alabaster = ">=0.7,<0.8"
+babel = ">=2.9"
+colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""}
+docutils = ">=0.14,<0.20"
+imagesize = ">=1.3"
+importlib-metadata = {version = ">=4.8", markers = "python_version < \"3.10\""}
+Jinja2 = ">=3.0"
+packaging = ">=21.0"
+Pygments = ">=2.12"
+requests = ">=2.5.0"
+snowballstemmer = ">=2.0"
+sphinxcontrib-applehelp = "*"
+sphinxcontrib-devhelp = "*"
+sphinxcontrib-htmlhelp = ">=2.0.0"
+sphinxcontrib-jsmath = "*"
+sphinxcontrib-qthelp = "*"
+sphinxcontrib-serializinghtml = ">=1.1.5"
+
+[package.extras]
+docs = ["sphinxcontrib-websupport"]
+lint = ["docutils-stubs", "flake8 (>=3.5.0)", "flake8-bugbear", "flake8-comprehensions", "flake8-simplify", "isort", "mypy (>=0.981)", "sphinx-lint", "types-requests", "types-typed-ast"]
+test = ["cython", "html5lib", "pytest (>=4.6)", "typed_ast"]
+
+[[package]]
+name = "sphinxcontrib-applehelp"
+version = "1.0.2"
+description = "sphinxcontrib-applehelp is a sphinx extension which outputs Apple help books"
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "sphinxcontrib-applehelp-1.0.2.tar.gz", hash = "sha256:a072735ec80e7675e3f432fcae8610ecf509c5f1869d17e2eecff44389cdbc58"},
+ {file = "sphinxcontrib_applehelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:806111e5e962be97c29ec4c1e7fe277bfd19e9652fb1a4392105b43e01af885a"},
+]
+
+[package.extras]
+lint = ["docutils-stubs", "flake8", "mypy"]
+test = ["pytest"]
+
+[[package]]
+name = "sphinxcontrib-devhelp"
+version = "1.0.2"
+description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document."
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"},
+ {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"},
+]
+
+[package.extras]
+lint = ["docutils-stubs", "flake8", "mypy"]
+test = ["pytest"]
+
+[[package]]
+name = "sphinxcontrib-htmlhelp"
+version = "2.0.0"
+description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "sphinxcontrib-htmlhelp-2.0.0.tar.gz", hash = "sha256:f5f8bb2d0d629f398bf47d0d69c07bc13b65f75a81ad9e2f71a63d4b7a2f6db2"},
+ {file = "sphinxcontrib_htmlhelp-2.0.0-py2.py3-none-any.whl", hash = "sha256:d412243dfb797ae3ec2b59eca0e52dac12e75a241bf0e4eb861e450d06c6ed07"},
+]
+
+[package.extras]
+lint = ["docutils-stubs", "flake8", "mypy"]
+test = ["html5lib", "pytest"]
+
+[[package]]
+name = "sphinxcontrib-jsmath"
+version = "1.0.1"
+description = "A sphinx extension which renders display math in HTML via JavaScript"
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"},
+ {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"},
+]
+
+[package.extras]
+test = ["flake8", "mypy", "pytest"]
+
+[[package]]
+name = "sphinxcontrib-qthelp"
+version = "1.0.3"
+description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document."
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"},
+ {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"},
+]
+
+[package.extras]
+lint = ["docutils-stubs", "flake8", "mypy"]
+test = ["pytest"]
+
+[[package]]
+name = "sphinxcontrib-serializinghtml"
+version = "1.1.5"
+description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)."
+category = "dev"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "sphinxcontrib-serializinghtml-1.1.5.tar.gz", hash = "sha256:aa5f6de5dfdf809ef505c4895e51ef5c9eac17d0f287933eb49ec495280b6952"},
+ {file = "sphinxcontrib_serializinghtml-1.1.5-py2.py3-none-any.whl", hash = "sha256:352a9a00ae864471d3a7ead8d7d79f5fc0b57e8b3f95e9867eb9eb28999b92fd"},
+]
+
+[package.extras]
+lint = ["docutils-stubs", "flake8", "mypy"]
+test = ["pytest"]
+
+[[package]]
+name = "supervisor"
+version = "4.2.5"
+description = "A system for controlling process state under UNIX"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "supervisor-4.2.5-py2.py3-none-any.whl", hash = "sha256:2ecaede32fc25af814696374b79e42644ecaba5c09494c51016ffda9602d0f08"},
+ {file = "supervisor-4.2.5.tar.gz", hash = "sha256:34761bae1a23c58192281a5115fb07fbf22c9b0133c08166beffc70fed3ebc12"},
+]
+
+[package.dependencies]
+setuptools = "*"
+
+[package.extras]
+testing = ["pytest", "pytest-cov"]
+
+[[package]]
+name = "toml"
+version = "0.10.2"
+description = "Python Library for Tom's Obvious, Minimal Language"
+category = "dev"
+optional = false
+python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"},
+ {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
+]
+
+[[package]]
+name = "tomli"
+version = "2.0.1"
+description = "A lil' TOML parser"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
+ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
+]
+
+[[package]]
+name = "tomlkit"
+version = "0.11.8"
+description = "Style preserving TOML library"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tomlkit-0.11.8-py3-none-any.whl", hash = "sha256:8c726c4c202bdb148667835f68d68780b9a003a9ec34167b6c673b38eff2a171"},
+ {file = "tomlkit-0.11.8.tar.gz", hash = "sha256:9330fc7faa1db67b541b28e62018c17d20be733177d290a13b24c62d1614e0c3"},
+]
+
+[[package]]
+name = "tox"
+version = "4.6.0"
+description = "tox is a generic virtualenv management and test command line tool"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tox-4.6.0-py3-none-any.whl", hash = "sha256:4874000453e637a87ca892f9744a2ab9a7d24064dad1b0ecbf5a4c3c146cc732"},
+ {file = "tox-4.6.0.tar.gz", hash = "sha256:954f1f647f67f481d239a193288983242a6152b67503c4a56b19a4aafaa29736"},
+]
+
+[package.dependencies]
+cachetools = ">=5.3"
+chardet = ">=5.1"
+colorama = ">=0.4.6"
+filelock = ">=3.12"
+importlib-metadata = {version = ">=6.6", markers = "python_version < \"3.8\""}
+packaging = ">=23.1"
+platformdirs = ">=3.5.1"
+pluggy = ">=1"
+pyproject-api = ">=1.5.1"
+tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""}
+typing-extensions = {version = ">=4.6.2", markers = "python_version < \"3.8\""}
+virtualenv = ">=20.23"
+
+[package.extras]
+docs = ["furo (>=2023.5.20)", "sphinx (>=7.0.1)", "sphinx-argparse-cli (>=1.11)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)", "sphinx-copybutton (>=0.5.2)", "sphinx-inline-tabs (>=2023.4.21)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=22.12)"]
+testing = ["build[virtualenv] (>=0.10)", "covdefaults (>=2.3)", "devpi-process (>=0.3)", "diff-cover (>=7.5)", "distlib (>=0.3.6)", "flaky (>=3.7)", "hatch-vcs (>=0.3)", "hatchling (>=1.17)", "psutil (>=5.9.5)", "pytest (>=7.3.1)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)", "pytest-xdist (>=3.3.1)", "re-assert (>=1.1)", "time-machine (>=2.9)", "wheel (>=0.40)"]
+
+[[package]]
+name = "tox-pyenv"
+version = "1.1.0"
+description = "tox plugin that makes tox use `pyenv which` to find python executables"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "tox-pyenv-1.1.0.tar.gz", hash = "sha256:916c2213577aec0b3b5452c5bfb32fd077f3a3196f50a81ad57d7ef3fc2599e4"},
+ {file = "tox_pyenv-1.1.0-py2.py3-none-any.whl", hash = "sha256:e470c18af115fe52eeff95e7e3cdd0793613eca19709966fc2724b79d55246cb"},
+]
+
+[package.dependencies]
+tox = ">=2.0"
+
+[[package]]
+name = "trove-classifiers"
+version = "2023.5.24"
+description = "Canonical source for classifiers on PyPI (pypi.org)."
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "trove-classifiers-2023.5.24.tar.gz", hash = "sha256:fd5a1546283be941f47540a135bdeae8fb261380a6a204d9c18012f2a1b0ceae"},
+ {file = "trove_classifiers-2023.5.24-py3-none-any.whl", hash = "sha256:d9d7ae14fb90bf3d50bef99c3941b176b5326509e6e9037e622562d6352629d0"},
+]
+
+[[package]]
+name = "typed-ast"
+version = "1.5.4"
+description = "a fork of Python 2 and 3 ast modules with type comment support"
+category = "dev"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"},
+ {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"},
+ {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"},
+ {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"},
+ {file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"},
+ {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"},
+ {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"},
+ {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"},
+ {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"},
+ {file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"},
+ {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"},
+ {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"},
+ {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"},
+ {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"},
+ {file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"},
+ {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"},
+ {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"},
+ {file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"},
+ {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"},
+ {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"},
+ {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"},
+ {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"},
+ {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"},
+ {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"},
+]
+
+[[package]]
+name = "types-pyyaml"
+version = "6.0.12.10"
+description = "Typing stubs for PyYAML"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "types-PyYAML-6.0.12.10.tar.gz", hash = "sha256:ebab3d0700b946553724ae6ca636ea932c1b0868701d4af121630e78d695fc97"},
+ {file = "types_PyYAML-6.0.12.10-py3-none-any.whl", hash = "sha256:662fa444963eff9b68120d70cda1af5a5f2aa57900003c2006d7626450eaae5f"},
+]
+
+[[package]]
+name = "typing-extensions"
+version = "4.6.3"
+description = "Backported and Experimental Type Hints for Python 3.7+"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "typing_extensions-4.6.3-py3-none-any.whl", hash = "sha256:88a4153d8505aabbb4e13aacb7c486c2b4a33ca3b3f807914a9b4c844c471c26"},
+ {file = "typing_extensions-4.6.3.tar.gz", hash = "sha256:d91d5919357fe7f681a9f2b5b4cb2a5f1ef0a1e9f59c4d8ff0d3491e05c0ffd5"},
+]
+
+[[package]]
+name = "urllib3"
+version = "1.26.16"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+category = "dev"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+files = [
+ {file = "urllib3-1.26.16-py2.py3-none-any.whl", hash = "sha256:8d36afa7616d8ab714608411b4a3b13e58f463aee519024578e062e141dce20f"},
+ {file = "urllib3-1.26.16.tar.gz", hash = "sha256:8f135f6502756bde6b2a9b28989df5fbe87c9970cecaa69041edcce7f0589b14"},
+]
+
+[package.extras]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
+secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
+socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
+
+[[package]]
+name = "virtualenv"
+version = "20.23.0"
+description = "Virtual Python Environment builder"
+category = "dev"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "virtualenv-20.23.0-py3-none-any.whl", hash = "sha256:6abec7670e5802a528357fdc75b26b9f57d5d92f29c5462ba0fbe45feacc685e"},
+ {file = "virtualenv-20.23.0.tar.gz", hash = "sha256:a85caa554ced0c0afbd0d638e7e2d7b5f92d23478d05d17a76daeac8f279f924"},
+]
+
+[package.dependencies]
+distlib = ">=0.3.6,<1"
+filelock = ">=3.11,<4"
+importlib-metadata = {version = ">=6.4.1", markers = "python_version < \"3.8\""}
+platformdirs = ">=3.2,<4"
+
+[package.extras]
+docs = ["furo (>=2023.3.27)", "proselint (>=0.13)", "sphinx (>=6.1.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=22.12)"]
+test = ["covdefaults (>=2.3)", "coverage (>=7.2.3)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.3.1)", "pytest-env (>=0.8.1)", "pytest-freezegun (>=0.4.2)", "pytest-mock (>=3.10)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=67.7.1)", "time-machine (>=2.9)"]
+
+[[package]]
+name = "webencodings"
+version = "0.5.1"
+description = "Character encoding aliases for legacy web content"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"},
+ {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"},
+]
+
+[[package]]
+name = "wrapt"
+version = "1.15.0"
+description = "Module for decorators, wrappers and monkey patching."
+category = "dev"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+files = [
+ {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"},
+ {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"},
+ {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"},
+ {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"},
+ {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"},
+ {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"},
+ {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"},
+ {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"},
+ {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"},
+ {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"},
+ {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"},
+ {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"},
+ {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"},
+ {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"},
+ {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"},
+ {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"},
+ {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"},
+ {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"},
+ {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"},
+ {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"},
+ {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"},
+ {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"},
+ {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"},
+ {file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"},
+ {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"},
+ {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"},
+ {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"},
+ {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"},
+ {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"},
+ {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"},
+ {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"},
+ {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"},
+ {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"},
+ {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"},
+ {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"},
+ {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"},
+ {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"},
+ {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"},
+ {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"},
+ {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"},
+ {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"},
+ {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"},
+ {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"},
+ {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"},
+ {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"},
+ {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"},
+ {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"},
+ {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"},
+ {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"},
+ {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"},
+ {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"},
+ {file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"},
+ {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"},
+ {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"},
+ {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"},
+ {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"},
+ {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"},
+ {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"},
+ {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"},
+ {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"},
+ {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"},
+ {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"},
+ {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"},
+]
+
+[[package]]
+name = "xattr"
+version = "0.10.1"
+description = "Python wrapper for extended filesystem attributes"
+category = "dev"
+optional = false
+python-versions = "*"
+files = [
+ {file = "xattr-0.10.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:16a660a883e703b311d1bbbcafc74fa877585ec081cd96e8dd9302c028408ab1"},
+ {file = "xattr-0.10.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:1e2973e72faa87ca29d61c23b58c3c89fe102d1b68e091848b0e21a104123503"},
+ {file = "xattr-0.10.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:13279fe8f7982e3cdb0e088d5cb340ce9cbe5ef92504b1fd80a0d3591d662f68"},
+ {file = "xattr-0.10.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1dc9b9f580ef4b8ac5e2c04c16b4d5086a611889ac14ecb2e7e87170623a0b75"},
+ {file = "xattr-0.10.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:485539262c2b1f5acd6b6ea56e0da2bc281a51f74335c351ea609c23d82c9a79"},
+ {file = "xattr-0.10.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:295b3ab335fcd06ca0a9114439b34120968732e3f5e9d16f456d5ec4fa47a0a2"},
+ {file = "xattr-0.10.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:a126eb38e14a2f273d584a692fe36cff760395bf7fc061ef059224efdb4eb62c"},
+ {file = "xattr-0.10.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:b0e919c24f5b74428afa91507b15e7d2ef63aba98e704ad13d33bed1288dca81"},
+ {file = "xattr-0.10.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:e31d062cfe1aaeab6ba3db6bd255f012d105271018e647645941d6609376af18"},
+ {file = "xattr-0.10.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:209fb84c09b41c2e4cf16dd2f481bb4a6e2e81f659a47a60091b9bcb2e388840"},
+ {file = "xattr-0.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c4120090dac33eddffc27e487f9c8f16b29ff3f3f8bcb2251b2c6c3f974ca1e1"},
+ {file = "xattr-0.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3e739d624491267ec5bb740f4eada93491de429d38d2fcdfb97b25efe1288eca"},
+ {file = "xattr-0.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2677d40b95636f3482bdaf64ed9138fb4d8376fb7933f434614744780e46e42d"},
+ {file = "xattr-0.10.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40039f1532c4456fd0f4c54e9d4e01eb8201248c321c6c6856262d87e9a99593"},
+ {file = "xattr-0.10.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:148466e5bb168aba98f80850cf976e931469a3c6eb11e9880d9f6f8b1e66bd06"},
+ {file = "xattr-0.10.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0aedf55b116beb6427e6f7958ccd80a8cbc80e82f87a4cd975ccb61a8d27b2ee"},
+ {file = "xattr-0.10.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:c3024a9ff157247c8190dd0eb54db4a64277f21361b2f756319d9d3cf20e475f"},
+ {file = "xattr-0.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f1be6e733e9698f645dbb98565bb8df9b75e80e15a21eb52787d7d96800e823b"},
+ {file = "xattr-0.10.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7880c8a54c18bc091a4ce0adc5c6d81da1c748aec2fe7ac586d204d6ec7eca5b"},
+ {file = "xattr-0.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:89c93b42c3ba8aedbc29da759f152731196c2492a2154371c0aae3ef8ba8301b"},
+ {file = "xattr-0.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6b905e808df61b677eb972f915f8a751960284358b520d0601c8cbc476ba2df6"},
+ {file = "xattr-0.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1ef954d0655f93a34d07d0cc7e02765ec779ff0b59dc898ee08c6326ad614d5"},
+ {file = "xattr-0.10.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:199b20301b6acc9022661412346714ce764d322068ef387c4de38062474db76c"},
+ {file = "xattr-0.10.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec0956a8ab0f0d3f9011ba480f1e1271b703d11542375ef73eb8695a6bd4b78b"},
+ {file = "xattr-0.10.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ffcb57ca1be338d69edad93cf59aac7c6bb4dbb92fd7bf8d456c69ea42f7e6d2"},
+ {file = "xattr-0.10.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f0563196ee54756fe2047627d316977dc77d11acd7a07970336e1a711e934db"},
+ {file = "xattr-0.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc354f086f926a1c7f04886f97880fed1a26d20e3bc338d0d965fd161dbdb8ab"},
+ {file = "xattr-0.10.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c0cd2d02ef2fb45ecf2b0da066a58472d54682c6d4f0452dfe7ae2f3a76a42ea"},
+ {file = "xattr-0.10.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49626096ddd72dcc1654aadd84b103577d8424f26524a48d199847b5d55612d0"},
+ {file = "xattr-0.10.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ceaa26bef8fcb17eb59d92a7481c2d15d20211e217772fb43c08c859b01afc6a"},
+ {file = "xattr-0.10.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8c014c371391f28f8cd27d73ea59f42b30772cd640b5a2538ad4f440fd9190b"},
+ {file = "xattr-0.10.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:46c32cd605673606b9388a313b0050ee7877a0640d7561eea243ace4fa2cc5a6"},
+ {file = "xattr-0.10.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:772b22c4ff791fe5816a7c2a1c9fcba83f9ab9bea138eb44d4d70f34676232b4"},
+ {file = "xattr-0.10.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:183ad611a2d70b5a3f5f7aadef0fcef604ea33dcf508228765fd4ddac2c7321d"},
+ {file = "xattr-0.10.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8068df3ebdfa9411e58d5ae4a05d807ec5994645bb01af66ec9f6da718b65c5b"},
+ {file = "xattr-0.10.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bc40570155beb85e963ae45300a530223d9822edfdf09991b880e69625ba38a"},
+ {file = "xattr-0.10.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:436e1aaf23c07e15bed63115f1712d2097e207214fc6bcde147c1efede37e2c5"},
+ {file = "xattr-0.10.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7298455ccf3a922d403339781b10299b858bb5ec76435445f2da46fb768e31a5"},
+ {file = "xattr-0.10.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:986c2305c6c1a08f78611eb38ef9f1f47682774ce954efb5a4f3715e8da00d5f"},
+ {file = "xattr-0.10.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:5dc6099e76e33fa3082a905fe59df766b196534c705cf7a2e3ad9bed2b8a180e"},
+ {file = "xattr-0.10.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:042ad818cda6013162c0bfd3816f6b74b7700e73c908cde6768da824686885f8"},
+ {file = "xattr-0.10.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9d4c306828a45b41b76ca17adc26ac3dc00a80e01a5ba85d71df2a3e948828f2"},
+ {file = "xattr-0.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a606280b0c9071ef52572434ecd3648407b20df3d27af02c6592e84486b05894"},
+ {file = "xattr-0.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5b49d591cf34cda2079fd7a5cb2a7a1519f54dc2e62abe3e0720036f6ed41a85"},
+ {file = "xattr-0.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b8705ac6791426559c1a5c2b88bb2f0e83dc5616a09b4500899bfff6a929302"},
+ {file = "xattr-0.10.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5ea974930e876bc5c146f54ac0f85bb39b7b5de2b6fc63f90364712ae368ebe"},
+ {file = "xattr-0.10.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f55a2dd73a12a1ae5113c5d9cd4b4ab6bf7950f4d76d0a1a0c0c4264d50da61d"},
+ {file = "xattr-0.10.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:475c38da0d3614cc5564467c4efece1e38bd0705a4dbecf8deeb0564a86fb010"},
+ {file = "xattr-0.10.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:925284a4a28e369459b2b7481ea22840eed3e0573a4a4c06b6b0614ecd27d0a7"},
+ {file = "xattr-0.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa32f1b45fed9122bed911de0fcc654da349e1f04fa4a9c8ef9b53e1cc98b91e"},
+ {file = "xattr-0.10.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c5d3d0e728bace64b74c475eb4da6148cd172b2d23021a1dcd055d92f17619ac"},
+ {file = "xattr-0.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8faaacf311e2b5cc67c030c999167a78a9906073e6abf08eaa8cf05b0416515c"},
+ {file = "xattr-0.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cc6b8d5ca452674e1a96e246a3d2db5f477aecbc7c945c73f890f56323e75203"},
+ {file = "xattr-0.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3725746a6502f40f72ef27e0c7bfc31052a239503ff3eefa807d6b02a249be22"},
+ {file = "xattr-0.10.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:789bd406d1aad6735e97b20c6d6a1701e1c0661136be9be862e6a04564da771f"},
+ {file = "xattr-0.10.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9a7a807ab538210ff8532220d8fc5e2d51c212681f63dbd4e7ede32543b070f"},
+ {file = "xattr-0.10.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3e5825b5fc99ecdd493b0cc09ec35391e7a451394fdf623a88b24726011c950d"},
+ {file = "xattr-0.10.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:80638d1ce7189dc52f26c234cee3522f060fadab6a8bc3562fe0ddcbe11ba5a4"},
+ {file = "xattr-0.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3ff0dbe4a6ce2ce065c6de08f415bcb270ecfd7bf1655a633ddeac695ce8b250"},
+ {file = "xattr-0.10.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5267e5f9435c840d2674194150b511bef929fa7d3bc942a4a75b9eddef18d8d8"},
+ {file = "xattr-0.10.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b27dfc13b193cb290d5d9e62f806bb9a99b00cd73bb6370d556116ad7bb5dc12"},
+ {file = "xattr-0.10.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:636ebdde0277bce4d12d2ef2550885804834418fee0eb456b69be928e604ecc4"},
+ {file = "xattr-0.10.1-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d60c27922ec80310b45574351f71e0dd3a139c5295e8f8b19d19c0010196544f"},
+ {file = "xattr-0.10.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b34df5aad035d0343bd740a95ca30db99b776e2630dca9cc1ba8e682c9cc25ea"},
+ {file = "xattr-0.10.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f24a7c04ff666d0fe905dfee0a84bc899d624aeb6dccd1ea86b5c347f15c20c1"},
+ {file = "xattr-0.10.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3878e1aff8eca64badad8f6d896cb98c52984b1e9cd9668a3ab70294d1ef92d"},
+ {file = "xattr-0.10.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4abef557028c551d59cf2fb3bf63f2a0c89f00d77e54c1c15282ecdd56943496"},
+ {file = "xattr-0.10.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0e14bd5965d3db173d6983abdc1241c22219385c22df8b0eb8f1846c15ce1fee"},
+ {file = "xattr-0.10.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f9be588a4b6043b03777d50654c6079af3da60cc37527dbb80d36ec98842b1e"},
+ {file = "xattr-0.10.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7bc4ae264aa679aacf964abf3ea88e147eb4a22aea6af8c6d03ebdebd64cfd6"},
+ {file = "xattr-0.10.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:827b5a97673b9997067fde383a7f7dc67342403093b94ea3c24ae0f4f1fec649"},
+ {file = "xattr-0.10.1.tar.gz", hash = "sha256:c12e7d81ffaa0605b3ac8c22c2994a8e18a9cf1c59287a1b7722a2289c952ec5"},
+]
+
+[package.dependencies]
+cffi = ">=1.0"
+
+[[package]]
+name = "yarl"
+version = "1.9.2"
+description = "Yet another URL library"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"},
+ {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8"},
+ {file = "yarl-1.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528"},
+ {file = "yarl-1.9.2-cp310-cp310-win32.whl", hash = "sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3"},
+ {file = "yarl-1.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde"},
+ {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6"},
+ {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb"},
+ {file = "yarl-1.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a"},
+ {file = "yarl-1.9.2-cp311-cp311-win32.whl", hash = "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8"},
+ {file = "yarl-1.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051"},
+ {file = "yarl-1.9.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74"},
+ {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367"},
+ {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef"},
+ {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3"},
+ {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938"},
+ {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc"},
+ {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33"},
+ {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45"},
+ {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185"},
+ {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04"},
+ {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582"},
+ {file = "yarl-1.9.2-cp37-cp37m-win32.whl", hash = "sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b"},
+ {file = "yarl-1.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368"},
+ {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac"},
+ {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4"},
+ {file = "yarl-1.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b"},
+ {file = "yarl-1.9.2-cp38-cp38-win32.whl", hash = "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"},
+ {file = "yarl-1.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72"},
+ {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9"},
+ {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8"},
+ {file = "yarl-1.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80"},
+ {file = "yarl-1.9.2-cp39-cp39-win32.whl", hash = "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623"},
+ {file = "yarl-1.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18"},
+ {file = "yarl-1.9.2.tar.gz", hash = "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571"},
+]
+
+[package.dependencies]
+idna = ">=2.0"
+multidict = ">=4.0"
+typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""}
+
+[[package]]
+name = "zipp"
+version = "3.15.0"
+description = "Backport of pathlib-compatible object wrapper for zip files"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"},
+ {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"},
+]
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
+
+[metadata]
+lock-version = "2.0"
+python-versions = "^3.7"
+content-hash = "16ce34509fd2c4b4bedee9c8fc289f077bbe3057e95e7a2736ce71d994499444"
diff --git a/manager/pyproject.toml b/manager/pyproject.toml
new file mode 100644
index 00000000..3c7ff7ff
--- /dev/null
+++ b/manager/pyproject.toml
@@ -0,0 +1,180 @@
+[tool.poetry]
+name = "knot-resolver-manager"
+version = "0.1.0"
+description = "A central management tool for multiple instances of Knot Resolver"
+authors = [
+ "Václav Šraier <vaclav.sraier@nic.cz>",
+ "Aleš Mrázek <ales.mrazek@nic.cz>"
+]
+
+# See the currently open issue about building C extensions here:
+# https://github.com/python-poetry/poetry/issues/2740
+[tool.poetry.build]
+script = "build.py"
+generate-setup-file = true
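+# build.py (not shown in this hunk) is expected to expose a build(setup_kwargs)
+# hook; the setup.py generated by scripts/create_setup.py imports it and calls
+# build(setup_kwargs) before invoking setup().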
+
+[tool.poetry.dependencies]
+python = "^3.7"
+aiohttp = "*"
+jinja2 = "*"
+pyyaml = "*"
+supervisor = "*"
+typing-extensions = "*"
+prometheus-client = "*"
+
+[tool.poetry.group.dev.dependencies]
+poetry = "^1.4.2"
+pyparsing = "^3.0.9"
+poethepoet = "^0.18.1"
+debugpy = "^1.6.7"
+
+[tool.poetry.group.test.dependencies]
+pytest = "^7.3.1"
+pytest-cov = "^4.0.0"
+pytest-asyncio = "^0.21.0"
+tox = "^4.5.1"
+tox-pyenv = "^1.1.0"
+
+[tool.poetry.group.lint.dependencies]
+black = "^23.3.0"
+isort = "^4.3.21"
+toml = "^0.10.2"
+mypy = "^1.3.0"
+types-pyyaml = "^6.0.12.10"
+pylint = "^2.13.9"
+flake8 = {version = "*", python = "^3.8.1"}
+
+[tool.poetry.group.docs.dependencies]
+sphinx = "^5.3.0"
+
+[tool.poetry.scripts]
+kresctl = 'knot_resolver_manager.cli.main:main'
+knot-resolver = 'knot_resolver_manager.__main__:run'
+
+[tool.poe.tasks]
+run = { cmd = "scripts/run", help = "Run the manager" }
+run-debug = { cmd = "scripts/run-debug", help = "Run the manager under debugger" }
+docs = { cmd = "scripts/docs", help = "Create HTML documentation" }
+test = { shell = "env PYTHONPATH=. pytest --junitxml=unit.junit.xml --cov=knot_resolver_manager --show-capture=all tests/unit/", help = "Run tests" }
+check = { cmd = "scripts/codecheck", help = "Run static code analysis" }
+format = { shell = "black knot_resolver_manager/ tests/ scripts/ build.py; isort -rc .", help = "Run code formatter" }
+fixdeps = { shell = "poetry install; npm install; npm update", help = "Install/update dependencies according to configuration files"}
+commit = { shell = "scripts/commit", help = "Invoke every single check before committing" }
+container = { cmd = "scripts/container.py", help = "Manage containers" }
+kresctl = { script = "knot_resolver_manager.cli.main:main", help = "Run kresctl" }
+clean = """
+ rm -rf .coverage
+ .mypy_cache
+ .pytest_cache
+ ./**/__pycache__
+ dist
+ .tox
+"""
+gen-setuppy = { shell = "python scripts/create_setup.py > setup.py", help = "Generate setup.py file for backwards compatibility" }
+tox = { cmd = "tox", help = "Run tests in tox" }
+integration = {cmd = "python tests/integration/runner.py", help = "Run integration tests" }
+configure-vscode = {cmd = "scripts/configure-vscode", help = "Create VSCode configuration for debugging, virtual envs etc" }
+man = {cmd = "scripts/man", help = "Display manpage from sources" }
+
+[tool.black]
+line-length = 120
+target_version = ['py311']
+include = '\.py$'
+exclude = "setup.py" # Poetry generates it and we want to keep it unchanged
+
+[tool.isort]
+line_length=120 # corresponds to -w flag
+profile = "black"
+multi_line_output=3 # corresponds to -m flag
+include_trailing_comma=true # corresponds to -tc flag
+skip_glob = '^((?!py$).)*$' # isort all Python files
+float_to_top=true
+skip = "setup.py" # Poetry generates it and we want to keep it unchanged
+
+[tool.tox]
+legacy_tox_ini = """
+[tox]
+isolated_build = True
+envlist = py37, py38, py39, py310, py311
+
+[tox:.package]
+# note tox will use the same python version as under what tox is installed to package
+# so unless this is python 3 you can require a given python version for the packaging
+# environment via the basepython key
+basepython = python3
+
+[testenv]
+deps = poetry
+commands =
+ poetry install -v
+ ./poe test
+"""
+
+[tool.pylint."MESSAGES CONTROL"]
+disable= [
+ "broad-except",
+ "fixme",
+ "global-statement",
+ "invalid-name",
+ "line-too-long", # checked by flake8
+ "missing-docstring",
+ "no-else-return",
+ "no-self-use",
+ "raise-missing-from",
+ "too-few-public-methods",
+ "unused-import", # checked by flake8,
+ "bad-continuation", # conflicts with black
+ "consider-using-in", # pyright can't see through in expressions,
+ "too-many-return-statements", # would prevent us from using recursive tree traversals
+ "logging-fstring-interpolation", # see https://github.com/PyCQA/pylint/issues/1788
+ "no-else-raise", # not helpful for readability, when we want explicit branches
+ "raising-bad-type", # handled by type checker
+ "too-many-arguments", # sure, but how can we change the signatures to take less arguments? artificially create objects with arguments? That's stupid...
+ "no-member", # checked by pyright
+ "import-error", # checked by pyright (and pylint does not do it properly)
+ "unsupported-delete-operation", # checked by pyright
+ "unsubscriptable-object", # checked by pyright
+ "unsupported-membership-test", # checked by pyright
+ "invalid-overridden-method", # hopefully checked by type checkers
+]
+
+[tool.pylint.SIMILARITIES]
+min-similarity-lines = "6"
+ignore-comments = "yes"
+ignore-docstrings = "yes"
+ignore-imports = "yes"
+
+[tool.pylint.DESIGN]
+max-parents = "10"
+
+[tool.pyright]
+include = [
+ "knot_resolver_manager",
+ "tests"
+]
+exclude = []
+typeCheckingMode = "strict"
+
+[tool.mypy]
+python_version = "3.7"
+# strict = true
+disallow_any_generics = true
+disallow_subclassing_any = true
+disallow_untyped_calls = false
+disallow_untyped_decorators = true
+pretty = true
+show_error_codes = true
+allow_redefinition = true
+disallow_untyped_defs = false
+strict_equality = true
+disallow_incomplete_defs = true
+check_untyped_defs = true
+implicit_reexport = false
+no_implicit_optional = true
+
+[build-system]
+requires = [
+ "poetry-core>=1.0.0",
+ "setuptools>=67.8.0"
+]
+build-backend = "poetry.core.masonry.api"
diff --git a/manager/scripts/_env.sh b/manager/scripts/_env.sh
new file mode 100644
index 00000000..b1941edf
--- /dev/null
+++ b/manager/scripts/_env.sh
@@ -0,0 +1,52 @@
+# fail on errors
+set -o errexit
+
+# define color codes
+red="\033[0;31m"
+yellow="\033[0;33m"
+green="\033[0;32m"
+bright_black="\033[0;90m"
+blue="\033[0;34m"
+reset="\033[0m"
+
+# ensure consistent top level directory
+gitroot="$(git rev-parse --show-toplevel)"
+if test -z "$gitroot"; then
+ echo -e "${red}This command can be run only in a git repository tree.${reset}"
+ exit 1
+fi
+cd "$gitroot/manager"
+
+# ensure consistent environment with virtualenv
+if test -z "$VIRTUAL_ENV" -a "$CI" != "true" -a -z "$KNOT_ENV"; then
+ echo -e "${yellow}You are NOT running the script within the project's virtual environment.${reset}"
+ echo -e "Do you want to continue regardless? [yN]"
+ read cont
+ if test "$cont" != "y" -a "$cont" != "Y"; then
+ echo -e "${red}Exiting early...${reset}"
+ exit 1
+ fi
+fi
+
+# update PATH with node_modules
+PATH="$PATH:$gitroot/node_modules/.bin"
+
+# fail even on unbound variables
+set -o nounset
+
+
+function build_kresd {
+ echo
+ echo Building Knot Resolver
+ echo ----------------------
+ echo -e "${blue}In case of an compilation error, run this command to try to fix it:${reset}"
+ echo -e "\t${blue}rm -r $(realpath .install_kresd) $(realpath .build_kresd)${reset}"
+ echo
+ pushd ..
+ mkdir -p manager/.build_kresd manager/.install_kresd
+ meson manager/.build_kresd --prefix=$(realpath manager/.install_kresd) --default-library=static --buildtype=debug
+ ninja -C manager/.build_kresd
+ ninja install -C manager/.build_kresd
+ export PATH="$(realpath manager/.install_kresd)/sbin:$PATH"
+ popd
+}
diff --git a/manager/scripts/codecheck b/manager/scripts/codecheck
new file mode 100755
index 00000000..1de47cea
--- /dev/null
+++ b/manager/scripts/codecheck
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source "$src_dir/_env.sh"
+
+aggregate_rv=0
+function check_rv {
+ if test "$1" -eq 0; then
+ echo -e " ${green}OK${reset}"
+ else
+ echo -e " ${red}FAIL${reset}"
+ fi
+ aggregate_rv=$(( $aggregate_rv + $1 ))
+}
+
+# stop failing early, otherwise the first failed check would abort all the remaining ones
+set +e
+
+# check that all dependencies are installed correctly
+echo -e "${yellow}Checking that all dependencies are properly installed...${reset}"
+poetry install --dry-run --only main,dev,lint | grep "0 install" > /dev/null
+check_rv $?
+echo
+
+# early exit when dependencies are not installed
+if test "$aggregate_rv" -ne "0"; then
+ echo -e "${red}Dependencies are not properly installed. Run this command to fix it:${reset}"
+ echo -e " ${red}poetry install${reset}"
+ exit 1
+fi
+
+# check formatting using black
+echo -e "${yellow}Checking formatting using black...${reset}"
+black knot_resolver_manager tests scripts --check --diff
+check_rv $?
+echo
+
+# check code with pylint
+echo -e "${yellow}Linting using pylint...${reset}"
+pylint knot_resolver_manager
+check_rv $?
+echo
+
+# check code with flake8
+echo -e "${yellow}Linting using flake8...${reset}"
+flake8 knot_resolver_manager
+check_rv $?
+echo
+
+# check types with mypy
+echo -e "${yellow}Type checking using mypy...${reset}"
+mypy knot_resolver_manager
+check_rv $?
+echo
+
+# check that setup.py is not behind pyproject.toml
+echo -e "${yellow}Checking setup.py${reset}"
+python scripts/create_setup.py | diff - setup.py
+check_rv $?
+python setup.py --help > /dev/null
+check_rv $?
+echo
+
+# fancy messages at the end :)
+if test "$aggregate_rv" -eq "0"; then
+ echo -e "${green}Everything looks great!${reset}"
+else
+ echo -e "${red}Failure.${reset}"
+ echo -e "${red}These commands might help you:${reset}"
+ echo -e "${red}\tpoe format${reset}"
+ echo -e "${red}\tpoe gen-setuppy${reset}"
+ echo -e "${red}That's not great. Could you please fix that?${reset} 😲😟"
+fi
+
+# exit with the aggregate return value
+exit $aggregate_rv
diff --git a/manager/scripts/commit b/manager/scripts/commit
new file mode 100755
index 00000000..17973159
--- /dev/null
+++ b/manager/scripts/commit
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source "$src_dir/_env.sh"
+
+
+# run code check
+poe check
+
+# run unit tests
+poe test
+
+# run integration tests
+poe integration
+
+# invoke commit
+git commit "$@" \ No newline at end of file
diff --git a/manager/scripts/configure-vscode b/manager/scripts/configure-vscode
new file mode 100755
index 00000000..5032a6ba
--- /dev/null
+++ b/manager/scripts/configure-vscode
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source "$src_dir/_env.sh"
+
+
+echo -e "${yellow}This script will overwrite your existing VSCode configuration in the .vscode directory${reset}"
+echo -e "${red}Should we proceed? [yN]${reset}"
+read confirmation
+if test "$confirmation" = "y" -o "$confirmation" = "Y"; then
+ echo -e "${green}OK, changing your VSCode configuration${reset}"
+else
+ echo -e "${red}Aborting${reset}"
+ exit 1
+fi
+
+
+mkdir -p .vscode
+
+# settings.json
+cat > .vscode/settings.json <<EOF
+{
+ "python.defaultInterpreterPath": "$(poetry env info -p)",
+ "python.venvPath": "~/.cache/pypoetry/virtualenvs"
+}
+EOF
+
+
+# launch.json
+cat > .vscode/launch.json <<EOF
+{
+ // Use IntelliSense to learn about possible attributes.
+ // Hover to view descriptions of existing attributes.
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": "Python: Remote Attach",
+ "type": "python",
+ "request": "attach",
+ "connect": {
+ "host": "localhost",
+ "port": 5678
+ },
+ "pathMappings": [
+ {
+ "localRoot": "\${workspaceFolder}",
+ "remoteRoot": "."
+ }
+ ]
+ }
+ ]
+}
+EOF \ No newline at end of file
diff --git a/manager/scripts/container.py b/manager/scripts/container.py
new file mode 100755
index 00000000..191fcae2
--- /dev/null
+++ b/manager/scripts/container.py
@@ -0,0 +1,284 @@
+#!/usr/bin/env python3
+
+import atexit
+import subprocess
+import sys
+import time
+from os import environ
+from pathlib import Path
+from typing import Dict, List, NoReturn, Optional
+
+import click
+
+
+def _get_git_root() -> Path:
+ result = subprocess.run("git rev-parse --show-toplevel", shell=True, stdout=subprocess.PIPE)
+ return Path(str(result.stdout, encoding="utf8").strip())
+
+
+GIT_ROOT: Path = _get_git_root()
+PODMAN_EXECUTABLE = "/usr/bin/podman"
+CACHE_DIR: Path = GIT_ROOT / ".podman-cache"
+
+
+def _start_detached(image: str, publish: Optional[List[int]] = None, ro_mounts: Optional[Dict[Path, Path]] = None) -> str:
+    """Start a detached container"""
+    # avoid mutable default arguments; normalize to empty collections here
+    publish = publish if publish is not None else []
+    ro_mounts = ro_mounts if ro_mounts is not None else {}
+    options = [f"--publish={port}:{port}/tcp" for port in publish] + [
+ f"--volume={str(src)}:{str(dst)}:O" for src, dst in ro_mounts.items()
+ ]
+ command = ["podman", "run", "--rm", "-d", "--security-opt=seccomp=unconfined", *options, image]
+ proc = subprocess.run(command, shell=False, executable=PODMAN_EXECUTABLE, stdout=subprocess.PIPE)
+ assert proc.returncode == 0
+ return str(proc.stdout, "utf8").strip()
+
+
+def _exec(container_id: str, cmd: List[str]) -> int:
+ command = ["podman", "exec", container_id] + cmd
+ return subprocess.call(command, shell=False, executable=PODMAN_EXECUTABLE)
+
+
+def _exec_interactive(container_id: str, cmd: List[str]) -> int:
+ command = ["podman", "exec", "-ti", container_id] + cmd
+ return subprocess.call(command, shell=False, executable=PODMAN_EXECUTABLE)
+
+
+def _stop(container_id: str):
+ command = ["podman", "stop", container_id]
+ ret = subprocess.call(command, shell=False, executable=PODMAN_EXECUTABLE)
+ assert ret == 0
+
+
+def _list_available_image_tags() -> List[str]:
+ res: List[str] = []
+ for c in (GIT_ROOT / "containers").iterdir():
+ if c.is_dir():
+ res.append(c.name)
+ res.sort() # make the order reproducible
+ return res
+
+
+def _extract_tag_from_name(name: str, available: Optional[List[str]] = None) -> str:
+    # accept either a bare tag or a full image name ending in ":<tag>"
+    if available is None:
+        available = _list_available_image_tags()
+
+    if ":" in name:
+        s = name.split(":")
+        if not s[0].endswith("knot-manager"):
+            click.secho(f"Unexpected image name '{s[0]}', expected 'knot-manager'", fg="red")
+            sys.exit(1)
+        name = s[-1]
+
+    if name not in available:
+        click.secho(f"Unexpected tag '{name}'", fg="red")
+        click.secho(f"Available tags are [{' '.join(available)}]", fg="yellow")
+        sys.exit(1)
+
+    return name
+
+
+def _get_tags_to_work_on(args: List[str]) -> List[str]:
+ args = list(args)
+
+    available = _list_available_image_tags()
+
+    # convert to tags, if the user specified full names
+    for i, a in enumerate(args):
+        args[i] = _extract_tag_from_name(a, available)
+
+    if len(args) == 0:
+        args = available
+
+ return args
+
+
+def _full_name_from_tag(tag: str) -> str:
+ return f"registry.nic.cz/knot/knot-resolver-manager/knot-manager:{tag}"
+
+
+def _build(tag: str):
+ command = [
+ "podman",
+ "build",
+ "--security-opt=seccomp=unconfined",
+ "-f",
+ str(GIT_ROOT / "containers" / tag / "Containerfile"),
+ "-t",
+ _full_name_from_tag(tag),
+ str(GIT_ROOT),
+ ]
+ ret = subprocess.call(command, shell=False, executable=PODMAN_EXECUTABLE)
+ assert ret == 0
+
+
+def _pull(tag: str):
+ command = ["podman", "pull", _full_name_from_tag(tag)]
+ ret = subprocess.call(command, shell=False, executable=PODMAN_EXECUTABLE)
+ assert ret == 0
+
+
+def _push(tag: str):
+ command = ["podman", "push", _full_name_from_tag(tag)]
+ ret = subprocess.call(command, shell=False, executable=PODMAN_EXECUTABLE)
+ assert ret == 0
+
+
+def _login_ci():
+ command = [
+ "podman",
+ "login",
+ "-u",
+ environ["CI_REGISTRY_USER"],
+ "-p",
+ environ["CI_REGISTRY_PASSWORD"],
+ environ["CI_REGISTRY"],
+ ]
+ ret = subprocess.call(command, shell=False, executable=PODMAN_EXECUTABLE)
+ assert ret == 0
+
+
+def _save(tag: str):
+ CACHE_DIR.mkdir(exist_ok=True)
+ command = [
+ "podman",
+ "save",
+ "--format",
+ "oci-archive",
+ "-o",
+ str(CACHE_DIR / (tag + ".tar")),
+ _full_name_from_tag(tag),
+ ]
+ ret = subprocess.call(command, shell=False, executable=PODMAN_EXECUTABLE)
+ assert ret == 0
+
+
+def _load(tag: str):
+ cache_file = CACHE_DIR / (tag + ".tar")
+ if cache_file.exists():
+ command = ["podman", "load", "-i", str(CACHE_DIR / (tag + ".tar"))]
+ ret = subprocess.call(command, shell=False, executable=PODMAN_EXECUTABLE)
+ assert ret == 0
+
+
+@click.group()
+def main():
+ pass
+
+
+@main.command(help="Pull CI built images")
+@click.argument("images", nargs=-1)
+def pull(images: List[str]):
+ tags = _get_tags_to_work_on(images)
+
+ for tag in tags:
+ click.secho(f"Pulling image with tag {tag}", fg="yellow")
+ _pull(tag)
+
+
+@main.command(help="Build project containers")
+@click.argument("images", nargs=-1)
+@click.option("-f", "--fetch", "fetch", is_flag=True, default=False, type=bool, help="Pull before building")
+@click.option("--ci-login", "ci_login", is_flag=True, default=False, type=bool, help="Login to registry in CI")
+@click.option("-p", "--push", "push", is_flag=True, default=False, type=bool, help="Push images after building")
+@click.option("--file-cache", is_flag=True, default=False, help="Try to utilise file cache")
+def build(images: List[str], fetch: bool, ci_login: bool, push: bool, file_cache: bool):
+ tags = _get_tags_to_work_on(images)
+
+ if ci_login:
+ _login_ci()
+
+ for tag in tags:
+ if fetch:
+ click.secho(f"Pulling image with tag {tag}", fg="yellow")
+ _pull(tag)
+
+ if file_cache:
+ _load(tag)
+
+ click.secho(f"Building image with tag {tag}", fg="yellow")
+ _build(tag)
+
+ if push:
+ click.secho(f"Pushing image with {tag}", fg="yellow")
+ _push(tag)
+
+ if file_cache:
+ _save(tag)
+
+
+@main.command(help="Run project containers")
+@click.argument("image", nargs=1)
+@click.argument("command", nargs=-1)
+@click.option("-p", "--publish", "publish", multiple=True, type=int, help="Port which should be published")
+@click.option(
+ "-m",
+ "--mount",
+ "mount",
+ multiple=True,
+ nargs=1,
+ type=str,
+ help="Read-only bind mounts into the container, value /path/on/host:/path/in/container",
+)
+@click.option(
+ "-c",
+ "--code",
+ "mount_code",
+ default=False,
+ is_flag=True,
+ type=bool,
+ help="Shortcut to mount gitroot into /code",
+)
+@click.option(
+ "-i",
+ "--interactive",
+ "interactive_inspection",
+ default=False,
+ is_flag=True,
+ type=bool,
+ help="Drop into interactive shell if the command fails",
+)
+def run(
+ image: str,
+ command: List[str],
+ publish: Optional[List[int]],
+ mount: Optional[List[str]],
+ mount_code: bool,
+ interactive_inspection: bool,
+) -> NoReturn:
+ # make sure arguments have the correct type
+ tag = _extract_tag_from_name(image)
+ command = list(command)
+ publishI = [] if publish is None else [int(p) for p in publish]
+ mountI = [] if mount is None else [x.split(":") for x in mount]
+ mount_path = {Path(x[0]).absolute(): Path(x[1]).absolute() for x in mountI}
+ for src_path in mount_path:
+ if not src_path.exists():
+ print(
+ f'The specified path "{str(src_path)}" does not exist on the host system',
+ file=sys.stderr,
+ )
+ exit(1)
+ if mount_code:
+ mount_path[GIT_ROOT] = Path("/code")
+
+ cont = _start_detached(_full_name_from_tag(tag), publish=publishI, ro_mounts=mount_path)
+
+ # register cleanup function
+ def cleanup():
+ _stop(cont)
+
+ atexit.register(cleanup)
+
+ # wait for the container to boot properly
+ time.sleep(0.5)
+ # run the command
+ exit_code = _exec_interactive(cont, command)
+
+ if interactive_inspection and exit_code != 0:
+ print(f"The command {command} failed with exit code {exit_code}.")
+ print("Dropping into an interactive shell as requested. Stop the shell to stop the whole container.")
+ print("-----------------------------")
+ _exec_interactive(cont, ["/bin/bash"])
+
+ # the container should be stopped by the `atexit` module
+ sys.exit(exit_code)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/manager/scripts/create_setup.py b/manager/scripts/create_setup.py
new file mode 100644
index 00000000..087ce3b0
--- /dev/null
+++ b/manager/scripts/create_setup.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Original source:
+# https://github.com/sdss/flicamera/blob/main/create_setup.py
+# We modified the script so that it outputs the setup.py to stdout and that no
+# version upper bounds are outputted in the depencency list.
+#
+# @Author: José Sánchez-Gallego (gallegoj@uw.edu)
+# @Date: 2019-12-18
+# @Filename: create_setup.py
+# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
+
+# This is a temporary solution for the fact that pip install . fails with
+# poetry when there is no setup.py and an extension needs to be compiled.
+# See https://github.com/python-poetry/poetry/issues/1516. Running this
+# script creates a setup.py filled out with information generated by
+# poetry when parsing the pyproject.toml.
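+#
+# Typical usage (this is what the `poe gen-setuppy` task wraps):
+#   python scripts/create_setup.py > setup.py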
+
+import os
+import re
+import sys
+
+from packaging.version import Version
+
+# If there is a global installation of poetry, prefer that.
+lib = os.path.expanduser("~/.poetry/lib")
+vendors = os.path.join(lib, "poetry", "_vendor")
+current_vendors = os.path.join(vendors, "py{}".format(".".join(str(v) for v in sys.version_info[:2])))
+
+sys.path.insert(0, lib)
+sys.path.insert(0, current_vendors)
+
+try:
+ try:
+ from poetry.core.factory import Factory
+ from poetry.core.masonry.builders.sdist import SdistBuilder
+ except (ImportError, ModuleNotFoundError):
+ from poetry.masonry.builders.sdist import SdistBuilder
+ from poetry.factory import Factory
+ from poetry.__version__ import __version__
+except (ImportError, ModuleNotFoundError) as ee:
+ raise ImportError(f"install poetry by doing pip install poetry to use this script: {ee}")
+
+
+# Generate a Poetry object that knows about the metadata in pyproject.toml
+factory = Factory()
+poetry = factory.create_poetry(os.path.dirname(__file__))
+
+# Use the SdistBuilder to generate a blob for setup.py
+if Version(__version__) >= Version("1.1.0b1"):
+ sdist_builder = SdistBuilder(poetry, None)
+else:
+ sdist_builder = SdistBuilder(poetry, None, None)
+
+setuppy_blob: bytes = sdist_builder.build_setup()
+
+
+# patch the result so that it does not contain upper bounds in dependencies
+# (but it should contain them in python version)
+setuppy = setuppy_blob.decode("utf8")
+setuppy, _ = re.subn(r"(\'[^\']+>=[^<>=,\']*),<[^<>=,\']*\'", "\\1'", setuppy)
+
+# output the setup.py script to stdout
+print(setuppy)
+print("\n# This setup.py was autogenerated using Poetry for backward compatibility with setuptools.")
diff --git a/manager/scripts/install.sh b/manager/scripts/install.sh
new file mode 100644
index 00000000..30b08808
--- /dev/null
+++ b/manager/scripts/install.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# ensure consistent behaviour
+scripts_dir="$(dirname "$(realpath "$0")")"
+
+# change dir to 'manager'
+cd "$scripts_dir"
+cd ..
+
+echo "building the Manager ..."
+python3 setup.py install
diff --git a/manager/scripts/make-package.sh b/manager/scripts/make-package.sh
new file mode 100644
index 00000000..85549e65
--- /dev/null
+++ b/manager/scripts/make-package.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+set -o errexit
+set -o nounset
+
+function install_pipx {
+ python3 -m pip install --user pipx
+ python3 -m pipx ensurepath
+ export PATH="$PATH:/root/.local/bin" # hack to make binaries installed with pipx work
+}
+
+function pipx {
+    python3 -m pipx "$@"
+}
+
+function init_debian {
+ export DEBIAN_FRONTEND=noninteractive
+
+ # upgrade system to latest
+ apt-get update -qqq
+ apt-get upgrade -y -qqq
+
+ # configure repository with Knot Resolver dependencies
+ apt-get -y -qqq install apt-transport-https lsb-release ca-certificates wget curl gnupg2
+ sh -c 'echo "deb http://download.opensuse.org/repositories/home:/CZ-NIC:/knot-resolver-build/Debian_10/ /" > /etc/apt/sources.list.d/home:CZ-NIC:knot-resolver-build.list'
+ sh -c 'curl -fsSL https://download.opensuse.org/repositories/home:CZ-NIC:knot-resolver-build/Debian_10/Release.key | gpg --dearmor > /etc/apt/trusted.gpg.d/home_CZ-NIC_knot-resolver-build.gpg'
+ apt-get update -qqq
+
+ # apkg
+ apt-get install -y python3-pip meson git python3-venv
+}
+
+function init_fedora {
+ # upgrade system to latest and install pip
+ dnf upgrade -y
+ dnf install -y python3-pip
+}
+
+
+# system setup
+if command -v dnf; then
+ init_fedora
+elif command -v apt-get; then
+ init_debian
+else
+ echo "System not supported."
+ exit 1
+fi
+
+# install apkg
+install_pipx
+pipx install apkg
+
+# prepare the repo
+#git clone https://gitlab.nic.cz/knot/knot-resolver
+cd /repo
+git config --global user.email "automated-script"
+git config --global user.name "Automated Script"
+git checkout manager-integration-without-submodule
+git submodule update --init --recursive
+
+# build the package
+apkg system-setup
+apkg build -b
+apkg srcpkg
diff --git a/manager/scripts/man b/manager/scripts/man
new file mode 100755
index 00000000..ba28e414
--- /dev/null
+++ b/manager/scripts/man
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source "$src_dir/_env.sh"
+
+build_kresd
+
+man -l .install_kresd/share/man/man8/$1* \ No newline at end of file
diff --git a/manager/scripts/run b/manager/scripts/run
new file mode 100755
index 00000000..d9c7d28f
--- /dev/null
+++ b/manager/scripts/run
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source "$src_dir/_env.sh"
+
+build_kresd
+
+echo
+echo Building Knot Resolver Manager native extensions
+echo ------------------------------------------------
+poetry build
+# copy native modules from build directory to source directory
+shopt -s globstar
+shopt -s nullglob
+for d in build/lib*; do
+ for f in "$d/"**/*.so; do
+ cp -v "$f" ${f#"$d/"}
+ done
+done
+shopt -u globstar
+shopt -u nullglob
+
+
+echo
+echo Knot Manager API is accessible on http://localhost:5000
+echo -------------------------------------------------------
+
+python3 -m knot_resolver_manager -c etc/knot-resolver/config.dev.yml "$@"
diff --git a/manager/scripts/run-debug b/manager/scripts/run-debug
new file mode 100755
index 00000000..b48f2359
--- /dev/null
+++ b/manager/scripts/run-debug
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+# ensure consistent behaviour
+src_dir="$(dirname "$(realpath "$0")")"
+source "$src_dir/_env.sh"
+
+echo The debug server will be listening on port localhost:5678
+echo Use VSCode remote attach feature to connect to the debug server
+echo The manager will start after you connect
+echo API will be running on port 5000
+echo ----------------------------------------
+
+KRES_DEBUG_MANAGER=1 poe run "$@" \ No newline at end of file
diff --git a/manager/setup.py b/manager/setup.py
new file mode 100644
index 00000000..673d55a2
--- /dev/null
+++ b/manager/setup.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+from setuptools import setup
+
+packages = \
+['knot_resolver_manager',
+ 'knot_resolver_manager.cli',
+ 'knot_resolver_manager.cli.cmd',
+ 'knot_resolver_manager.compat',
+ 'knot_resolver_manager.datamodel',
+ 'knot_resolver_manager.datamodel.types',
+ 'knot_resolver_manager.kresd_controller',
+ 'knot_resolver_manager.kresd_controller.supervisord',
+ 'knot_resolver_manager.kresd_controller.supervisord.plugin',
+ 'knot_resolver_manager.utils',
+ 'knot_resolver_manager.utils.modeling']
+
+package_data = \
+{'': ['*'],
+ 'knot_resolver_manager.datamodel': ['templates/*', 'templates/macros/*']}
+
+install_requires = \
+['aiohttp',
+ 'jinja2',
+ 'prometheus-client',
+ 'pyyaml',
+ 'supervisor',
+ 'typing-extensions']
+
+entry_points = \
+{'console_scripts': ['knot-resolver = knot_resolver_manager.__main__:run',
+ 'kresctl = knot_resolver_manager.cli.main:main']}
+
+setup_kwargs = {
+ 'name': 'knot-resolver-manager',
+ 'version': '0.1.0',
+ 'description': 'A central management tool for multiple instances of Knot Resolver',
+ 'long_description': 'None',
+ 'author': 'Václav Šraier',
+ 'author_email': 'vaclav.sraier@nic.cz',
+ 'maintainer': 'None',
+ 'maintainer_email': 'None',
+ 'url': 'None',
+ 'packages': packages,
+ 'package_data': package_data,
+ 'install_requires': install_requires,
+ 'entry_points': entry_points,
+ 'python_requires': '>=3.7,<4.0',
+}
+from build import *
+build(setup_kwargs)
+
+setup(**setup_kwargs)
+
+
+# This setup.py was autogenerated using Poetry for backward compatibility with setuptools.
diff --git a/manager/shell-completion/client.bash b/manager/shell-completion/client.bash
new file mode 100644
index 00000000..b3c19419
--- /dev/null
+++ b/manager/shell-completion/client.bash
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
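+# meson installs this file as <completion_dir>/bash-completion/completions/kresctl
+# (see shell-completion/meson.build); for ad-hoc testing it can simply be
+# sourced into a running shell.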
+
+_kresctl_completion()
+{
+ COMPREPLY=()
+ local cur prev opts
+
+ cur="${COMP_WORDS[COMP_CWORD]}"
+ prev="${COMP_WORDS[COMP_CWORD-1]}"
+
+    # check if the current word is empty,
+    # meaning there is a space after the last non-empty word
+ if [[ -z "$cur" ]]
+ then
+        # no word to complete, return all possible options
+ opts=$(kresctl completion --bash --space "${COMP_WORDS}")
+ else
+ opts=$(kresctl completion --bash "${COMP_WORDS}")
+ fi
+
+ # if there is no completion from kresctl
+ # auto-complete just directories and files
+ if [[ -z "$opts" ]]
+ then
+ COMPREPLY=($(compgen -d -f "${cur}"))
+ else
+        COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") )
+ fi
+
+ return 0
+}
+
+complete -o filenames -o dirnames -F _kresctl_completion kresctl
diff --git a/manager/shell-completion/client.fish b/manager/shell-completion/client.fish
new file mode 100644
index 00000000..ec3a0ab7
--- /dev/null
+++ b/manager/shell-completion/client.fish
@@ -0,0 +1,6 @@
+function __kresctl_completion
+ set -l args (commandline -pco)
+ eval command kresctl $args
+end
+
+complete -c kresctl -a '(__kresctl_completion)' -f \ No newline at end of file
diff --git a/manager/shell-completion/meson.build b/manager/shell-completion/meson.build
new file mode 100644
index 00000000..6c35ffe3
--- /dev/null
+++ b/manager/shell-completion/meson.build
@@ -0,0 +1,13 @@
+# CLI completion for bash-shell
+install_data(
+ sources: 'client.bash',
+ rename: 'kresctl',
+ install_dir: completion_dir / 'bash-completion' / 'completions'
+ )
+
+# CLI completion for fish-shell
+install_data(
+ sources: 'client.fish',
+ rename: 'kresctl.fish',
+ install_dir: completion_dir / 'fish' / 'completions'
+ )
diff --git a/manager/tests/README.md b/manager/tests/README.md
new file mode 100644
index 00000000..1b6fc185
--- /dev/null
+++ b/manager/tests/README.md
@@ -0,0 +1,9 @@
+# Testing infrastructure
+
+## Unit tests
+
+The unit tests use `pytest` and can be invoked by the command `poe test`. They reside in the `unit` subdirectory. They can be run from a freshly cloned repository and they should succeed.
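+
+For example, with the project's virtual environment active, either of the following mirrors what the `poe test` task from `pyproject.toml` runs:
+
+```sh
+env PYTHONPATH=. pytest --cov=knot_resolver_manager tests/unit/   # the whole suite
+env PYTHONPATH=. pytest tests/unit/datamodel/ -v                  # a single subtree while developing
+```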
+
+## Integration tests
+
+The integration tests spawn a full manager with `kresd` instances (which they expect to be installed). The tests are implemented by a custom script and can be invoked by the `poe integration` command.
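+
+For example (`kresd` must be installed and reachable in `$PATH`):
+
+```sh
+poe integration                      # wrapper around the command below
+python tests/integration/runner.py   # what the task actually runs
+```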
diff --git a/manager/tests/integration/.gitignore b/manager/tests/integration/.gitignore
new file mode 100644
index 00000000..3feccc83
--- /dev/null
+++ b/manager/tests/integration/.gitignore
@@ -0,0 +1,2 @@
+cache/
+run/ \ No newline at end of file
diff --git a/manager/tests/integration/config.yml b/manager/tests/integration/config.yml
new file mode 100644
index 00000000..b05f18b3
--- /dev/null
+++ b/manager/tests/integration/config.yml
@@ -0,0 +1,13 @@
+network:
+ listen:
+ - interface: 127.0.0.1@5353
+server:
+ id: integration-test
+ workers: 1
+ rundir: tests/integration/run
+ management:
+ interface: 127.0.0.1@5001
+cache:
+ storage: cache
+logging:
+ level: debug \ No newline at end of file
diff --git a/manager/tests/integration/runner.py b/manager/tests/integration/runner.py
new file mode 100644
index 00000000..f4ea0f26
--- /dev/null
+++ b/manager/tests/integration/runner.py
@@ -0,0 +1,96 @@
+import logging
+import sys
+from pathlib import Path
+from typing import Callable
+
+from knot_resolver_manager.client import KnotManagerClient, count_running_kresds, start_manager_in_background
+
+PORT = 5001
+HOST = "localhost"
+BASE_URL = f"http://{HOST}:{PORT}"
+
+
+Test = Callable[[KnotManagerClient], None]
+
+
+logger = logging.getLogger(__name__)
+
+
+def test_wrapper(test: Test) -> bool:
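+    # spawn a fresh manager for every single test and wait until its API
+    # responds, so that tests cannot influence each other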
+ p = start_manager_in_background(Path("tests/integration/config.yml"))
+ client = KnotManagerClient(BASE_URL)
+ client.wait_for_initialization()
+
+ logger.info("Starting test %s", test.__name__)
+ try:
+ test(client)
+ res = True
+ except AssertionError:
+ logger.error("Test %s failed", exc_info=True)
+ res = False
+
+ try:
+ client.stop()
+ p.join()
+ except Exception:
+        logger.warning("Failed to stop manager gracefully, terminating by force...")
+ p.terminate()
+ p.join()
+
+ return res
+
+
+def worker_count(client: KnotManagerClient):
+ client.set_num_workers(2)
+ cnt = count_running_kresds()
+ assert cnt == 2, f"Expected 2 kresd instances, found {cnt}"
+
+ client.set_num_workers(1)
+ cnt = count_running_kresds()
+ assert cnt == 1, f"Expected 1 kresd instance, found {cnt}"
+
+
+def crash_resistance(client: KnotManagerClient):
+ client.set_num_workers(2)
+ cnt = count_running_kresds()
+ assert cnt == 2, f"Expected 2 kresd instances, found {cnt}"
+
+ # kill the server
+ # p.terminate()
+ # p.join()
+
+ # no change in number of workers should be visible
+ cnt = count_running_kresds()
+ assert cnt == 2, f"Expected 2 kresd instances, found {cnt}"
+
+ # start the server again
+ p = start_manager_in_background(Path("test/integration/config.yml"))
+ try:
+ client.wait_for_initialization()
+ except TimeoutError as e:
+ p.terminate()
+ raise e
+
+ # no change in number of workers should be visible
+ cnt = count_running_kresds()
+ assert cnt == 2, f"Expected 2 kresd instances, found {cnt}"
+
+ # however the manager should now react to changes
+ client.set_num_workers(1)
+ cnt = count_running_kresds()
+ assert cnt == 1, f"Expected 1 kresd instance, found {cnt}"
+
+
+if __name__ == "__main__":
+ logging.basicConfig(level=logging.DEBUG)
+
+    # create the run directory if it does not exist
+ Path("tests/integration/run").mkdir(exist_ok=True)
+
+ # run the tests
+ success = True
+ success &= test_wrapper(worker_count)
+ # success &= test_wrapper(crash_resistance)
+
+ # exit with proper exitcode
+ sys.exit(int(not success))
diff --git a/manager/tests/packaging/control b/manager/tests/packaging/control
new file mode 100644
index 00000000..75c27093
--- /dev/null
+++ b/manager/tests/packaging/control
@@ -0,0 +1,41 @@
+{# Test that all packages are installed #}
+Tests: dependencies.py
+Tests-Directory: manager/tests/packaging/
+
+
+{# Test that kresctl command exists and is in $PATH #}
+Tests: kresctl.sh
+Tests-Directory: manager/tests/packaging
+
+
+{# Test that knot-resolver command exists and is in $PATH #}
+Tests: knot-resolver.sh
+Tests-Directory: manager/tests/packaging
+
+
+{# Tests that manager can be started with default config and it resolves some domains #}
+Tests: systemd_service.sh
+Tests-Directory: manager/tests/packaging
+Restrictions: needs-root
+{% if distro.match('fedora') -%}
+Depends: knot-utils, jq, curl, procps
+{% elif distro.match('debian') or distro.match('ubuntu') -%}
+Depends: knot-dnsutils, jq, curl, procps
+{% elif distro.match('arch') -%}
+Depends: knot, jq, curl
+{% elif distro.match('rocky', 'centos') -%}
+Depends: knot-utils, jq, curl
+{% elif distro.match('almalinux') -%}
+Depends: knot-utils, jq, curl-minimal, procps
+{% elif distro.match('opensuse') -%}
+Depends: knot-utils, jq, curl
+{% else -%}
+Depends: unsupported-distro-this-package-does-not-exist-and-the-test-should-fail
+{%- endif %}
+
+
+Tests: manpage.sh
+Tests-Directory: manager/tests/packaging
+{% if distro.match('fedora') or distro.match('rocky') or distro.match('opensuse') -%}
+Depends: man
+{%- endif %}
diff --git a/manager/tests/packaging/dependencies.py b/manager/tests/packaging/dependencies.py
new file mode 100755
index 00000000..4a426b6e
--- /dev/null
+++ b/manager/tests/packaging/dependencies.py
@@ -0,0 +1,33 @@
+#!/usr/bin/python3
+
+import importlib
+import importlib.util
+import sys
+from types import ModuleType
+
+import pkg_resources
+
+# replace imports with mocks
+dummy = ModuleType("dummy")
+dummy.__dict__["setup"] = lambda *args, **kwargs: None
+dummy.__dict__["build"] = lambda *args, **kwargs: None
+sys.modules["setuptools"] = dummy
+sys.modules["build"] = dummy
+
+# load install_requires array from setup.py
+spec = importlib.util.spec_from_file_location("setup", sys.argv[1] if len(sys.argv) == 2 else "manager/setup.py")
+mod = importlib.util.module_from_spec(spec)
+spec.loader.exec_module(mod)
+install_requires = mod.install_requires
+
+# strip version codes
+deps = set((x[: x.index(">")].lower() if ">" in x else x.lower() for x in install_requires))
+
+# find out which packages are missing
+installed = {pkg.key for pkg in pkg_resources.working_set}
+missing = deps - installed
+
+# fail if there are some missing
+if len(missing) > 0:
+ print(f"Some required packages are missing: {missing}", file=sys.stderr)
+ exit(1)
diff --git a/manager/tests/packaging/interactive/etag.sh b/manager/tests/packaging/interactive/etag.sh
new file mode 100755
index 00000000..a4c49ed9
--- /dev/null
+++ b/manager/tests/packaging/interactive/etag.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -e
+
+socket_opt="--unix-socket /var/run/knot-resolver/manager.sock"
+
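+# fetch the config's current ETag, then replay it via If-None-Match;
+# the server is expected to answer 304 Not Modified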
+etag="$(curl --silent $socket_opt --fail http://localhost:5000/v1/config -o /dev/null -v 2>&1 | grep ETag | sed 's/< ETag: //;s/\s//')"
+status=$(curl --silent $socket_opt --fail http://localhost:5000/v1/config --header "If-None-Match: $etag" -w "%{http_code}" -o /dev/null)
+
+test "$status" -eq 304
diff --git a/manager/tests/packaging/interactive/metrics.sh b/manager/tests/packaging/interactive/metrics.sh
new file mode 100755
index 00000000..a3e8748f
--- /dev/null
+++ b/manager/tests/packaging/interactive/metrics.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+curl --silent --fail --unix-socket /var/run/knot-resolver/manager.sock http://localhost/metrics > /dev/null \ No newline at end of file
diff --git a/manager/tests/packaging/interactive/reload.sh b/manager/tests/packaging/interactive/reload.sh
new file mode 100755
index 00000000..9daa1890
--- /dev/null
+++ b/manager/tests/packaging/interactive/reload.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+set -e
+
+kresctl reload
diff --git a/manager/tests/packaging/interactive/workers.sh b/manager/tests/packaging/interactive/workers.sh
new file mode 100755
index 00000000..cef91b60
--- /dev/null
+++ b/manager/tests/packaging/interactive/workers.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+set -e
+
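+# scale the resolver to five workers at runtime and verify that exactly
+# five kresd processes are then running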
+kresctl config set -p /workers 5
+
+test "$(ps -a -x | grep kresd | grep -v grep | wc -l)" -eq 5
diff --git a/manager/tests/packaging/knot-resolver.sh b/manager/tests/packaging/knot-resolver.sh
new file mode 100755
index 00000000..6aa38bde
--- /dev/null
+++ b/manager/tests/packaging/knot-resolver.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# fail fast
+set -e
+
+# We expect `knot-resolver` command to exist in $PATH
+command -v knot-resolver > /dev/null
diff --git a/manager/tests/packaging/kresctl.sh b/manager/tests/packaging/kresctl.sh
new file mode 100755
index 00000000..579f1a10
--- /dev/null
+++ b/manager/tests/packaging/kresctl.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# fail fast
+set -e
+
+# We expect `kresctl` command to exist in $PATH
+command -v kresctl > /dev/null
diff --git a/manager/tests/packaging/manpage.sh b/manager/tests/packaging/manpage.sh
new file mode 100755
index 00000000..eeab3e4f
--- /dev/null
+++ b/manager/tests/packaging/manpage.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+set -e
+
+man -w kresd
+man -w kresd.systemd
+man -w kresctl
diff --git a/manager/tests/packaging/systemd_service.sh b/manager/tests/packaging/systemd_service.sh
new file mode 100755
index 00000000..99835eed
--- /dev/null
+++ b/manager/tests/packaging/systemd_service.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+# fail fast
+set -e
+
+# check for root
+if test "$(id -u)" -ne 0; then
+ echo "Must be run as root"
+ exit 1
+fi
+
+# We will be starting a systemd service, but other tests might do the same,
+# so this makes sure there is nothing left after we exit
+trap "systemctl stop knot-resolver.service" EXIT
+
+
+if ! systemctl start knot-resolver.service; then
+ echo
+ echo "Failed to start service, here is its status:"
+    systemctl status knot-resolver.service
+    exit 1
+
+else
+ # check that the resolvers are actually running
+ kdig @127.0.0.1 nic.cz
+
+ echo "Running interactive tests..."
+ for test in "$(dirname $0)"/interactive/*; do
+ echo "[test] $test"
+ $test
+ done
+fi
+
diff --git a/manager/tests/unit/__init__.py b/manager/tests/unit/__init__.py
new file mode 100644
index 00000000..d3c6280d
--- /dev/null
+++ b/manager/tests/unit/__init__.py
@@ -0,0 +1,5 @@
+from pathlib import Path
+
+from knot_resolver_manager.datamodel.globals import Context, set_global_validation_context
+
+set_global_validation_context(Context(Path("."), False))
diff --git a/manager/tests/unit/datamodel/templates/test_common_macros.py b/manager/tests/unit/datamodel/templates/test_common_macros.py
new file mode 100644
index 00000000..d730fb9d
--- /dev/null
+++ b/manager/tests/unit/datamodel/templates/test_common_macros.py
@@ -0,0 +1,82 @@
+from knot_resolver_manager.datamodel.config_schema import template_from_str
+from knot_resolver_manager.datamodel.forward_schema import ForwardServerSchema
+
+
+def test_boolean():
+ tmpl_str = """{% from 'macros/common_macros.lua.j2' import boolean %}
+{{ boolean(x) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(x=True) == "true"
+ assert tmpl.render(x=False) == "false"
+
+
+def test_boolean_neg():
+ tmpl_str = """{% from 'macros/common_macros.lua.j2' import boolean %}
+{{ boolean(x,true) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(x=True) == "false"
+ assert tmpl.render(x=False) == "true"
+
+
+def test_string_table():
+ s = "any string"
+ t = [s, "other string"]
+ tmpl_str = """{% from 'macros/common_macros.lua.j2' import string_table %}
+{{ string_table(x) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(x=s) == f"'{s}'"
+ assert tmpl.render(x=t) == f"{{'{s}','{t[1]}',}}"
+
+
+def test_str2ip_table():
+ s = "2001:DB8::d0c"
+ t = [s, "192.0.2.1"]
+ tmpl_str = """{% from 'macros/common_macros.lua.j2' import str2ip_table %}
+{{ str2ip_table(x) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(x=s) == f"kres.str2ip('{s}')"
+ assert tmpl.render(x=t) == f"{{kres.str2ip('{s}'),kres.str2ip('{t[1]}'),}}"
+
+
+def test_qtype_table():
+ s = "AAAA"
+ t = [s, "TXT"]
+ tmpl_str = """{% from 'macros/common_macros.lua.j2' import qtype_table %}
+{{ qtype_table(x) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(x=s) == f"kres.type.{s}"
+ assert tmpl.render(x=t) == f"{{kres.type.{s},kres.type.{t[1]},}}"
+
+
+def test_servers_table():
+ s = "2001:DB8::d0c"
+ t = [s, "192.0.2.1"]
+ tmpl_str = """{% from 'macros/common_macros.lua.j2' import servers_table %}
+{{ servers_table(x) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(x=s) == f"'{s}'"
+ assert tmpl.render(x=t) == f"{{'{s}','{t[1]}',}}"
+ assert tmpl.render(x=[{"address": s}, {"address": t[1]}]) == f"{{'{s}','{t[1]}',}}"
+
+
+def test_tls_servers_table():
+ d = ForwardServerSchema(
+        # the ca-file is a dummy, because its existence is checked
+ {"address": ["2001:DB8::d0c"], "hostname": "res.example.com", "ca-file": "/etc/passwd"}
+ )
+ t = [d, ForwardServerSchema({"address": ["192.0.2.1"], "pin-sha256": "YQ=="})]
+ tmpl_str = """{% from 'macros/common_macros.lua.j2' import tls_servers_table %}
+{{ tls_servers_table(x) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(x=[d.address, t[1].address]) == f"{{'{d.address}','{t[1].address}',}}"
+ assert (
+ tmpl.render(x=t)
+ == f"{{{{'{d.address}',hostname='{d.hostname}',ca_file='{d.ca_file}',}},{{'{t[1].address}',pin_sha256={{'{t[1].pin_sha256}',}}}},}}"
+ )
diff --git a/manager/tests/unit/datamodel/templates/test_forward_macros.py b/manager/tests/unit/datamodel/templates/test_forward_macros.py
new file mode 100644
index 00000000..5f80df15
--- /dev/null
+++ b/manager/tests/unit/datamodel/templates/test_forward_macros.py
@@ -0,0 +1,27 @@
+from knot_resolver_manager.datamodel.config_schema import template_from_str
+from knot_resolver_manager.datamodel.forward_schema import ForwardSchema
+from knot_resolver_manager.datamodel.types import IPAddressOptionalPort
+
+
+def test_policy_rule_forward_add():
+ tmpl_str = """{% from 'macros/forward_macros.lua.j2' import policy_rule_forward_add %}
+{{ policy_rule_forward_add(rule) }}"""
+
+ rule = ForwardSchema(
+ {
+ "subtree": ".",
+ "servers": [{"address": ["2001:148f:fffe::1", "185.43.135.1"], "hostname": "odvr.nic.cz"}],
+ "options": {
+ "authoritative": False,
+ "dnssec": True,
+ },
+ }
+ )
+ result = "policy.rule_forward_add('.',{dnssec=true,auth=false},{{'2001:148f:fffe::1',tls=false,hostname='odvr.nic.cz',},{'185.43.135.1',tls=false,hostname='odvr.nic.cz',},})"
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(rule=rule) == result
+
+ rule.servers = [IPAddressOptionalPort("2001:148f:fffe::1"), IPAddressOptionalPort("185.43.135.1")]
+ result = "policy.rule_forward_add('.',{dnssec=true,auth=false},{{'2001:148f:fffe::1'},{'185.43.135.1'},})"
+ assert tmpl.render(rule=rule) == result
diff --git a/manager/tests/unit/datamodel/templates/test_network_macros.py b/manager/tests/unit/datamodel/templates/test_network_macros.py
new file mode 100644
index 00000000..ad193d98
--- /dev/null
+++ b/manager/tests/unit/datamodel/templates/test_network_macros.py
@@ -0,0 +1,35 @@
+from knot_resolver_manager.datamodel.config_schema import template_from_str
+from knot_resolver_manager.datamodel.network_schema import ListenSchema
+
+
+def test_network_listen():
+ tmpl_str = """{% from 'macros/network_macros.lua.j2' import network_listen %}
+{{ network_listen(listen) }}"""
+ tmpl = template_from_str(tmpl_str)
+
+ soc = ListenSchema({"unix-socket": "/tmp/kresd-socket", "kind": "dot"})
+ assert tmpl.render(listen=soc) == "net.listen('/tmp/kresd-socket',nil,{kind='tls',freebind=false})\n"
+ soc_list = ListenSchema({"unix-socket": [soc.unix_socket.to_std()[0], "/tmp/kresd-socket2"], "kind": "dot"})
+ assert (
+ tmpl.render(listen=soc_list)
+ == "net.listen('/tmp/kresd-socket',nil,{kind='tls',freebind=false})\n"
+ + "net.listen('/tmp/kresd-socket2',nil,{kind='tls',freebind=false})\n"
+ )
+
+ ip = ListenSchema({"interface": "::1@55", "freebind": True})
+ assert tmpl.render(listen=ip) == "net.listen('::1',55,{kind='dns',freebind=true})\n"
+ ip_list = ListenSchema({"interface": [ip.interface.to_std()[0], "127.0.0.1@5353"]})
+ assert (
+ tmpl.render(listen=ip_list)
+ == "net.listen('::1',55,{kind='dns',freebind=false})\n"
+ + "net.listen('127.0.0.1',5353,{kind='dns',freebind=false})\n"
+ )
+
+ intrfc = ListenSchema({"interface": "eth0", "kind": "doh2"})
+ assert tmpl.render(listen=intrfc) == "net.listen(net.eth0,443,{kind='doh2',freebind=false})\n"
+ intrfc_list = ListenSchema({"interface": [intrfc.interface.to_std()[0], "lo"], "port": 5555, "kind": "doh2"})
+ assert (
+ tmpl.render(listen=intrfc_list)
+ == "net.listen(net.eth0,5555,{kind='doh2',freebind=false})\n"
+ + "net.listen(net.lo,5555,{kind='doh2',freebind=false})\n"
+ )
diff --git a/manager/tests/unit/datamodel/templates/test_policy_macros.py b/manager/tests/unit/datamodel/templates/test_policy_macros.py
new file mode 100644
index 00000000..2920a206
--- /dev/null
+++ b/manager/tests/unit/datamodel/templates/test_policy_macros.py
@@ -0,0 +1,132 @@
+from typing import List
+
+from knot_resolver_manager.datamodel.config_schema import template_from_str
+from knot_resolver_manager.datamodel.network_schema import AddressRenumberingSchema
+from knot_resolver_manager.datamodel.policy_schema import AnswerSchema
+from knot_resolver_manager.datamodel.types import PolicyFlagEnum
+
+
+def test_policy_add():
+ rule = "policy.all(policy.DENY)"
+ tmpl_str = """{% from 'macros/policy_macros.lua.j2' import policy_add %}
+{{ policy_add(rule, postrule) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(rule=rule, postrule=False) == f"policy.add({rule})"
+ assert tmpl.render(rule=rule, postrule=True) == f"policy.add({rule},true)"
+
+
+def test_policy_tags_assign():
+ tags: List[str] = ["t01", "t02", "t03"]
+ tmpl_str = """{% from 'macros/policy_macros.lua.j2' import policy_tags_assign %}
+{{ policy_tags_assign(tags) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(tags=tags[1]) == f"policy.TAGS_ASSIGN('{tags[1]}')"
+ assert tmpl.render(tags=tags) == "policy.TAGS_ASSIGN({" + ",".join([f"'{x}'" for x in tags]) + ",})"
+
+
+def test_policy_get_tagset():
+ tags: List[str] = ["t01", "t02", "t03"]
+ tmpl_str = """{% from 'macros/policy_macros.lua.j2' import policy_get_tagset %}
+{{ policy_get_tagset(tags) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(tags=tags[1]) == f"policy.get_tagset('{tags[1]}')"
+ assert tmpl.render(tags=tags) == "policy.get_tagset({" + ",".join([f"'{x}'" for x in tags]) + ",})"
+
+
+# Filters
+
+
+def test_policy_all():
+ action = "policy.DENY"
+ tmpl_str = """{% from 'macros/policy_macros.lua.j2' import policy_all %}
+{{ policy_all(action) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(action=action) == f"policy.all({action})"
+
+
+def test_policy_suffix():
+ action = "policy.DROP"
+ suffix = "policy.todnames({'example.com'})"
+ tmpl_str = """{% from 'macros/policy_macros.lua.j2' import policy_suffix %}
+{{ policy_suffix(action, suffix) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(action=action, suffix=suffix) == f"policy.suffix({action},{suffix})"
+
+
+def test_policy_suffix_common():
+ action = "policy.DROP"
+ suffix = "policy.todnames({'first.example.com','second.example.com'})"
+ common = "policy.todnames({'example.com'})"
+ tmpl_str = """{% from 'macros/policy_macros.lua.j2' import policy_suffix_common %}
+{{ policy_suffix_common(action, suffix, common) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(action=action, suffix=suffix) == f"policy.suffix_common({action},{suffix})"
+ assert (
+ tmpl.render(action=action, suffix=suffix, common=common) == f"policy.suffix_common({action},{suffix},{common})"
+ )
+
+
+def test_policy_pattern():
+ action = "policy.DENY"
+ pattern = "[0-9]+\2cz"
+ tmpl_str = """{% from 'macros/policy_macros.lua.j2' import policy_pattern %}
+{{ policy_pattern(action, pattern) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(action=action, pattern=pattern) == f"policy.pattern({action},'{pattern}')"
+
+
+def test_policy_rpz():
+ action = "policy.DENY"
+ path = "/etc/knot-resolver/blocklist.rpz"
+ tmpl_str = """{% from 'macros/policy_macros.lua.j2' import policy_rpz %}
+{{ policy_rpz(action, path, watch) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(action=action, path=path) == f"policy.rpz({action},'{path}',false)"
+ assert tmpl.render(action=action, path=path, watch=True) == f"policy.rpz({action},'{path}',true)"
+
+
+# Non-chain actions
+
+
+def test_policy_deny_msg():
+ msg = "this is deny message"
+ tmpl_str = """{% from 'macros/policy_macros.lua.j2' import policy_deny_msg %}
+{{ policy_deny_msg(msg) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(msg=msg) == f"policy.DENY_MSG('{msg}')"
+
+
+def test_policy_reroute():
+ r: List[AddressRenumberingSchema] = [
+ AddressRenumberingSchema({"source": "192.0.2.0/24", "destination": "127.0.0.0"}),
+ AddressRenumberingSchema({"source": "10.10.10.0/24", "destination": "192.168.1.0"}),
+ ]
+ tmpl_str = """{% from 'macros/policy_macros.lua.j2' import policy_reroute %}
+{{ policy_reroute(reroute) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert (
+ tmpl.render(reroute=r)
+ == f"policy.REROUTE({{['{r[0].source}']='{r[0].destination}'}},{{['{r[1].source}']='{r[1].destination}'}},)"
+ )
+
+
+def test_policy_answer():
+ ans = AnswerSchema({"rtype": "AAAA", "rdata": "192.0.2.7"})
+ tmpl_str = """{% from 'macros/policy_macros.lua.j2' import policy_answer %}
+{{ policy_answer(ans) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert (
+ tmpl.render(ans=ans)
+ == f"policy.ANSWER({{[kres.type.{ans.rtype}]={{rdata=kres.str2ip('{ans.rdata}'),ttl={ans.ttl.seconds()}}}}},{str(ans.nodata).lower()})"
+ )
diff --git a/manager/tests/unit/datamodel/templates/test_view_macros.py b/manager/tests/unit/datamodel/templates/test_view_macros.py
new file mode 100644
index 00000000..3a3f35f9
--- /dev/null
+++ b/manager/tests/unit/datamodel/templates/test_view_macros.py
@@ -0,0 +1,53 @@
+from typing import Any
+
+import pytest
+
+from knot_resolver_manager.datamodel.config_schema import template_from_str
+from knot_resolver_manager.datamodel.view_schema import ViewOptionsSchema, ViewSchema
+
+
+def test_view_insert_action():
+ subnet = "10.0.0.0/8"
+ action = "policy.DENY"
+ tmpl_str = """{% from 'macros/view_macros.lua.j2' import view_insert_action %}
+{{ view_insert_action(subnet, action) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ assert tmpl.render(subnet=subnet, action=action) == f"assert(C.kr_view_insert_action('{ subnet }',{ action })==0)"
+
+
+def test_view_flags():
+ tmpl_str = """{% from 'macros/view_macros.lua.j2' import view_flags %}
+{{ view_flags(options) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ options = ViewOptionsSchema({"dns64": False, "minimize": False})
+ assert tmpl.render(options=options) == '"NO_MINIMIZE","DNS64_DISABLE",'
+ assert tmpl.render(options=ViewOptionsSchema()) == ""
+
+
+def test_view_options_flags():
+ tmpl_str = """{% from 'macros/view_macros.lua.j2' import view_options_flags %}
+{{ view_options_flags(options) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ options = ViewOptionsSchema({"dns64": False, "minimize": False})
+ assert tmpl.render(options=options) == "policy.FLAGS({'NO_MINIMIZE','DNS64_DISABLE',})"
+ assert tmpl.render(options=ViewOptionsSchema()) == "policy.FLAGS({})"
+
+
+@pytest.mark.parametrize(
+ "val,res",
+ [
+ ("allow", "policy.TAGS_ASSIGN({})"),
+ ("refused", "'policy.REFUSE'"),
+ ("noanswer", "'policy.NO_ANSWER'"),
+ ],
+)
+def test_view_answer(val: Any, res: Any):
+ tmpl_str = """{% from 'macros/view_macros.lua.j2' import view_answer %}
+{{ view_answer(view.answer) }}"""
+
+ tmpl = template_from_str(tmpl_str)
+ view = ViewSchema({"subnets": ["10.0.0.0/8"], "answer": val})
+ assert tmpl.render(view=view) == res
diff --git a/manager/tests/unit/datamodel/test_config_schema.py b/manager/tests/unit/datamodel/test_config_schema.py
new file mode 100644
index 00000000..31703b96
--- /dev/null
+++ b/manager/tests/unit/datamodel/test_config_schema.py
@@ -0,0 +1,55 @@
+import json
+from typing import Any, Dict, cast
+
+from knot_resolver_manager.datamodel import KresConfig
+
+
+def test_config_defaults():
+ config = KresConfig()
+
+ # DNS64 default
+ assert config.dns64 == False
+
+
+def test_dnssec_false():
+ config = KresConfig({"dnssec": False})
+
+ assert config.dnssec == False
+
+
+def test_dnssec_default_true():
+ config = KresConfig()
+
+ # DNSSEC defaults
+ assert config.dnssec.trust_anchor_sentinel == True
+ assert config.dnssec.trust_anchor_signal_query == True
+ assert config.dnssec.time_skew_detection == True
+ assert config.dnssec.refresh_time == None
+ assert config.dnssec.trust_anchors == None
+ assert config.dnssec.negative_trust_anchors == None
+ assert config.dnssec.trust_anchors_files == None
+ assert int(config.dnssec.keep_removed) == 0
+ assert str(config.dnssec.hold_down_time) == "30d"
+
+
+def test_dns64_prefix_default():
+ assert str(KresConfig({"dns64": True}).dns64.prefix) == "64:ff9b::/96"
+
+
+def test_config_json_schema():
+ dct = KresConfig.json_schema()
+
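+    # walk the generated schema and require every subtree to be JSON-serializable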
+    def recser(obj: Any, path: str = "") -> None:
+        if not isinstance(obj, dict):
+            return
+        else:
+            obj = cast(Dict[Any, Any], obj)
+            for key in obj:
+                recser(obj[key], path=f"{path}/{key}")
+            try:
+                _ = json.dumps(obj)
+            except BaseException as e:
+                raise Exception(f"failed to serialize '{path}': {e}") from e
+
+ recser(dct)
diff --git a/manager/tests/unit/datamodel/test_local_data.py b/manager/tests/unit/datamodel/test_local_data.py
new file mode 100644
index 00000000..fe529777
--- /dev/null
+++ b/manager/tests/unit/datamodel/test_local_data.py
@@ -0,0 +1,33 @@
+from typing import Any
+
+import pytest
+from pytest import raises
+
+from knot_resolver_manager.datamodel.local_data_schema import LocalDataSchema, SubtreeSchema
+from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
+
+
+@pytest.mark.parametrize(
+ "val",
+ [
+ {"type": "empty", "roots": ["sub2.example.org"]},
+ {"type": "empty", "roots-url": "https://example.org/blocklist.txt", "refresh": "1d"},
+ {"type": "nxdomain", "roots-file": "/etc/hosts"}, # must be an existing file or validation will fail
+ {"type": "redirect", "roots": ["sub4.example.org"], "addresses": ["127.0.0.1", "::1"]},
+ ],
+)
+def test_subtree_valid(val: Any):
+ SubtreeSchema(val)
+
+
+@pytest.mark.parametrize(
+ "val",
+ [
+ {"type": "empty"},
+ {"type": "empty", "roots": ["sub2.example.org"], "roots-url": "https://example.org/blocklist.txt"},
+ {"type": "redirect", "roots": ["sub4.example.org"], "refresh": "1d"},
+ ],
+)
+def test_subtree_invalid(val: Any):
+ with raises(DataValidationError):
+ SubtreeSchema(val)
diff --git a/manager/tests/unit/datamodel/test_lua_schema.py b/manager/tests/unit/datamodel/test_lua_schema.py
new file mode 100644
index 00000000..30d69bd9
--- /dev/null
+++ b/manager/tests/unit/datamodel/test_lua_schema.py
@@ -0,0 +1,9 @@
+from pytest import raises
+
+from knot_resolver_manager.datamodel.lua_schema import LuaSchema
+from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
+
+
+def test_invalid():
+ with raises(DataValidationError):
+ LuaSchema({"script": "-- lua script", "script-file": "path/to/file"})
diff --git a/manager/tests/unit/datamodel/test_management_schema.py b/manager/tests/unit/datamodel/test_management_schema.py
new file mode 100644
index 00000000..870e7208
--- /dev/null
+++ b/manager/tests/unit/datamodel/test_management_schema.py
@@ -0,0 +1,21 @@
+from typing import Any, Dict, Optional
+
+import pytest
+
+from knot_resolver_manager.datamodel.management_schema import ManagementSchema
+from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
+
+
+@pytest.mark.parametrize("val", [{"interface": "::1@53"}, {"unix-socket": "/tmp/socket"}])
+def test_management_valid(val: Dict[str, Any]):
+ o = ManagementSchema(val)
+ if o.interface:
+ assert str(o.interface) == val["interface"]
+ if o.unix_socket:
+ assert str(o.unix_socket) == val["unix-socket"]
+
+
+@pytest.mark.parametrize("val", [None, {"interface": "::1@53", "unix-socket": "/tmp/socket"}])
+def test_management_invalid(val: Optional[Dict[str, Any]]):
+ with pytest.raises(DataValidationError):
+ ManagementSchema(val)
diff --git a/manager/tests/unit/datamodel/test_network_schema.py b/manager/tests/unit/datamodel/test_network_schema.py
new file mode 100644
index 00000000..7b616f34
--- /dev/null
+++ b/manager/tests/unit/datamodel/test_network_schema.py
@@ -0,0 +1,80 @@
+from typing import Any, Dict, Optional
+
+import pytest
+from pytest import raises
+
+from knot_resolver_manager.datamodel.network_schema import ListenSchema, NetworkSchema
+from knot_resolver_manager.datamodel.types import InterfaceOptionalPort, PortNumber
+from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
+
+
+def test_listen_defaults():
+ o = NetworkSchema()
+
+ assert len(o.listen) == 2
+ # {"ip-address": "127.0.0.1"}
+ assert o.listen[0].interface.to_std() == [InterfaceOptionalPort("127.0.0.1")]
+ assert o.listen[0].port == PortNumber(53)
+ assert o.listen[0].kind == "dns"
+ assert o.listen[0].freebind == False
+ # {"ip-address": "::1", "freebind": True}
+ assert o.listen[1].interface.to_std() == [InterfaceOptionalPort("::1")]
+ assert o.listen[1].port == PortNumber(53)
+ assert o.listen[1].kind == "dns"
+ assert o.listen[1].freebind == True
+
+
+@pytest.mark.parametrize(
+ "listen,port",
+ [
+ ({"unix-socket": ["/tmp/kresd-socket"]}, None),
+ ({"interface": ["::1"]}, 53),
+ ({"interface": ["::1"], "kind": "dot"}, 853),
+ ({"interface": ["::1"], "kind": "doh-legacy"}, 443),
+ ({"interface": ["::1"], "kind": "doh2"}, 443),
+ ],
+)
+def test_listen_port_defaults(listen: Dict[str, Any], port: Optional[int]):
+ assert ListenSchema(listen).port == (PortNumber(port) if port else None)
+
+
+@pytest.mark.parametrize(
+ "listen",
+ [
+ {"unix-socket": "/tmp/kresd-socket"},
+ {"unix-socket": ["/tmp/kresd-socket", "/tmp/kresd-socket2"]},
+ {"interface": "::1"},
+ {"interface": "::1@5353"},
+ {"interface": "::1", "port": 5353},
+ {"interface": ["127.0.0.1", "::1"]},
+ {"interface": ["127.0.0.1@5353", "::1@5353"]},
+ {"interface": ["127.0.0.1", "::1"], "port": 5353},
+ {"interface": "lo"},
+ {"interface": "lo@5353"},
+ {"interface": "lo", "port": 5353},
+ {"interface": ["lo", "eth0"]},
+ {"interface": ["lo@5353", "eth0@5353"]},
+ {"interface": ["lo", "eth0"], "port": 5353},
+ ],
+)
+def test_listen_valid(listen: Dict[str, Any]):
+ assert ListenSchema(listen)
+
+
+@pytest.mark.parametrize(
+ "listen",
+ [
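+        # a port with a unix socket, mixing socket and interface, or a port given twice must all fail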
+ {"unix-socket": "/tmp/kresd-socket", "port": "53"},
+ {"interface": "::1", "unix-socket": "/tmp/kresd-socket"},
+ {"interface": "::1@5353", "port": 5353},
+ {"interface": ["127.0.0.1", "::1@5353"]},
+ {"interface": ["127.0.0.1@5353", "::1@5353"], "port": 5353},
+ {"interface": "lo@5353", "port": 5353},
+ {"interface": ["lo", "eth0@5353"]},
+ {"interface": ["lo@5353", "eth0@5353"], "port": 5353},
+ ],
+)
+def test_listen_invalid(listen: Dict[str, Any]):
+ with raises(DataValidationError):
+ ListenSchema(listen)
diff --git a/manager/tests/unit/datamodel/test_options_schema.py b/manager/tests/unit/datamodel/test_options_schema.py
new file mode 100644
index 00000000..f6bd5c3e
--- /dev/null
+++ b/manager/tests/unit/datamodel/test_options_schema.py
@@ -0,0 +1,7 @@
+from knot_resolver_manager.datamodel.options_schema import OptionsSchema
+
+
+def test_prediction_true_defaults():
+ o = OptionsSchema({"prediction": True})
+ assert str(o.prediction.window) == "15m"
+ assert int(o.prediction.period) == 24
diff --git a/manager/tests/unit/datamodel/test_policy_schema.py b/manager/tests/unit/datamodel/test_policy_schema.py
new file mode 100644
index 00000000..aeb98a71
--- /dev/null
+++ b/manager/tests/unit/datamodel/test_policy_schema.py
@@ -0,0 +1,89 @@
+from typing import Any, Dict
+
+import pytest
+from pytest import raises
+
+from knot_resolver_manager.datamodel.policy_schema import ActionSchema, PolicySchema
+from knot_resolver_manager.datamodel.types import PolicyActionEnum
+from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
+from knot_resolver_manager.utils.modeling.types import get_generic_type_arguments
+
+noconfig_actions = [
+ "pass",
+ "drop",
+ "refuse",
+ "tc",
+ "debug-always",
+ "debug-cache-miss",
+ "qtrace",
+ "reqtrace",
+]
+configurable_actions = ["deny", "reroute", "answer", "mirror", "forward", "stub"]
+policy_actions = get_generic_type_arguments(PolicyActionEnum)
+
+
+@pytest.mark.parametrize("val", [item for item in policy_actions if item not in configurable_actions])
+def test_policy_action_valid(val: Any):
+ PolicySchema({"action": val})
+ ActionSchema({"action": val})
+
+
+@pytest.mark.parametrize("val", [{"action": "invalid-action"}])
+def test_action_invalid(val: Dict[str, Any]):
+ with raises(DataValidationError):
+ PolicySchema(val)
+ with raises(DataValidationError):
+ ActionSchema(val)
+
+
+@pytest.mark.parametrize(
+ "val",
+ [
+ {"action": "deny", "message": "this is deny message"},
+ {
+ "action": "reroute",
+ "reroute": [
+ {"source": "192.0.2.0/24", "destination": "127.0.0.0"},
+ {"source": "10.10.10.0/24", "destination": "192.168.1.0"},
+ ],
+ },
+ {"action": "answer", "answer": {"rtype": "AAAA", "rdata": "192.0.2.7"}},
+ {"action": "mirror", "servers": ["192.0.2.1@5353", "2001:148f:ffff::1"]},
+ {"action": "forward", "servers": ["192.0.2.1@5353", "2001:148f:ffff::1"]},
+ {"action": "stub", "servers": ["192.0.2.1@5353", "2001:148f:ffff::1"]},
+ {"action": "forward", "servers": [{"address": ["127.0.0.1@5353"]}]},
+ ],
+)
+def test_policy_valid(val: Dict[str, Any]):
+ PolicySchema(val)
+ ActionSchema(val)
+
+
+@pytest.mark.parametrize(
+ "val",
+ [
+ {"action": "reroute"},
+ {"action": "answer"},
+ {"action": "mirror"},
+ {"action": "pass", "reroute": [{"source": "192.0.2.0/24", "destination": "127.0.0.0"}]},
+ {"action": "pass", "answer": {"rtype": "AAAA", "rdata": "::1"}},
+ {"action": "pass", "servers": ["127.0.0.1@5353"]},
+ {"action": "mirror", "servers": [{"address": ["127.0.0.1@5353"]}]},
+ ],
+)
+def test_policy_invalid(val: Dict[str, Any]):
+ with raises(DataValidationError):
+ PolicySchema(val)
+ with raises(DataValidationError):
+ ActionSchema(val)
+
+
+@pytest.mark.parametrize(
+ "val",
+ noconfig_actions,
+)
+def test_policy_message_invalid(val: str):
+ with raises(DataValidationError):
+ PolicySchema({"action": f"{val}", "message": "this is deny message"})
+ with raises(DataValidationError):
+ ActionSchema({"action": f"{val}", "message": "this is deny message"})
diff --git a/manager/tests/unit/datamodel/test_rpz_schema.py b/manager/tests/unit/datamodel/test_rpz_schema.py
new file mode 100644
index 00000000..6603deed
--- /dev/null
+++ b/manager/tests/unit/datamodel/test_rpz_schema.py
@@ -0,0 +1,23 @@
+import pytest
+from pytest import raises
+
+from knot_resolver_manager.datamodel.rpz_schema import RPZSchema
+from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
+
+
+@pytest.mark.parametrize(
+ "val",
+ [
+ "pass",
+ "drop",
+ "refuse",
+ "tc",
+ "debug-always",
+ "debug-cache-miss",
+ "qtrace",
+ "reqtrace",
+ ],
+)
+def test_message_invalid(val: str):
+ with raises(DataValidationError):
+ RPZSchema({"action": f"{val}", "file": "whitelist.rpz", "message": "this is deny message"})
diff --git a/manager/tests/unit/datamodel/types/test_base_types.py b/manager/tests/unit/datamodel/types/test_base_types.py
new file mode 100644
index 00000000..acc8baf3
--- /dev/null
+++ b/manager/tests/unit/datamodel/types/test_base_types.py
@@ -0,0 +1,39 @@
+import random
+import sys
+from typing import List, Optional
+
+import pytest
+from pytest import raises
+
+from knot_resolver_manager.datamodel.types.base_types import IntRangeBase
+from knot_resolver_manager.exceptions import KresManagerException
+
+
+@pytest.mark.parametrize("min,max", [(0, None), (None, 0), (1, 65535), (-65535, -1)])
+def test_int_range_base(min: Optional[int], max: Optional[int]):
+    class Test(IntRangeBase):
+        if min is not None:
+            _min = min
+        if max is not None:
+            _max = max
+
+    if min is not None:
+        assert int(Test(min)) == min
+    if max is not None:
+        assert int(Test(max)) == max
+
+    rmin = min if min is not None else -sys.maxsize - 1
+    rmax = max if max is not None else sys.maxsize
+
+ n = 100
+ vals: List[int] = [random.randint(rmin, rmax) for _ in range(n)]
+    assert all(str(Test(val)) == f"{val}" for val in vals)
+
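+    # out-of-range samples above the max and below the min, when those bounds exist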
+ invals: List[int] = []
+    invals.extend([random.randint(rmax + 1, sys.maxsize) for _ in range(n // 2)] if max is not None else [])
+    invals.extend([random.randint(-sys.maxsize - 1, rmin - 1) for _ in range(n // 2)] if min is not None else [])
+
+ for inval in invals:
+ with raises(KresManagerException):
+ Test(inval)
diff --git a/manager/tests/unit/datamodel/types/test_custom_types.py b/manager/tests/unit/datamodel/types/test_custom_types.py
new file mode 100644
index 00000000..b9d6f567
--- /dev/null
+++ b/manager/tests/unit/datamodel/types/test_custom_types.py
@@ -0,0 +1,254 @@
+import ipaddress
+import random
+import string
+from typing import Any
+
+import pytest
+from pytest import raises
+
+from knot_resolver_manager.datamodel.types import (
+ Dir,
+ DomainName,
+ InterfaceName,
+ InterfaceOptionalPort,
+ InterfacePort,
+ IPAddressOptionalPort,
+ IPAddressPort,
+ IPNetwork,
+ IPv4Address,
+ IPv6Address,
+ IPv6Network96,
+ PortNumber,
+ SizeUnit,
+ TimeUnit,
+)
+from knot_resolver_manager.utils.modeling import BaseSchema
+
+
+def _rand_domain(label_chars: int, levels: int = 1) -> str:
+ return "".join(
+ ["".join(random.choices(string.ascii_letters + string.digits, k=label_chars)) + "." for i in range(levels)]
+ )
+
+
+@pytest.mark.parametrize("val", [1, 65_535, 5353, 5000])
+def test_port_number_valid(val: int):
+ assert int(PortNumber(val)) == val
+
+
+@pytest.mark.parametrize("val", [0, 65_636, -1, "53"])
+def test_port_number_invalid(val: Any):
+ with raises(ValueError):
+ PortNumber(val)
+
+
+@pytest.mark.parametrize("val", ["5368709120B", "5242880K", "5120M", "5G"])
+def test_size_unit_valid(val: str):
+ o = SizeUnit(val)
+ assert int(o) == 5368709120
+ assert str(o) == val
+ assert o.bytes() == 5368709120
+
+
+@pytest.mark.parametrize("val", ["-5B", 5, -5242880, "45745mB"])
+def test_size_unit_invalid(val: Any):
+ with raises(ValueError):
+ SizeUnit(val)
+
+
+@pytest.mark.parametrize("val", ["1d", "24h", "1440m", "86400s", "86400000ms", "86400000000us"])
+def test_time_unit_valid(val: str):
+ o = TimeUnit(val)
+ assert int(o) == 86400000000
+ assert str(o) == val
+ assert o.seconds() == 86400
+ assert o.millis() == 86400000
+ assert o.micros() == 86400000000
+
+
+@pytest.mark.parametrize("val", ["-1", "-24h", "1440mm", 6575, -1440])
+def test_time_unit_invalid(val: Any):
+ with raises(ValueError):
+ TimeUnit("-1")
+
+
+def test_parsing_units():
+ class TestSchema(BaseSchema):
+ size: SizeUnit
+ time: TimeUnit
+
+ o = TestSchema({"size": "3K", "time": "10m"})
+ assert o.size == SizeUnit("3072B")
+ assert o.time == TimeUnit("600s")
+ assert o.size.bytes() == 3072
+ assert o.time.seconds() == 10 * 60
+
+
+def test_checked_path():
+ class TestSchema(BaseSchema):
+ p: Dir
+
+ assert str(TestSchema({"p": "/tmp"}).p) == "/tmp"
+
+
+@pytest.mark.parametrize(
+ "val",
+ [
+ ".",
+ "example.com",
+ "this.is.example.com.",
+ "test.example.com",
+ "test-example.com",
+ "bücher.com.",
+ "příklad.cz",
+ _rand_domain(63),
+ _rand_domain(1, 127),
+ ],
+)
+def test_domain_name_valid(val: str):
+ o = DomainName(val)
+ assert str(o) == val
+ assert o == DomainName(val)
+    assert o.punycode() == (val.encode("idna").decode("utf-8") if val != "." else ".")
+
+
+@pytest.mark.parametrize(
+ "val",
+ [
+ "test.example..com.",
+ "-example.com",
+ "test-.example.net",
+ ".example.net",
+ _rand_domain(64),
+ _rand_domain(1, 128),
+ ],
+)
+def test_domain_name_invalid(val: str):
+ with raises(ValueError):
+ DomainName(val)
+
+
+@pytest.mark.parametrize("val", ["lo", "eth0", "wlo1", "web_ifgrp", "e8-2"])
+def test_interface_name_valid(val: str):
+ assert str(InterfaceName(val)) == val
+
+
+@pytest.mark.parametrize("val", ["_lo", "-wlo1", "lo_", "wlo1-", "e8--2", "web__ifgrp"])
+def test_interface_name_invalid(val: Any):
+ with raises(ValueError):
+ InterfaceName(val)
+
+
+@pytest.mark.parametrize("val", ["lo@5353", "2001:db8::1000@5001"])
+def test_interface_port_valid(val: str):
+ o = InterfacePort(val)
+ assert str(o) == val
+ assert o == InterfacePort(val)
+ assert str(o.if_name if o.if_name else o.addr) == val.split("@", 1)[0]
+ assert o.port == PortNumber(int(val.split("@", 1)[1]))
+
+
+@pytest.mark.parametrize("val", ["lo", "2001:db8::1000", "53"])
+def test_interface_port_invalid(val: Any):
+ with raises(ValueError):
+ InterfacePort(val)
+
+
+@pytest.mark.parametrize("val", ["lo", "123.4.5.6", "lo@5353", "2001:db8::1000@5001"])
+def test_interface_optional_port_valid(val: str):
+ o = InterfaceOptionalPort(val)
+ assert str(o) == val
+ assert o == InterfaceOptionalPort(val)
+ assert str(o.if_name if o.if_name else o.addr) == (val.split("@", 1)[0] if "@" in val else val)
+ assert o.port == (PortNumber(int(val.split("@", 1)[1])) if "@" in val else None)
+
+
+@pytest.mark.parametrize("val", ["lo@", "@53"])
+def test_interface_optional_port_invalid(val: Any):
+ with raises(ValueError):
+ InterfaceOptionalPort(val)
+
+
+@pytest.mark.parametrize("val", ["123.4.5.6@5353", "2001:db8::1000@53"])
+def test_ip_address_port_valid(val: str):
+ o = IPAddressPort(val)
+ assert str(o) == val
+ assert o == IPAddressPort(val)
+ assert str(o.addr) == val.split("@", 1)[0]
+ assert o.port == PortNumber(int(val.split("@", 1)[1]))
+
+
+@pytest.mark.parametrize(
+ "val", ["123.4.5.6", "2001:db8::1000", "123.4.5.6.7@5000", "2001:db8::10000@5001", "123.4.5.6@"]
+)
+def test_ip_address_port_invalid(val: Any):
+ with raises(ValueError):
+ IPAddressPort(val)
+
+
+@pytest.mark.parametrize("val", ["123.4.5.6", "123.4.5.6@5353", "2001:db8::1000", "2001:db8::1000@53"])
+def test_ip_address_optional_port_valid(val: str):
+ o = IPAddressOptionalPort(val)
+ assert str(o) == val
+ assert o == IPAddressOptionalPort(val)
+ assert str(o.addr) == (val.split("@", 1)[0] if "@" in val else val)
+ assert o.port == (PortNumber(int(val.split("@", 1)[1])) if "@" in val else None)
+
+
+@pytest.mark.parametrize("val", ["123.4.5.6.7", "2001:db8::10000", "123.4.5.6@", "@55"])
+def test_ip_address_optional_port_invalid(val: Any):
+ with raises(ValueError):
+ IPAddressOptionalPort(val)
+
+
+@pytest.mark.parametrize("val", ["123.4.5.6", "192.168.0.1"])
+def test_ipv4_address_valid(val: str):
+ o = IPv4Address(val)
+ assert str(o) == val
+ assert o == IPv4Address(val)
+
+
+@pytest.mark.parametrize("val", ["123456", "2001:db8::1000"])
+def test_ipv4_address_invalid(val: Any):
+ with raises(ValueError):
+ IPv4Address(val)
+
+
+@pytest.mark.parametrize("val", ["2001:db8::1000", "2001:db8:85a3::8a2e:370:7334"])
+def test_ipv6_address_valid(val: str):
+ o = IPv6Address(val)
+ assert str(o) == val
+ assert o == IPv6Address(val)
+
+
+@pytest.mark.parametrize("val", ["123.4.5.6", "2001::db8::1000"])
+def test_ipv6_address_invalid(val: Any):
+ with raises(ValueError):
+ IPv6Address(val)
+
+
+@pytest.mark.parametrize("val", ["10.11.12.0/24", "64:ff9b::/96"])
+def test_ip_network_valid(val: str):
+ o = IPNetwork(val)
+ assert str(o) == val
+ assert o.to_std().prefixlen == int(val.split("/", 1)[1])
+ assert o.to_std() == ipaddress.ip_network(val)
+
+
+@pytest.mark.parametrize("val", ["10.11.12.13/8", "10.11.12.5/128"])
+def test_ip_network_invalid(val: str):
+ with raises(ValueError):
+ IPNetwork(val)
+
+
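+# IPv6Network96 accepts only IPv6 networks with exactly a /96 prefix (the shape dns64 expects)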
+@pytest.mark.parametrize("val", ["fe80::/96", "64:ff9b::/96"])
+def test_ipv6_96_network_valid(val: str):
+ assert str(IPv6Network96(val)) == val
+
+
+@pytest.mark.parametrize("val", ["fe80::/95", "10.11.12.3/96", "64:ff9b::1/96"])
+def test_ipv6_96_network_invalid(val: Any):
+ with raises(ValueError):
+ IPv6Network96(val)
diff --git a/manager/tests/unit/datamodel/types/test_generic_types.py b/manager/tests/unit/datamodel/types/test_generic_types.py
new file mode 100644
index 00000000..7803ed00
--- /dev/null
+++ b/manager/tests/unit/datamodel/types/test_generic_types.py
@@ -0,0 +1,56 @@
+from typing import Any, List, Optional, Union
+
+import pytest
+from pytest import raises
+
+from knot_resolver_manager.datamodel.types import ListOrItem
+from knot_resolver_manager.utils.modeling import BaseSchema
+from knot_resolver_manager.utils.modeling.exceptions import DataValidationError
+from knot_resolver_manager.utils.modeling.types import get_generic_type_wrapper_argument
+
+
+@pytest.mark.parametrize("val", [str, int])
+def test_list_or_item_inner_type(val: Any):
+ assert get_generic_type_wrapper_argument(ListOrItem[val]) == Union[List[val], val]
+
+
+@pytest.mark.parametrize(
+ "typ,val",
+ [
+ (int, [1, 65_535, 5353, 5000]),
+ (int, 65_535),
+ (str, ["string1", "string2"]),
+ (str, "string1"),
+ ],
+)
+def test_list_or_item_valid(typ: Any, val: Any):
+ class ListOrItemSchema(BaseSchema):
+ test: ListOrItem[typ]
+
+ o = ListOrItemSchema({"test": val})
+ assert o.test.serialize() == val
+    assert o.test.to_std() == (val if isinstance(val, list) else [val])
+
+    i = 0
+    for item in o.test:
+        assert item == (val[i] if isinstance(val, list) else val)
+        i += 1
+
+
+@pytest.mark.parametrize(
+ "typ,val",
+ [
+ (str, [True, False, True, False]),
+ (str, False),
+ (bool, [1, 65_535, 5353, 5000]),
+ (bool, 65_535),
+ (int, "string1"),
+ (int, ["string1", "string2"]),
+ ],
+)
+def test_list_or_item_invalid(typ: Any, val: Any):
+ class ListOrItemSchema(BaseSchema):
+ test: ListOrItem[typ]
+
+ with raises(DataValidationError):
+ ListOrItemSchema({"test": val})
diff --git a/manager/tests/unit/test_config_store.py b/manager/tests/unit/test_config_store.py
new file mode 100644
index 00000000..9b843bb9
--- /dev/null
+++ b/manager/tests/unit/test_config_store.py
@@ -0,0 +1,32 @@
+import pytest
+
+from knot_resolver_manager.config_store import ConfigStore, only_on_real_changes
+from knot_resolver_manager.datamodel.config_schema import KresConfig
+
+
+@pytest.mark.asyncio # type: ignore
+async def test_only_once():
+ count = 0
+
+ @only_on_real_changes(lambda config: config.logging.level)
+ async def change_callback(config: KresConfig) -> None:
+ nonlocal count
+ count += 1
+
+ config = KresConfig()
+ store = ConfigStore(config)
+
+ await store.register_on_change_callback(change_callback)
+ assert count == 1
+
+ config = KresConfig()
+ config.logging.level = "crit"
+ await store.update(config)
+ assert count == 2
+
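+    # touching fields outside the watched selector must not fire the callback again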
+ config = KresConfig()
+ config.lua.script_only = True
+ config.lua.script = "meaningless value"
+ await store.update(config)
+ assert count == 2
diff --git a/manager/tests/unit/test_knot_resolver_manager.py b/manager/tests/unit/test_knot_resolver_manager.py
new file mode 100644
index 00000000..ed67e354
--- /dev/null
+++ b/manager/tests/unit/test_knot_resolver_manager.py
@@ -0,0 +1,5 @@
+from knot_resolver_manager import __version__
+
+
+def test_version():
+ assert __version__ == "0.1.0"
diff --git a/manager/tests/unit/utils/modeling/test_base_schema.py b/manager/tests/unit/utils/modeling/test_base_schema.py
new file mode 100644
index 00000000..ca41572d
--- /dev/null
+++ b/manager/tests/unit/utils/modeling/test_base_schema.py
@@ -0,0 +1,206 @@
+from typing import Any, Dict, List, Optional, Tuple, Type, Union
+
+import pytest
+from pytest import raises
+from typing_extensions import Literal
+
+from knot_resolver_manager.utils.modeling import ConfigSchema, parse_json, parse_yaml
+from knot_resolver_manager.utils.modeling.exceptions import DataDescriptionError, DataValidationError
+
+
+class _TestBool(ConfigSchema):
+ v: bool
+
+
+class _TestInt(ConfigSchema):
+ v: int
+
+
+class _TestStr(ConfigSchema):
+ v: str
+
+
+@pytest.mark.parametrize("val,exp", [("false", False), ("true", True), ("False", False), ("True", True)])
+def test_parsing_bool_valid(val: str, exp: bool):
+ assert _TestBool(parse_yaml(f"v: {val}")).v == exp
+
+
+@pytest.mark.parametrize("val", ["0", "1", "5", "'true'", "'false'", "5.5"]) # int, str, float
+def test_parsing_bool_invalid(val: str):
+ with raises(DataValidationError):
+ _TestBool(parse_yaml(f"v: {val}"))
+
+
+@pytest.mark.parametrize("val,exp", [("0", 0), ("5353", 5353), ("-5001", -5001)])
+def test_parsing_int_valid(val: str, exp: int):
+ assert _TestInt(parse_yaml(f"v: {val}")).v == exp
+
+
+@pytest.mark.parametrize("val", ["false", "'5'", "5.5"]) # bool, str, float
+def test_parsing_int_invalid(val: str):
+ with raises(DataValidationError):
+ _TestInt(parse_yaml(f"v: {val}"))
+
+
+# int and float are allowed inputs for string
+@pytest.mark.parametrize("val,exp", [("test", "test"), (65, "65"), (5.5, "5.5")])
+def test_parsing_str_valid(val: Any, exp: str):
+ assert _TestStr(parse_yaml(f"v: {val}")).v == exp
+
+
+def test_parsing_str_invalid():
+ with raises(DataValidationError):
+ _TestStr(parse_yaml("v: false")) # bool
+
+
+@pytest.mark.parametrize("typ,val", [(_TestInt, 5), (_TestBool, False), (_TestStr, "test")])
+def test_parsing_nested(typ: Type[ConfigSchema], val: Any):
+ class UpperSchema(ConfigSchema):
+ l: typ
+
+ yaml = f"""
+l:
+ v: {val}
+"""
+
+ o = UpperSchema(parse_yaml(yaml))
+ assert o.l.v == val
+
+
+def test_parsing_simple_compound_types():
+ class TestSchema(ConfigSchema):
+ l: List[int]
+ d: Dict[str, str]
+ t: Tuple[str, int]
+ o: Optional[int]
+
+ yaml = """
+l:
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+d:
+ something: else
+ w: all
+t:
+ - test
+ - 5
+"""
+
+ o = TestSchema(parse_yaml(yaml))
+ assert o.l == [1, 2, 3, 4, 5]
+ assert o.d == {"something": "else", "w": "all"}
+ assert o.t == ("test", 5)
+ assert o.o is None
+
+
+def test_parsing_nested_compound_types():
+ class TestSchema(ConfigSchema):
+ i: int
+ o: Optional[Dict[str, str]]
+
+ yaml1 = "i: 5"
+ yaml2 = f"""
+{yaml1}
+o:
+ key1: str1
+ key2: str2
+ """
+
+ o = TestSchema(parse_yaml(yaml1))
+ assert o.i == 5
+ assert o.o is None
+
+ o = TestSchema(parse_yaml(yaml2))
+ assert o.i == 5
+ assert o.o == {"key1": "str1", "key2": "str2"}
+
+
+def test_dash_conversion():
+ class TestSchema(ConfigSchema):
+ awesome_field: Dict[str, str]
+
+ yaml = """
+awesome-field:
+ awesome-key: awesome-value
+"""
+
+ o = TestSchema(parse_yaml(yaml))
+ assert o.awesome_field["awesome-key"] == "awesome-value"
+
+
+def test_eq():
+ class B(ConfigSchema):
+ a: _TestInt
+ field: str
+
+ b1 = B({"a": {"v": 6}, "field": "val"})
+ b2 = B({"a": {"v": 6}, "field": "val"})
+ b_diff = B({"a": {"v": 7}, "field": "val"})
+
+ assert b1 == b2
+ assert b2 != b_diff
+ assert b1 != b_diff
+ assert b_diff == b_diff
+
+
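+# docstring convention: text above "---" describes the schema, "name: text" lines below describe its fields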
+def test_docstring_parsing_valid():
+ class NormalDescription(ConfigSchema):
+ """
+ Does nothing special
+ Really
+ """
+
+ desc = NormalDescription.json_schema()
+ assert desc["description"] == "Does nothing special\nReally"
+
+ class FieldsDescription(ConfigSchema):
+ """
+ This is an awesome test class
+ ---
+ field: This field does nothing interesting
+ value: Neither does this
+ """
+
+ field: str
+ value: int
+
+ schema = FieldsDescription.json_schema()
+ assert schema["description"] == "This is an awesome test class"
+ assert schema["properties"]["field"]["description"] == "This field does nothing interesting"
+ assert schema["properties"]["value"]["description"] == "Neither does this"
+
+ class NoDescription(ConfigSchema):
+ nothing: str
+
+ _ = NoDescription.json_schema()
+
+
+def test_docstring_parsing_invalid():
+ class AdditionalItem(ConfigSchema):
+ """
+ This class is wrong
+ ---
+ field: nope
+ nothing: really nothing
+ """
+
+ nothing: str
+
+ with raises(DataDescriptionError):
+ _ = AdditionalItem.json_schema()
+
+ class WrongDescription(ConfigSchema):
+ """
+ This class is wrong
+ ---
+ other: description
+ """
+
+ nothing: str
+
+ with raises(DataDescriptionError):
+ _ = WrongDescription.json_schema()
diff --git a/manager/tests/unit/utils/modeling/test_etag.py b/manager/tests/unit/utils/modeling/test_etag.py
new file mode 100644
index 00000000..25a52369
--- /dev/null
+++ b/manager/tests/unit/utils/modeling/test_etag.py
@@ -0,0 +1,13 @@
+from knot_resolver_manager.utils.etag import structural_etag
+
+
+def test_etag():
+ empty1 = {}
+ empty2 = {}
+
+ assert structural_etag(empty1) == structural_etag(empty2)
+
+ something1 = {"something": 1}
+ something2 = {"something": 2}
+ assert structural_etag(empty1) != structural_etag(something1)
+ assert structural_etag(something1) != structural_etag(something2)
diff --git a/manager/tests/unit/utils/modeling/test_json_pointer.py b/manager/tests/unit/utils/modeling/test_json_pointer.py
new file mode 100644
index 00000000..532e6d5e
--- /dev/null
+++ b/manager/tests/unit/utils/modeling/test_json_pointer.py
@@ -0,0 +1,73 @@
+from pytest import raises
+
+from knot_resolver_manager.utils.modeling.json_pointer import json_ptr_resolve
+
+# example adopted from https://www.sitepoint.com/json-server-example/
+TEST = {
+ "clients": [
+ {
+ "id": "59761c23b30d971669fb42ff",
+ "isActive": True,
+ "age": 36,
+ "name": "Dunlap Hubbard",
+ "gender": "male",
+ "company": "CEDWARD",
+ "email": "dunlaphubbard@cedward.com",
+ "phone": "+1 (890) 543-2508",
+ "address": "169 Rutledge Street, Konterra, Northern Mariana Islands, 8551",
+ },
+ {
+ "id": "59761c233d8d0f92a6b0570d",
+ "isActive": True,
+ "age": 24,
+ "name": "Kirsten Sellers",
+ "gender": "female",
+ "company": "EMERGENT",
+ "email": "kirstensellers@emergent.com",
+ "phone": "+1 (831) 564-2190",
+ "address": "886 Gallatin Place, Fannett, Arkansas, 4656",
+ },
+ {
+ "id": "59761c23fcb6254b1a06dad5",
+ "isActive": True,
+ "age": 30,
+ "name": "Acosta Robbins",
+ "gender": "male",
+ "company": "ORGANICA",
+ "email": "acostarobbins@organica.com",
+ "phone": "+1 (882) 441-3367",
+ "address": "697 Linden Boulevard, Sattley, Idaho, 1035",
+ },
+ ]
+}
+
+
+def test_json_ptr():
+ parent, res, token = json_ptr_resolve(TEST, "")
+ assert parent is None
+ assert res is TEST
+
+ parent, res, token = json_ptr_resolve(TEST, "/")
+ assert parent is TEST
+ assert res is None
+ assert token == ""
+
+ parent, res, token = json_ptr_resolve(TEST, "/clients/2/gender")
+ assert parent is TEST["clients"][2]
+ assert res == "male"
+ assert token == "gender"
+
+ with raises(ValueError):
+ _ = json_ptr_resolve(TEST, "//")
+
+ with raises(SyntaxError):
+ _ = json_ptr_resolve(TEST, "invalid/ptr")
+
+ with raises(ValueError):
+ _ = json_ptr_resolve(TEST, "/clients/2/gender/invalid")
+
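+    # RFC 6901 escaping: "~0" decodes to "~", so the pointer "/~01" yields the token "~1"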
+ parent, res, token = json_ptr_resolve(TEST, "/~01")
+ assert parent is TEST
+ assert res is None
+ assert token == "~1"
diff --git a/manager/tests/unit/utils/modeling/test_query.py b/manager/tests/unit/utils/modeling/test_query.py
new file mode 100644
index 00000000..1a552b87
--- /dev/null
+++ b/manager/tests/unit/utils/modeling/test_query.py
@@ -0,0 +1,18 @@
+from pytest import raises
+
+from knot_resolver_manager.utils.modeling.query import query
+
+
+def test_example_from_spec():
+ # source of the example: https://jsonpatch.com/
+ original = {"baz": "qux", "foo": "bar"}
+ patch = [
+ {"op": "replace", "path": "/baz", "value": "boo"},
+ {"op": "add", "path": "/hello", "value": ["world"]},
+ {"op": "remove", "path": "/foo"},
+ ]
+ expected = {"baz": "boo", "hello": ["world"]}
+
+ result, _ = query(original, "patch", "", patch)
+
+ assert result == expected
diff --git a/manager/tests/unit/utils/modeling/test_renaming.py b/manager/tests/unit/utils/modeling/test_renaming.py
new file mode 100644
index 00000000..1a4ce89e
--- /dev/null
+++ b/manager/tests/unit/utils/modeling/test_renaming.py
@@ -0,0 +1,25 @@
+from knot_resolver_manager.utils.modeling.renaming import renamed
+
+
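+# renamed() exposes dash-separated keys under their underscore_separated equivalents; values stay untouched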
+def test_all():
+ ref = {
+ "awesome-customers": [{"name": "John", "home-address": "London"}, {"name": "Bob", "home-address": "Prague"}],
+ "storage": {"bobby-pin": 5, "can-opener": 0, "laptop": 1},
+ }
+
+ rnm = renamed(ref)
+ assert rnm["awesome_customers"][0]["home_address"] == "London"
+ assert rnm["awesome_customers"][1:][0]["home_address"] == "Prague"
+ assert set(rnm["storage"].items()) == set((("can_opener", 0), ("bobby_pin", 5), ("laptop", 1)))
+ assert set(rnm["storage"].keys()) == set(("bobby_pin", "can_opener", "laptop"))
+
+
+def test_nested_init():
+    val = renamed(renamed({"ke-y": "val-ue"}))
+ assert val["ke_y"] == "val-ue"
+
+
+def test_original():
+    obj = renamed({"ke-y": "val-ue"}).original()
+ assert obj["ke-y"] == "val-ue"
diff --git a/manager/tests/unit/utils/modeling/test_types.py b/manager/tests/unit/utils/modeling/test_types.py
new file mode 100644
index 00000000..281f03a8
--- /dev/null
+++ b/manager/tests/unit/utils/modeling/test_types.py
@@ -0,0 +1,38 @@
+from typing import Any, Dict, List, Tuple, Union
+
+import pytest
+from typing_extensions import Literal
+
+from knot_resolver_manager.utils.modeling import BaseSchema
+from knot_resolver_manager.utils.modeling.types import is_list, is_literal
+
+types = [
+ bool,
+ int,
+ str,
+ Dict[Any, Any],
+ Tuple[Any, Any],
+ Union[str, int],
+ BaseSchema,
+]
+literal_types = [Literal[5], Literal["test"], Literal[False]]
+
+
+@pytest.mark.parametrize("val", types)
+def test_is_list_true(val: Any):
+ assert is_list(List[val])
+
+
+@pytest.mark.parametrize("val", types)
+def test_is_list_false(val: Any):
+ assert not is_list(val)
+
+
+@pytest.mark.parametrize("val", literal_types)
+def test_is_literal_true(val: Any):
+ assert is_literal(Literal[val])
+
+
+@pytest.mark.parametrize("val", types)
+def test_is_literal_false(val: Any):
+ assert not is_literal(val)
diff --git a/manager/tests/unit/utils/test_dataclasses.py b/manager/tests/unit/utils/test_dataclasses.py
new file mode 100644
index 00000000..c402c092
--- /dev/null
+++ b/manager/tests/unit/utils/test_dataclasses.py
@@ -0,0 +1,15 @@
+from knot_resolver_manager.compat.dataclasses import dataclass, is_dataclass
+
+
+def test_dataclass():
+ @dataclass
+ class A:
+ b: int = 5
+
+ val = A(6)
+ assert val.b == 6
+
+ val = A(b=7)
+ assert val.b == 7
+
+ assert is_dataclass(A)
diff --git a/manager/tests/unit/utils/test_functional.py b/manager/tests/unit/utils/test_functional.py
new file mode 100644
index 00000000..041748e4
--- /dev/null
+++ b/manager/tests/unit/utils/test_functional.py
@@ -0,0 +1,22 @@
+from knot_resolver_manager.utils.functional import all_matches, contains_element_matching, foldl
+
+
+def test_foldl():
+ lst = list(range(10))
+
+ assert foldl(lambda x, y: x + y, 0, lst) == sum(range(10))
+ assert foldl(lambda x, y: x + y, 55, lst) == sum(range(10)) + 55
+
+
+def test_contains_element_matching():
+ lst = list(range(10))
+
+ assert contains_element_matching(lambda e: e == 5, lst)
+ assert not contains_element_matching(lambda e: e == 11, lst)
+
+
+def test_matches_all():
+ lst = list(range(10))
+
+ assert all_matches(lambda x: x >= 0, lst)
+ assert not all_matches(lambda x: x % 2 == 0, lst)
diff --git a/manager/typings/README.md b/manager/typings/README.md
new file mode 100644
index 00000000..2e2c88bb
--- /dev/null
+++ b/manager/typings/README.md
@@ -0,0 +1,9 @@
+# Type stubs
+
+See the [pyright documentation](https://github.com/microsoft/pyright/blob/master/docs/type-stubs.md).
+
+Generate stubs using this command (replacing `jinja2` with the appropriate package):
+```
+poetry run yarn pyright --createstub jinja2
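+# pyright writes the stubs under its stubPath setting ("typings" by default)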
+```
\ No newline at end of file
diff --git a/manager/typings/pytest/__init__.pyi b/manager/typings/pytest/__init__.pyi
new file mode 100644
index 00000000..1a485dd4
--- /dev/null
+++ b/manager/typings/pytest/__init__.pyi
@@ -0,0 +1,36 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+from _pytest import __version__
+from _pytest.assertion import register_assert_rewrite
+from _pytest.compat import _setup_collect_fakemodule
+from _pytest.config import ExitCode, UsageError, cmdline, hookimpl, hookspec, main
+from _pytest.debugging import pytestPDB as __pytestPDB
+from _pytest.fixtures import fillfixtures as _fillfuncargs
+from _pytest.fixtures import fixture, yield_fixture
+from _pytest.freeze_support import freeze_includes
+from _pytest.main import Session
+from _pytest.mark import MARK_GEN as mark
+from _pytest.mark import param
+from _pytest.nodes import Collector, File, Item
+from _pytest.outcomes import exit, fail, importorskip, skip, xfail
+from _pytest.python import Class, Function, Instance, Module, Package
+from _pytest.python_api import approx, raises
+from _pytest.recwarn import deprecated_call, warns
+from _pytest.warning_types import (
+ PytestAssertRewriteWarning,
+ PytestCacheWarning,
+ PytestCollectionWarning,
+ PytestConfigWarning,
+ PytestDeprecationWarning,
+ PytestExperimentalApiWarning,
+ PytestUnhandledCoroutineWarning,
+ PytestUnknownMarkWarning,
+ PytestWarning,
+)
+
+"""
+pytest: unit and functional testing with Python.
+"""
+set_trace = ...
diff --git a/manager/typings/pytest/__main__.pyi b/manager/typings/pytest/__main__.pyi
new file mode 100644
index 00000000..de3c14ca
--- /dev/null
+++ b/manager/typings/pytest/__main__.pyi
@@ -0,0 +1,9 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+"""
+pytest entry point
+"""
+if __name__ == "__main__":
+ ...
diff --git a/manager/typings/supervisor/__init__.pyi b/manager/typings/supervisor/__init__.pyi
new file mode 100644
index 00000000..cea7ef96
--- /dev/null
+++ b/manager/typings/supervisor/__init__.pyi
@@ -0,0 +1,3 @@
+"""
+This type stub file was generated by pyright.
+"""
diff --git a/manager/typings/supervisor/childutils.pyi b/manager/typings/supervisor/childutils.pyi
new file mode 100644
index 00000000..7845a5af
--- /dev/null
+++ b/manager/typings/supervisor/childutils.pyi
@@ -0,0 +1,51 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+def getRPCTransport(env): # -> SupervisorTransport:
+ ...
+
+def getRPCInterface(env):
+ ...
+
+def get_headers(line): # -> dict[Unknown, Unknown]:
+ ...
+
+def eventdata(payload): # -> tuple[dict[Unknown, Unknown], Unknown]:
+ ...
+
+def get_asctime(now=...): # -> str:
+ ...
+
+class ProcessCommunicationsProtocol:
+ def send(self, msg, fp=...): # -> None:
+ ...
+
+ def stdout(self, msg): # -> None:
+ ...
+
+ def stderr(self, msg): # -> None:
+ ...
+
+
+
+pcomm = ...
+class EventListenerProtocol:
+ def wait(self, stdin=..., stdout=...): # -> tuple[dict[Unknown, Unknown], Unknown]:
+ ...
+
+ def ready(self, stdout=...): # -> None:
+ ...
+
+ def ok(self, stdout=...): # -> None:
+ ...
+
+ def fail(self, stdout=...): # -> None:
+ ...
+
+ def send(self, data, stdout=...): # -> None:
+ ...
+
+
+
+listener = ...
diff --git a/manager/typings/supervisor/compat.pyi b/manager/typings/supervisor/compat.pyi
new file mode 100644
index 00000000..72fc9a43
--- /dev/null
+++ b/manager/typings/supervisor/compat.pyi
@@ -0,0 +1,39 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+PY2 = ...
+if PY2:
+ long = ...
+ raw_input = ...
+ unicode = ...
+ unichr = ...
+ basestring = ...
+ def as_bytes(s, encoding=...): # -> str:
+ ...
+
+ def as_string(s, encoding=...): # -> unicode:
+ ...
+
+ def is_text_stream(stream): # -> bool:
+ ...
+
+else:
+ long = ...
+ basestring = ...
+ raw_input = ...
+ unichr = ...
+ class unicode(str):
+ def __init__(self, string, encoding, errors) -> None:
+ ...
+
+
+
+ def as_bytes(s, encoding=...): # -> bytes:
+ ...
+
+ def as_string(s, encoding=...): # -> str:
+ ...
+
+ def is_text_stream(stream): # -> bool:
+ ...
diff --git a/manager/typings/supervisor/confecho.pyi b/manager/typings/supervisor/confecho.pyi
new file mode 100644
index 00000000..9cd6e68a
--- /dev/null
+++ b/manager/typings/supervisor/confecho.pyi
@@ -0,0 +1,6 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+def main(out=...): # -> None:
+ ...
diff --git a/manager/typings/supervisor/datatypes.pyi b/manager/typings/supervisor/datatypes.pyi
new file mode 100644
index 00000000..5ef53bc4
--- /dev/null
+++ b/manager/typings/supervisor/datatypes.pyi
@@ -0,0 +1,199 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+def process_or_group_name(name): # -> str:
+ """Ensures that a process or group name is not created with
+ characters that break the eventlistener protocol or web UI URLs"""
+ ...
+
+def integer(value): # -> int:
+ ...
+
+TRUTHY_STRINGS = ...
+FALSY_STRINGS = ...
+def boolean(s): # -> bool:
+ """Convert a string value to a boolean value."""
+ ...
+
+def list_of_strings(arg): # -> list[Unknown]:
+ ...
+
+def list_of_ints(arg): # -> list[int]:
+ ...
+
+def list_of_exitcodes(arg): # -> list[int]:
+ ...
+
+def dict_of_key_value_pairs(arg): # -> dict[Unknown, Unknown]:
+ """ parse KEY=val,KEY2=val2 into {'KEY':'val', 'KEY2':'val2'}
+ Quotes can be used to allow commas in the value
+ """
+ ...
+
+class Automatic:
+ ...
+
+
+class Syslog:
+ """TODO deprecated; remove this special 'syslog' filename in the future"""
+ ...
+
+
+LOGFILE_NONES = ...
+LOGFILE_AUTOS = ...
+LOGFILE_SYSLOGS = ...
+def logfile_name(val): # -> Type[Automatic] | Type[Syslog] | None:
+ ...
+
+class RangeCheckedConversion:
+ """Conversion helper that range checks another conversion."""
+ def __init__(self, conversion, min=..., max=...) -> None:
+ ...
+
+ def __call__(self, value):
+ ...
+
+
+
+port_number = ...
+def inet_address(s): # -> tuple[Unknown | Literal[''], Unknown]:
+ ...
+
+class SocketAddress:
+ def __init__(self, s) -> None:
+ ...
+
+
+
+class SocketConfig:
+ """ Abstract base class which provides a uniform abstraction
+ for TCP vs Unix sockets """
+ url = ...
+ addr = ...
+ backlog = ...
+ def __repr__(self): # -> str:
+ ...
+
+ def __str__(self) -> str:
+ ...
+
+ def __eq__(self, other) -> bool:
+ ...
+
+ def __ne__(self, other) -> bool:
+ ...
+
+ def get_backlog(self): # -> None:
+ ...
+
+ def addr(self):
+ ...
+
+ def create_and_bind(self):
+ ...
+
+
+
+class InetStreamSocketConfig(SocketConfig):
+ """ TCP socket config helper """
+ host = ...
+ port = ...
+ def __init__(self, host, port, **kwargs) -> None:
+ ...
+
+ def addr(self): # -> tuple[Unknown | None, Unknown | None]:
+ ...
+
+ def create_and_bind(self): # -> socket:
+ ...
+
+
+
+class UnixStreamSocketConfig(SocketConfig):
+ """ Unix domain socket config helper """
+ path = ...
+ mode = ...
+ owner = ...
+ sock = ...
+ def __init__(self, path, **kwargs) -> None:
+ ...
+
+ def addr(self): # -> Unknown | None:
+ ...
+
+ def create_and_bind(self): # -> socket:
+ ...
+
+ def get_mode(self): # -> None:
+ ...
+
+ def get_owner(self): # -> None:
+ ...
+
+
+
+def colon_separated_user_group(arg): # -> tuple[int, int]:
+ """ Find a user ID and group ID from a string like 'user:group'. Returns
+ a tuple (uid, gid). If the string only contains a user like 'user'
+ then (uid, -1) will be returned. Raises ValueError if either
+ the user or group can't be resolved to valid IDs on the system. """
+ ...
+
+def name_to_uid(name): # -> int:
+ """ Find a user ID from a string containing a user name or ID.
+ Raises ValueError if the string can't be resolved to a valid
+ user ID on the system. """
+ ...
+
+def name_to_gid(name): # -> int:
+ """ Find a group ID from a string containing a group name or ID.
+ Raises ValueError if the string can't be resolved to a valid
+ group ID on the system. """
+ ...
+
+def gid_for_uid(uid): # -> int:
+ ...
+
+def octal_type(arg): # -> int:
+ ...
+
+def existing_directory(v):
+ ...
+
+def existing_dirpath(v):
+ ...
+
+def logging_level(value): # -> Any:
+ ...
+
+class SuffixMultiplier:
+ def __init__(self, d, default=...) -> None:
+ ...
+
+ def __call__(self, v): # -> int:
+ ...
+
+
+
+byte_size = ...
+def url(value):
+ ...
+
+SIGNUMS = ...
+def signal_number(value): # -> int | Any:
+ ...
+
+class RestartWhenExitUnexpected:
+ ...
+
+
+class RestartUnconditionally:
+ ...
+
+
+def auto_restart(value): # -> Type[RestartUnconditionally] | Type[RestartWhenExitUnexpected] | str | Literal[False]:
+ ...
+
+def profile_options(value): # -> tuple[list[Unknown], bool]:
+ ...
diff --git a/manager/typings/supervisor/dispatchers.pyi b/manager/typings/supervisor/dispatchers.pyi
new file mode 100644
index 00000000..dfeff5c4
--- /dev/null
+++ b/manager/typings/supervisor/dispatchers.pyi
@@ -0,0 +1,158 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+def find_prefix_at_end(haystack, needle): # -> int:
+ ...
+
+class PDispatcher:
+ """ Asyncore dispatcher for mainloop, representing a process channel
+ (stdin, stdout, or stderr). This class is abstract. """
+ closed = ...
+ def __init__(self, process, channel, fd) -> None:
+ ...
+
+ def __repr__(self): # -> str:
+ ...
+
+ def readable(self):
+ ...
+
+ def writable(self):
+ ...
+
+ def handle_read_event(self):
+ ...
+
+ def handle_write_event(self):
+ ...
+
+ def handle_error(self): # -> None:
+ ...
+
+ def close(self): # -> None:
+ ...
+
+ def flush(self): # -> None:
+ ...
+
+
+
+class POutputDispatcher(PDispatcher):
+ """
+ Dispatcher for one channel (stdout or stderr) of one process.
+ Serves several purposes:
+
+ - capture output sent within <!--XSUPERVISOR:BEGIN--> and
+ <!--XSUPERVISOR:END--> tags and signal a ProcessCommunicationEvent
+ by calling notify(event).
+ - route the output to the appropriate log handlers as specified in the
+ config.
+ """
+ childlog = ...
+ normallog = ...
+ capturelog = ...
+ capturemode = ...
+ output_buffer = ...
+ def __init__(self, process, event_type, fd) -> None:
+ """
+ Initialize the dispatcher.
+
+ `event_type` should be one of ProcessLogStdoutEvent or
+ ProcessLogStderrEvent
+ """
+ ...
+
+ def removelogs(self): # -> None:
+ ...
+
+ def reopenlogs(self): # -> None:
+ ...
+
+ def record_output(self): # -> None:
+ ...
+
+ def toggle_capturemode(self): # -> None:
+ ...
+
+ def writable(self): # -> Literal[False]:
+ ...
+
+ def readable(self): # -> bool:
+ ...
+
+ def handle_read_event(self): # -> None:
+ ...
+
+
+
+class PEventListenerDispatcher(PDispatcher):
+ """ An output dispatcher that monitors and changes a process'
+ listener_state """
+ childlog = ...
+ state_buffer = ...
+ READY_FOR_EVENTS_TOKEN = ...
+ RESULT_TOKEN_START = ...
+ READY_FOR_EVENTS_LEN = ...
+ RESULT_TOKEN_START_LEN = ...
+ def __init__(self, process, channel, fd) -> None:
+ ...
+
+ def removelogs(self): # -> None:
+ ...
+
+ def reopenlogs(self): # -> None:
+ ...
+
+ def writable(self): # -> Literal[False]:
+ ...
+
+ def readable(self): # -> bool:
+ ...
+
+ def handle_read_event(self): # -> None:
+ ...
+
+ def handle_listener_state_change(self): # -> None:
+ ...
+
+ def handle_result(self, result): # -> None:
+ ...
+
+
+
+class PInputDispatcher(PDispatcher):
+ """ Input (stdin) dispatcher """
+ def __init__(self, process, channel, fd) -> None:
+ ...
+
+ def writable(self): # -> bool:
+ ...
+
+ def readable(self): # -> Literal[False]:
+ ...
+
+ def flush(self): # -> None:
+ ...
+
+ def handle_write_event(self): # -> None:
+ ...
+
+
+
+ANSI_ESCAPE_BEGIN = ...
+ANSI_TERMINATORS = ...
+def stripEscapes(s): # -> Literal[b'']:
+ """
+ Remove all ANSI color escapes from the given string.
+ """
+ ...
+
+class RejectEvent(Exception):
+ """ The exception type expected by a dispatcher when a handler wants
+ to reject an event """
+ ...
+
+
+def default_handler(event, response): # -> None:
+ ...
diff --git a/manager/typings/supervisor/events.pyi b/manager/typings/supervisor/events.pyi
new file mode 100644
index 00000000..85c66622
--- /dev/null
+++ b/manager/typings/supervisor/events.pyi
@@ -0,0 +1,227 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+callbacks = ...
+def subscribe(type, callback): # -> None:
+ ...
+
+def unsubscribe(type, callback): # -> None:
+ ...
+
+def notify(event): # -> None:
+ ...
+
+def clear(): # -> None:
+ ...
+
+class Event:
+ """ Abstract event type """
+ ...
+
+
+class ProcessLogEvent(Event):
+ """ Abstract """
+ channel = ...
+ def __init__(self, process, pid, data) -> None:
+ ...
+
+ def payload(self): # -> str:
+ ...
+
+
+
+class ProcessLogStdoutEvent(ProcessLogEvent):
+ channel = ...
+
+
+class ProcessLogStderrEvent(ProcessLogEvent):
+ channel = ...
+
+
+class ProcessCommunicationEvent(Event):
+ """ Abstract """
+ BEGIN_TOKEN = ...
+ END_TOKEN = ...
+ def __init__(self, process, pid, data) -> None:
+ ...
+
+ def payload(self): # -> str:
+ ...
+
+
+
+class ProcessCommunicationStdoutEvent(ProcessCommunicationEvent):
+ channel = ...
+
+
+class ProcessCommunicationStderrEvent(ProcessCommunicationEvent):
+ channel = ...
+
+
+class RemoteCommunicationEvent(Event):
+ def __init__(self, type, data) -> None:
+ ...
+
+ def payload(self): # -> str:
+ ...
+
+
+
+class SupervisorStateChangeEvent(Event):
+ """ Abstract class """
+ def payload(self): # -> Literal['']:
+ ...
+
+
+
+class SupervisorRunningEvent(SupervisorStateChangeEvent):
+ ...
+
+
+class SupervisorStoppingEvent(SupervisorStateChangeEvent):
+ ...
+
+
+class EventRejectedEvent:
+ def __init__(self, process, event) -> None:
+ ...
+
+
+
+class ProcessStateEvent(Event):
+ """ Abstract class, never raised directly """
+ frm = ...
+ to = ...
+ def __init__(self, process, from_state, expected=...) -> None:
+ ...
+
+ def payload(self): # -> str:
+ ...
+
+ def get_extra_values(self): # -> list[Unknown]:
+ ...
+
+
+
+class ProcessStateFatalEvent(ProcessStateEvent):
+ ...
+
+
+class ProcessStateUnknownEvent(ProcessStateEvent):
+ ...
+
+
+class ProcessStateStartingOrBackoffEvent(ProcessStateEvent):
+ def get_extra_values(self): # -> list[tuple[Literal['tries'], int]]:
+ ...
+
+
+
+class ProcessStateBackoffEvent(ProcessStateStartingOrBackoffEvent):
+ ...
+
+
+class ProcessStateStartingEvent(ProcessStateStartingOrBackoffEvent):
+ ...
+
+
+class ProcessStateExitedEvent(ProcessStateEvent):
+ def get_extra_values(self): # -> list[tuple[Literal['expected'], int] | tuple[Literal['pid'], Unknown]]:
+ ...
+
+
+
+class ProcessStateRunningEvent(ProcessStateEvent):
+ def get_extra_values(self): # -> list[tuple[Literal['pid'], Unknown]]:
+ ...
+
+
+
+class ProcessStateStoppingEvent(ProcessStateEvent):
+ def get_extra_values(self): # -> list[tuple[Literal['pid'], Unknown]]:
+ ...
+
+
+
+class ProcessStateStoppedEvent(ProcessStateEvent):
+ def get_extra_values(self): # -> list[tuple[Literal['pid'], Unknown]]:
+ ...
+
+
+
+class ProcessGroupEvent(Event):
+ def __init__(self, group) -> None:
+ ...
+
+ def payload(self): # -> str:
+ ...
+
+
+
+class ProcessGroupAddedEvent(ProcessGroupEvent):
+ ...
+
+
+class ProcessGroupRemovedEvent(ProcessGroupEvent):
+ ...
+
+
+class TickEvent(Event):
+ """ Abstract """
+ def __init__(self, when, supervisord) -> None:
+ ...
+
+ def payload(self): # -> str:
+ ...
+
+
+
+class Tick5Event(TickEvent):
+ period = ...
+
+
+class Tick60Event(TickEvent):
+ period = ...
+
+
+class Tick3600Event(TickEvent):
+ period = ...
+
+
+TICK_EVENTS = ...
+class EventTypes:
+ EVENT = Event
+ PROCESS_STATE = ProcessStateEvent
+ PROCESS_STATE_STOPPED = ProcessStateStoppedEvent
+ PROCESS_STATE_EXITED = ProcessStateExitedEvent
+ PROCESS_STATE_STARTING = ProcessStateStartingEvent
+ PROCESS_STATE_STOPPING = ProcessStateStoppingEvent
+ PROCESS_STATE_BACKOFF = ProcessStateBackoffEvent
+ PROCESS_STATE_FATAL = ProcessStateFatalEvent
+ PROCESS_STATE_RUNNING = ProcessStateRunningEvent
+ PROCESS_STATE_UNKNOWN = ProcessStateUnknownEvent
+ PROCESS_COMMUNICATION = ProcessCommunicationEvent
+ PROCESS_COMMUNICATION_STDOUT = ProcessCommunicationStdoutEvent
+ PROCESS_COMMUNICATION_STDERR = ProcessCommunicationStderrEvent
+ PROCESS_LOG = ProcessLogEvent
+ PROCESS_LOG_STDOUT = ProcessLogStdoutEvent
+ PROCESS_LOG_STDERR = ProcessLogStderrEvent
+ REMOTE_COMMUNICATION = RemoteCommunicationEvent
+ SUPERVISOR_STATE_CHANGE = SupervisorStateChangeEvent
+ SUPERVISOR_STATE_CHANGE_RUNNING = SupervisorRunningEvent
+ SUPERVISOR_STATE_CHANGE_STOPPING = SupervisorStoppingEvent
+ TICK = TickEvent
+ TICK_5 = Tick5Event
+ TICK_60 = Tick60Event
+ TICK_3600 = Tick3600Event
+ PROCESS_GROUP = ProcessGroupEvent
+ PROCESS_GROUP_ADDED = ProcessGroupAddedEvent
+ PROCESS_GROUP_REMOVED = ProcessGroupRemovedEvent
+
+
+def getEventNameByType(requested): # -> str | None:
+ ...
+
+def register(name, event): # -> None:
+ ...
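+
+# Illustrative sketch, not part of the generated stub: inside code running in
+# the supervisord process, the hooks above are used roughly as follows:
+#
+#     from supervisor import events
+#
+#     def on_exit(event):
+#         # event is a ProcessStateExitedEvent instance
+#         print('process exited:', event.payload())
+#
+#     events.subscribe(events.ProcessStateExitedEvent, on_exit)
+#     # supervisord calls events.notify(...) itself on state changes;
+#     # events.clear() drops all subscriptions again.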
diff --git a/manager/typings/supervisor/http.pyi b/manager/typings/supervisor/http.pyi
new file mode 100644
index 00000000..4f0503f3
--- /dev/null
+++ b/manager/typings/supervisor/http.pyi
@@ -0,0 +1,216 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+from supervisor.medusa import http_server
+from supervisor.medusa.auth_handler import auth_handler
+
+class NOT_DONE_YET:
+ ...
+
+
+class deferring_chunked_producer:
+ """A producer that implements the 'chunked' transfer coding for HTTP/1.1.
+ Here is a sample usage:
+ request['Transfer-Encoding'] = 'chunked'
+ request.push (
+ producers.chunked_producer (your_producer)
+ )
+ request.done()
+ """
+ def __init__(self, producer, footers=...) -> None:
+ ...
+
+ def more(self): # -> Type[NOT_DONE_YET] | bytes:
+ ...
+
+
+
+class deferring_composite_producer:
+ """combine a fifo of producers into one"""
+ def __init__(self, producers) -> None:
+ ...
+
+ def more(self): # -> Type[NOT_DONE_YET] | Literal[b'']:
+ ...
+
+
+
+class deferring_globbing_producer:
+ """
+ 'glob' the output from a producer into a particular buffer size.
+ helps reduce the number of calls to send(). [this appears to
+ gain about 30% performance on requests to a single channel]
+ """
+ def __init__(self, producer, buffer_size=...) -> None:
+ ...
+
+ def more(self): # -> Type[NOT_DONE_YET] | bytes:
+ ...
+
+
+
+class deferring_hooked_producer:
+ """
+ A producer that will call <function> when it empties,
+ with an argument of the number of bytes produced. Useful
+ for logging/instrumentation purposes.
+ """
+ def __init__(self, producer, function) -> None:
+ ...
+
+ def more(self): # -> Type[NOT_DONE_YET] | Literal[b'']:
+ ...
+
+
+
+class deferring_http_request(http_server.http_request):
+ """ The medusa http_request class uses the default set of producers in
+ medusa.producers. We can't use these because they don't know anything
+ about deferred responses, so we override various methods here. This was
+ added to support tail -f like behavior on the logtail handler """
+ def done(self, *arg, **kw): # -> None:
+ """ I didn't want to override this, but there's no way around
+ it in order to support deferreds - CM
+
+ finalize this transaction - send output to the http channel"""
+ ...
+
+ def log(self, bytes): # -> None:
+ """ We need to override this because UNIX domain sockets return
+ an empty string for the addr rather than a (host, port) combination """
+ ...
+
+ def cgi_environment(self): # -> dict[Unknown, Unknown]:
+ ...
+
+ def get_server_url(self): # -> str:
+ """ Functionality that medusa's http request doesn't have; set an
+ attribute named 'server_url' on the request based on the Host: header
+ """
+ ...
+
+
+
+class deferring_http_channel(http_server.http_channel):
+ ac_out_buffer_size = ...
+ delay = ...
+ last_writable_check = ...
+ def writable(self, now=...): # -> bool:
+ ...
+
+ def refill_buffer(self): # -> None:
+ """ Implement deferreds """
+ ...
+
+ def found_terminator(self): # -> None:
+ """ We only override this to use 'deferring_http_request' class
+ instead of the normal http_request class; it sucks to need to override
+ this """
+ ...
+
+
+
+class supervisor_http_server(http_server.http_server):
+ channel_class = deferring_http_channel
+ ip = ...
+ def prebind(self, sock, logger_object): # -> None:
+ """ Override __init__ to do logger setup earlier so it can
+ go to our logger object instead of stdout """
+ ...
+
+ def postbind(self): # -> None:
+ ...
+
+ def log_info(self, message, type=...): # -> None:
+ ...
+
+
+
+class supervisor_af_inet_http_server(supervisor_http_server):
+ """ AF_INET version of supervisor HTTP server """
+ def __init__(self, ip, port, logger_object) -> None:
+ ...
+
+
+
+class supervisor_af_unix_http_server(supervisor_http_server):
+ """ AF_UNIX version of supervisor HTTP server """
+ def __init__(self, socketname, sockchmod, sockchown, logger_object) -> None:
+ ...
+
+ def checkused(self, socketname): # -> bool:
+ ...
+
+
+
+class tail_f_producer:
+ def __init__(self, request, filename, head) -> None:
+ ...
+
+ def __del__(self): # -> None:
+ ...
+
+ def more(self): # -> bytes | Type[NOT_DONE_YET] | Literal['==> File truncated <==\n']:
+ ...
+
+
+
+class logtail_handler:
+ IDENT = ...
+ path = ...
+ def __init__(self, supervisord) -> None:
+ ...
+
+ def match(self, request):
+ ...
+
+ def handle_request(self, request): # -> None:
+ ...
+
+
+
+class mainlogtail_handler:
+ IDENT = ...
+ path = ...
+ def __init__(self, supervisord) -> None:
+ ...
+
+ def match(self, request):
+ ...
+
+ def handle_request(self, request): # -> None:
+ ...
+
+
+
+def make_http_servers(options, supervisord): # -> list[Unknown]:
+ ...
+
+class LogWrapper:
+ '''Receives log messages from the Medusa servers and forwards
+ them to the Supervisor logger'''
+ def __init__(self, logger) -> None:
+ ...
+
+ def log(self, msg): # -> None:
+ '''Medusa servers call this method. There is no log level so
+ we have to sniff the message. We want "Server Error" messages
+ from medusa.http_server logged as errors at least.'''
+ ...
+
+
+
+class encrypted_dictionary_authorizer:
+ def __init__(self, dict) -> None:
+ ...
+
+ def authorize(self, auth_info): # -> Literal[False]:
+ ...
+
+
+
+class supervisor_auth_handler(auth_handler):
+ def __init__(self, dict, handler, realm=...) -> None:
+ ...
diff --git a/manager/typings/supervisor/http_client.pyi b/manager/typings/supervisor/http_client.pyi
new file mode 100644
index 00000000..94dcd828
--- /dev/null
+++ b/manager/typings/supervisor/http_client.pyi
@@ -0,0 +1,84 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+from supervisor.medusa import asynchat_25 as asynchat
+
+CR = ...
+LF = ...
+CRLF = ...
+class Listener:
+ def status(self, url, status): # -> None:
+ ...
+
+ def error(self, url, error): # -> None:
+ ...
+
+ def response_header(self, url, name, value): # -> None:
+ ...
+
+ def done(self, url): # -> None:
+ ...
+
+ def feed(self, url, data): # -> None:
+ ...
+
+ def close(self, url): # -> None:
+ ...
+
+
+
+class HTTPHandler(asynchat.async_chat):
+ def __init__(self, listener, username=..., password=..., conn=..., map=...) -> None:
+ ...
+
+ def get(self, serverurl, path=...): # -> None:
+ ...
+
+ def close(self): # -> None:
+ ...
+
+ def header(self, name, value): # -> None:
+ ...
+
+ def handle_error(self): # -> None:
+ ...
+
+ def handle_connect(self): # -> None:
+ ...
+
+ def feed(self, data): # -> None:
+ ...
+
+ def collect_incoming_data(self, bytes): # -> None:
+ ...
+
+ def found_terminator(self): # -> None:
+ ...
+
+ def ignore(self): # -> None:
+ ...
+
+ def status_line(self): # -> tuple[bytes | Unknown, int, bytes | Unknown]:
+ ...
+
+ def headers(self): # -> None:
+ ...
+
+ def response_header(self, name, value): # -> None:
+ ...
+
+ def body(self): # -> None:
+ ...
+
+ def done(self): # -> None:
+ ...
+
+ def chunked_size(self): # -> None:
+ ...
+
+ def chunked_body(self): # -> None:
+ ...
+
+ def trailer(self): # -> None:
+ ...
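+
+# Illustrative sketch, not part of the generated stub (assumes a supervisord
+# HTTP server on localhost:9001): this client underlies `supervisorctl tail -f`;
+# a Listener subclass receives the streamed body via feed():
+#
+#     from supervisor import http_client
+#     from supervisor.medusa import asyncore_25 as asyncore
+#
+#     class PrintingListener(http_client.Listener):
+#         def feed(self, url, data):
+#             print(data.decode('utf-8', 'replace'), end='')
+#
+#     handler = http_client.HTTPHandler(PrintingListener(), '', '')
+#     handler.get('http://localhost:9001', '/logtail/myprogram')
+#     asyncore.loop()  # drive the socket until the server closes it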
diff --git a/manager/typings/supervisor/loggers.pyi b/manager/typings/supervisor/loggers.pyi
new file mode 100644
index 00000000..b46f2336
--- /dev/null
+++ b/manager/typings/supervisor/loggers.pyi
@@ -0,0 +1,233 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+"""
+Logger implementation loosely modeled on PEP 282. We don't use the
+PEP 282 logger implementation in the stdlib ('logging') because it's
+idiosyncratic and a bit slow for our purposes (we don't use threads).
+"""
+class LevelsByName:
+ CRIT = ...
+ ERRO = ...
+ WARN = ...
+ INFO = ...
+ DEBG = ...
+ TRAC = ...
+ BLAT = ...
+
+
+class LevelsByDescription:
+ critical = ...
+ error = ...
+ warn = ...
+ info = ...
+ debug = ...
+ trace = ...
+ blather = ...
+
+
+LOG_LEVELS_BY_NUM = ...
+def getLevelNumByDescription(description): # -> Any | None:
+ ...
+
+class Handler:
+ fmt = ...
+ level = ...
+ def __init__(self, stream=...) -> None:
+ ...
+
+ def setFormat(self, fmt): # -> None:
+ ...
+
+ def setLevel(self, level): # -> None:
+ ...
+
+ def flush(self): # -> None:
+ ...
+
+ def close(self): # -> None:
+ ...
+
+ def emit(self, record): # -> None:
+ ...
+
+ def handleError(self): # -> None:
+ ...
+
+
+
+class StreamHandler(Handler):
+ def __init__(self, strm=...) -> None:
+ ...
+
+ def remove(self): # -> None:
+ ...
+
+ def reopen(self): # -> None:
+ ...
+
+
+
+class BoundIO:
+ def __init__(self, maxbytes, buf=...) -> None:
+ ...
+
+ def flush(self): # -> None:
+ ...
+
+ def close(self): # -> None:
+ ...
+
+ def write(self, b): # -> None:
+ ...
+
+ def getvalue(self): # -> Unknown | bytes:
+ ...
+
+ def clear(self): # -> None:
+ ...
+
+
+
+class FileHandler(Handler):
+ """File handler which supports reopening of logs.
+ """
+ def __init__(self, filename, mode=...) -> None:
+ ...
+
+ def reopen(self): # -> None:
+ ...
+
+ def remove(self): # -> None:
+ ...
+
+
+
+class RotatingFileHandler(FileHandler):
+ def __init__(self, filename, mode=..., maxBytes=..., backupCount=...) -> None:
+ """
+ Open the specified file and use it as the stream for logging.
+
+ By default, the file grows indefinitely. You can specify particular
+ values of maxBytes and backupCount to allow the file to rollover at
+ a predetermined size.
+
+ Rollover occurs whenever the current log file is nearly maxBytes in
+ length. If backupCount is >= 1, the system will successively create
+ new files with the same pathname as the base file, but with extensions
+ ".1", ".2" etc. appended to it. For example, with a backupCount of 5
+ and a base file name of "app.log", you would get "app.log",
+ "app.log.1", "app.log.2", ... through to "app.log.5". The file being
+ written to is always "app.log" - when it gets filled up, it is closed
+ and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
+ exist, then they are renamed to "app.log.2", "app.log.3" etc.
+ respectively.
+
+ If maxBytes is zero, rollover never occurs.
+ """
+ ...
+
+ def emit(self, record): # -> None:
+ """
+ Emit a record.
+
+ Output the record to the file, catering for rollover as described
+ in doRollover().
+ """
+ ...
+
+ def removeAndRename(self, sfn, dfn): # -> None:
+ ...
+
+ def doRollover(self): # -> None:
+ """
+ Do a rollover, as described in __init__().
+ """
+ ...
+
+
+
+class LogRecord:
+ def __init__(self, level, msg, **kw) -> None:
+ ...
+
+ def asdict(self): # -> dict[str, str | Unknown]:
+ ...
+
+
+
+class Logger:
+ def __init__(self, level=..., handlers=...) -> None:
+ ...
+
+ def close(self): # -> None:
+ ...
+
+ def blather(self, msg, **kw): # -> None:
+ ...
+
+ def trace(self, msg, **kw): # -> None:
+ ...
+
+ def debug(self, msg, **kw): # -> None:
+ ...
+
+ def info(self, msg, **kw): # -> None:
+ ...
+
+ def warn(self, msg, **kw): # -> None:
+ ...
+
+ def error(self, msg, **kw): # -> None:
+ ...
+
+ def critical(self, msg, **kw): # -> None:
+ ...
+
+ def log(self, level, msg, **kw): # -> None:
+ ...
+
+ def addHandler(self, hdlr): # -> None:
+ ...
+
+ def getvalue(self):
+ ...
+
+
+
+class SyslogHandler(Handler):
+ def __init__(self) -> None:
+ ...
+
+ def close(self): # -> None:
+ ...
+
+ def reopen(self): # -> None:
+ ...
+
+ def emit(self, record): # -> None:
+ ...
+
+
+
+def getLogger(level=...): # -> Logger:
+ ...
+
+_2MB = ...
+def handle_boundIO(logger, fmt, maxbytes=...): # -> None:
+ """Attach a new BoundIO handler to an existing Logger"""
+ ...
+
+def handle_stdout(logger, fmt): # -> None:
+ """Attach a new StreamHandler with stdout handler to an existing Logger"""
+ ...
+
+def handle_syslog(logger, fmt): # -> None:
+ """Attach a new Syslog handler to an existing Logger"""
+ ...
+
+def handle_file(logger, filename, fmt, rotating=..., maxbytes=..., backups=...): # -> None:
+ """Attach a new file handler to an existing Logger. If the filename
+ is the magic name of 'syslog' then make it a syslog handler instead."""
+ ...
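+
+# Illustrative sketch, not part of the generated stub: wiring up a rotating
+# file log through the helpers above (the format string is an assumption):
+#
+#     from supervisor import loggers
+#
+#     logger = loggers.getLogger(loggers.LevelsByName.INFO)
+#     loggers.handle_file(logger, '/tmp/app.log',
+#                         '%(asctime)s %(levelname)s %(message)s\n',
+#                         rotating=True, maxbytes=1 << 20, backups=5)
+#     logger.info('hello')  # rolls to app.log.1 ... app.log.5 near 1 MiB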
diff --git a/manager/typings/supervisor/medusa/__init__.pyi b/manager/typings/supervisor/medusa/__init__.pyi
new file mode 100644
index 00000000..2317f9a8
--- /dev/null
+++ b/manager/typings/supervisor/medusa/__init__.pyi
@@ -0,0 +1,7 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+"""medusa.__init__
+"""
+__revision__ = ...
diff --git a/manager/typings/supervisor/medusa/asynchat_25.pyi b/manager/typings/supervisor/medusa/asynchat_25.pyi
new file mode 100644
index 00000000..c269d900
--- /dev/null
+++ b/manager/typings/supervisor/medusa/asynchat_25.pyi
@@ -0,0 +1,117 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+from supervisor.medusa import asyncore_25 as asyncore
+
+r"""A class supporting chat-style (command/response) protocols.
+
+This class adds support for 'chat' style protocols - where one side
+sends a 'command', and the other sends a response (examples would be
+the common internet protocols - smtp, nntp, ftp, etc..).
+
+The handle_read() method looks at the input stream for the current
+'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
+for multi-line output), calling self.found_terminator() on its
+receipt.
+
+For example:
+Say you build an async nntp client using this class. At the start
+of the connection, you'll have self.terminator set to '\r\n', in
+order to process the single-line greeting. Just before issuing a
+'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
+command will be accumulated (using your own 'collect_incoming_data'
+method) up to the terminator, and then control will be returned to
+you - by calling your self.found_terminator() method.
+"""
+class async_chat(asyncore.dispatcher):
+ """This is an abstract class. You must derive from this class, and add
+ the two methods collect_incoming_data() and found_terminator()"""
+ ac_in_buffer_size = ...
+ ac_out_buffer_size = ...
+ def __init__(self, conn=..., map=...) -> None:
+ ...
+
+ def collect_incoming_data(self, data):
+ ...
+
+ def found_terminator(self):
+ ...
+
+ def set_terminator(self, term): # -> None:
+ """Set the input delimiter. Can be a fixed string of any length, an integer, or None"""
+ ...
+
+ def get_terminator(self): # -> int:
+ ...
+
+ def handle_read(self): # -> None:
+ ...
+
+ def handle_write(self): # -> None:
+ ...
+
+ def handle_close(self): # -> None:
+ ...
+
+ def push(self, data): # -> None:
+ ...
+
+ def push_with_producer(self, producer): # -> None:
+ ...
+
+ def readable(self): # -> bool:
+ """predicate for inclusion in the readable for select()"""
+ ...
+
+ def writable(self): # -> bool:
+ """predicate for inclusion in the writable for select()"""
+ ...
+
+ def close_when_done(self): # -> None:
+ """automatically close this channel once the outgoing queue is empty"""
+ ...
+
+ def refill_buffer(self): # -> None:
+ ...
+
+ def initiate_send(self): # -> None:
+ ...
+
+ def discard_buffers(self): # -> None:
+ ...
+
+
+
+class simple_producer:
+ def __init__(self, data, buffer_size=...) -> None:
+ ...
+
+ def more(self): # -> bytes:
+ ...
+
+
+
+class fifo:
+ def __init__(self, list=...) -> None:
+ ...
+
+ def __len__(self): # -> int:
+ ...
+
+ def is_empty(self): # -> bool:
+ ...
+
+ def first(self):
+ ...
+
+ def push(self, data): # -> None:
+ ...
+
+ def pop(self): # -> tuple[Literal[1], Unknown] | tuple[Literal[0], None]:
+ ...
+
+
+
+def find_prefix_at_end(haystack, needle): # -> int:
+ ...
diff --git a/manager/typings/supervisor/medusa/asyncore_25.pyi b/manager/typings/supervisor/medusa/asyncore_25.pyi
new file mode 100644
index 00000000..d0eb145f
--- /dev/null
+++ b/manager/typings/supervisor/medusa/asyncore_25.pyi
@@ -0,0 +1,195 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+import os
+
+"""Basic infrastructure for asynchronous socket service clients and servers.
+
+There are only two ways to have a program on a single processor do "more
+than one thing at a time". Multi-threaded programming is the simplest and
+most popular way to do it, but there is another very different technique,
+that lets you have nearly all the advantages of multi-threading, without
+actually using multiple threads. It's really only practical if your program
+is largely I/O bound. If your program is CPU bound, then preemptive
+scheduled threads are probably what you really need. Network servers are
+rarely CPU-bound, however.
+
+If your operating system supports the select() system call in its I/O
+library (and nearly all do), then you can use it to juggle multiple
+communication channels at once; doing other work while your I/O is taking
+place in the "background." Although this strategy can seem strange and
+complex, especially at first, it is in many ways easier to understand and
+control than multi-threaded programming. The module documented here solves
+many of the difficult problems for you, making the task of building
+sophisticated high-performance network servers and clients a snap.
+"""
+class ExitNow(Exception):
+ ...
+
+
+def read(obj): # -> None:
+ ...
+
+def write(obj): # -> None:
+ ...
+
+def readwrite(obj, flags): # -> None:
+ ...
+
+def poll(timeout=..., map=...): # -> None:
+ ...
+
+def poll2(timeout=..., map=...): # -> None:
+ ...
+
+poll3 = ...
+def loop(timeout=..., use_poll=..., map=..., count=...): # -> None:
+ ...
+
+class dispatcher:
+ debug = ...
+ connected = ...
+ accepting = ...
+ closing = ...
+ addr = ...
+ def __init__(self, sock=..., map=...) -> None:
+ ...
+
+ def __repr__(self): # -> str:
+ ...
+
+ def add_channel(self, map=...): # -> None:
+ ...
+
+ def del_channel(self, map=...): # -> None:
+ ...
+
+ def create_socket(self, family, type): # -> None:
+ ...
+
+ def set_socket(self, sock, map=...): # -> None:
+ ...
+
+ def set_reuse_addr(self): # -> None:
+ ...
+
+ def readable(self): # -> Literal[True]:
+ ...
+
+ def writable(self): # -> Literal[True]:
+ ...
+
+ def listen(self, num): # -> None:
+ ...
+
+ def bind(self, addr): # -> None:
+ ...
+
+ def connect(self, address): # -> None:
+ ...
+
+ def accept(self): # -> tuple[socket | Unknown, _RetAddress | Unknown] | None:
+ ...
+
+ def send(self, data): # -> int:
+ ...
+
+ def recv(self, buffer_size): # -> bytes:
+ ...
+
+ def close(self): # -> None:
+ ...
+
+ def __getattr__(self, attr): # -> Any:
+ ...
+
+ def log(self, message): # -> None:
+ ...
+
+ def log_info(self, message, type=...): # -> None:
+ ...
+
+ def handle_read_event(self): # -> None:
+ ...
+
+ def handle_write_event(self): # -> None:
+ ...
+
+ def handle_expt_event(self): # -> None:
+ ...
+
+ def handle_error(self): # -> None:
+ ...
+
+ def handle_expt(self): # -> None:
+ ...
+
+ def handle_read(self): # -> None:
+ ...
+
+ def handle_write(self): # -> None:
+ ...
+
+ def handle_connect(self): # -> None:
+ ...
+
+ def handle_accept(self): # -> None:
+ ...
+
+ def handle_close(self): # -> None:
+ ...
+
+
+
+class dispatcher_with_send(dispatcher):
+ def __init__(self, sock=..., map=...) -> None:
+ ...
+
+ def initiate_send(self): # -> None:
+ ...
+
+ def handle_write(self): # -> None:
+ ...
+
+ def writable(self): # -> int | Literal[True]:
+ ...
+
+ def send(self, data): # -> None:
+ ...
+
+
+
+def compact_traceback(): # -> tuple[tuple[Unknown, Unknown, Unknown], Type[BaseException] | None, BaseException | None, str]:
+ ...
+
+def close_all(map=...): # -> None:
+ ...
+
+if os.name == 'posix':
+ class file_wrapper:
+ def __init__(self, fd) -> None:
+ ...
+
+ def recv(self, buffersize): # -> str:
+ ...
+
+ def send(self, s): # -> int:
+ ...
+
+ read = ...
+ write = ...
+ def close(self): # -> None:
+ ...
+
+ def fileno(self):
+ ...
+
+
+
+ class file_dispatcher(dispatcher):
+ def __init__(self, fd, map=...) -> None:
+ ...
+
+ def set_file(self, fd): # -> None:
+ ...
diff --git a/manager/typings/supervisor/medusa/auth_handler.pyi b/manager/typings/supervisor/medusa/auth_handler.pyi
new file mode 100644
index 00000000..8c19e35f
--- /dev/null
+++ b/manager/typings/supervisor/medusa/auth_handler.pyi
@@ -0,0 +1,42 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+RCS_ID = ...
+get_header = ...
+class auth_handler:
+ def __init__(self, dict, handler, realm=...) -> None:
+ ...
+
+ def match(self, request):
+ ...
+
+ def handle_request(self, request): # -> None:
+ ...
+
+ def handle_unauthorized(self, request): # -> None:
+ ...
+
+ def make_nonce(self, request): # -> bytes:
+ """A digest-authentication <nonce>, constructed as suggested in RFC 2069"""
+ ...
+
+ def apply_hash(self, s): # -> bytes:
+ """Apply MD5 to a string <s>, then wrap it in base64 encoding."""
+ ...
+
+ def status(self): # -> composite_producer:
+ ...
+
+
+
+class dictionary_authorizer:
+ def __init__(self, dict) -> None:
+ ...
+
+ def authorize(self, auth_info): # -> Literal[1, 0]:
+ ...
+
+
+
+AUTHORIZATION = ...
diff --git a/manager/typings/supervisor/medusa/counter.pyi b/manager/typings/supervisor/medusa/counter.pyi
new file mode 100644
index 00000000..f5402309
--- /dev/null
+++ b/manager/typings/supervisor/medusa/counter.pyi
@@ -0,0 +1,27 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+class counter:
+ """general-purpose counter"""
+ def __init__(self, initial_value=...) -> None:
+ ...
+
+ def increment(self, delta=...): # -> Unknown:
+ ...
+
+ def decrement(self, delta=...): # -> Unknown:
+ ...
+
+ def as_long(self): # -> int:
+ ...
+
+ def __nonzero__(self):
+ ...
+
+ __bool__ = ...
+ def __repr__(self): # -> str:
+ ...
+
+ def __str__(self) -> str:
+ ...
diff --git a/manager/typings/supervisor/medusa/default_handler.pyi b/manager/typings/supervisor/medusa/default_handler.pyi
new file mode 100644
index 00000000..227e0dab
--- /dev/null
+++ b/manager/typings/supervisor/medusa/default_handler.pyi
@@ -0,0 +1,41 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+import supervisor.medusa.producers as producers
+
+RCS_ID = ...
+unquote = ...
+class default_handler:
+ valid_commands = ...
+ IDENT = ...
+ directory_defaults = ...
+ default_file_producer = producers.file_producer
+ def __init__(self, filesystem) -> None:
+ ...
+
+ hit_counter = ...
+ def __repr__(self): # -> str:
+ ...
+
+ def match(self, request): # -> Literal[1]:
+ ...
+
+ def handle_request(self, request): # -> None:
+ ...
+
+ def set_content_type(self, path, request): # -> None:
+ ...
+
+ def status(self): # -> simple_producer:
+ ...
+
+
+
+IF_MODIFIED_SINCE = ...
+USER_AGENT = ...
+CONTENT_TYPE = ...
+get_header = ...
+get_header_match = ...
+def get_extension(path): # -> Literal['']:
+ ...
diff --git a/manager/typings/supervisor/medusa/filesys.pyi b/manager/typings/supervisor/medusa/filesys.pyi
new file mode 100644
index 00000000..7f0ffdb9
--- /dev/null
+++ b/manager/typings/supervisor/medusa/filesys.pyi
@@ -0,0 +1,176 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+import os
+
+class abstract_filesystem:
+ def __init__(self) -> None:
+ ...
+
+ def current_directory(self): # -> None:
+ """Return a string representing the current directory."""
+ ...
+
+ def listdir(self, path, long=...): # -> None:
+ """Return a listing of the directory at 'path' The empty string
+ indicates the current directory. If 'long' is set, instead
+ return a list of (name, stat_info) tuples
+ """
+ ...
+
+ def open(self, path, mode): # -> None:
+ """Return an open file object"""
+ ...
+
+ def stat(self, path): # -> None:
+ """Return the equivalent of os.stat() on the given path."""
+ ...
+
+ def isdir(self, path): # -> None:
+ """Does the path represent a directory?"""
+ ...
+
+ def isfile(self, path): # -> None:
+ """Does the path represent a plain file?"""
+ ...
+
+ def cwd(self, path): # -> None:
+ """Change the working directory."""
+ ...
+
+ def cdup(self): # -> None:
+ """Change to the parent of the current directory."""
+ ...
+
+ def longify(self, path): # -> None:
+ """Return a 'long' representation of the filename
+ [for the output of the LIST command]"""
+ ...
+
+
+
+def safe_stat(path): # -> tuple[Unknown, stat_result] | None:
+ ...
+
+class os_filesystem:
+ path_module = os.path
+ do_globbing = ...
+ def __init__(self, root, wd=...) -> None:
+ ...
+
+ def current_directory(self): # -> Unknown | str:
+ ...
+
+ def isfile(self, path): # -> bool:
+ ...
+
+ def isdir(self, path): # -> bool:
+ ...
+
+ def cwd(self, path): # -> Literal[0, 1]:
+ ...
+
+ def cdup(self): # -> Literal[0, 1]:
+ ...
+
+ def listdir(self, path, long=...): # -> list_producer:
+ ...
+
+ def stat(self, path): # -> stat_result:
+ ...
+
+ def open(self, path, mode): # -> TextIOWrapper:
+ ...
+
+ def unlink(self, path): # -> None:
+ ...
+
+ def mkdir(self, path): # -> None:
+ ...
+
+ def rmdir(self, path): # -> None:
+ ...
+
+ def rename(self, src, dst): # -> None:
+ ...
+
+ def normalize(self, path): # -> str:
+ ...
+
+ def translate(self, path): # -> str:
+ ...
+
+ def longify(self, path_stat_info_tuple): # -> str:
+ ...
+
+ def __repr__(self): # -> str:
+ ...
+
+
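+# Illustrative sketch, not part of the generated stub (assumed medusa
+# semantics): paths are resolved inside the sandbox root given at construction:
+#
+#     fs = os_filesystem('/srv/ftp')      # sandbox root
+#     fs.cwd('pub')                       # returns 1 on success -> /srv/ftp/pub
+#     listing = fs.listdir('', long=1)    # list_producer of long-format lines
+#     f = fs.open('README', 'r')          # opens /srv/ftp/pub/README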
+
+if os.name == 'posix':
+ class unix_filesystem(os_filesystem):
+ ...
+
+
+ class schizophrenic_unix_filesystem(os_filesystem):
+ PROCESS_UID = ...
+ PROCESS_EUID = ...
+ PROCESS_GID = ...
+ PROCESS_EGID = ...
+ def __init__(self, root, wd=..., persona=...) -> None:
+ ...
+
+ def become_persona(self): # -> None:
+ ...
+
+ def become_nobody(self): # -> None:
+ ...
+
+ def cwd(self, path): # -> Literal[0, 1]:
+ ...
+
+ def cdup(self): # -> Literal[0, 1]:
+ ...
+
+ def open(self, filename, mode): # -> TextIOWrapper:
+ ...
+
+ def listdir(self, path, long=...): # -> list_producer:
+ ...
+
+
+
+class msdos_filesystem(os_filesystem):
+ def longify(self, path_stat_info_tuple): # -> str:
+ ...
+
+
+
+class merged_filesystem:
+ def __init__(self, *fsys) -> None:
+ ...
+
+
+
+def msdos_longify(file, stat_info): # -> str:
+ ...
+
+def msdos_date(t): # -> str:
+ ...
+
+months = ...
+mode_table = ...
+def unix_longify(file, stat_info): # -> str:
+ ...
+
+def ls_date(now, t): # -> str:
+ ...
+
+class list_producer:
+ def __init__(self, list, func=...) -> None:
+ ...
+
+ def more(self): # -> str:
+ ...
diff --git a/manager/typings/supervisor/medusa/http_date.pyi b/manager/typings/supervisor/medusa/http_date.pyi
new file mode 100644
index 00000000..1aa9fa0d
--- /dev/null
+++ b/manager/typings/supervisor/medusa/http_date.pyi
@@ -0,0 +1,37 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+def concat(*args): # -> str:
+ ...
+
+def join(seq, field=...):
+ ...
+
+def group(s):
+ ...
+
+short_days = ...
+long_days = ...
+short_day_reg = ...
+long_day_reg = ...
+daymap = ...
+hms_reg = ...
+months = ...
+monmap = ...
+months_reg = ...
+rfc822_date = ...
+rfc822_reg = ...
+def unpack_rfc822(m): # -> tuple[int, Unknown, int, int, int, int, Literal[0], Literal[0], Literal[0]]:
+ ...
+
+rfc850_date = ...
+rfc850_reg = ...
+def unpack_rfc850(m): # -> tuple[int, Unknown, int, int, int, int, Literal[0], Literal[0], Literal[0]]:
+ ...
+
+def build_http_date(when): # -> str:
+ ...
+
+def parse_http_date(d): # -> int:
+ ...
diff --git a/manager/typings/supervisor/medusa/http_server.pyi b/manager/typings/supervisor/medusa/http_server.pyi
new file mode 100644
index 00000000..0a3ee626
--- /dev/null
+++ b/manager/typings/supervisor/medusa/http_server.pyi
@@ -0,0 +1,196 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+import supervisor.medusa.asynchat_25 as asynchat
+import supervisor.medusa.asyncore_25 as asyncore
+
+RCS_ID = ...
+VERSION_STRING = ...
+class http_request:
+ reply_code = ...
+ request_counter = ...
+ use_chunked = ...
+ collector = ...
+ def __init__(self, *args) -> None:
+ ...
+
+ def __setitem__(self, key, value): # -> None:
+ ...
+
+ def __getitem__(self, key): # -> str:
+ ...
+
+ def __contains__(self, key): # -> bool:
+ ...
+
+ def has_key(self, key): # -> bool:
+ ...
+
+ def build_reply_header(self): # -> bytes:
+ ...
+
+ def add_header(self, name, value): # -> None:
+ """ Adds a header to the reply headers """
+ ...
+
+ def clear_headers(self): # -> None:
+ """ Clears the reply header list """
+ ...
+
+ def remove_header(self, name, value=...): # -> None:
+ """ Removes the specified header.
+ If a value is provided, the name and
+ value must match to remove the header.
+ If the value is None, removes all headers
+ with that name."""
+ ...
+
+ def get_reply_headers(self): # -> List[Unknown]:
+ """ Get the tuple of headers that will be used
+ for generating reply headers"""
+ ...
+
+ def get_reply_header_text(self): # -> str:
+ """ Gets the reply header (including status and
+ additional crlf)"""
+ ...
+
+ path_regex = ...
+ def split_uri(self): # -> Tuple[str | Any, ...]:
+ ...
+
+ def get_header_with_regex(self, head_reg, group): # -> Literal['']:
+ ...
+
+ def get_header(self, header): # -> None:
+ ...
+
+ def collect_incoming_data(self, data): # -> None:
+ ...
+
+ def found_terminator(self): # -> None:
+ ...
+
+ def push(self, thing): # -> None:
+ ...
+
+ def response(self, code=...): # -> str:
+ ...
+
+ def error(self, code): # -> None:
+ ...
+
+ reply_now = ...
+ def done(self): # -> None:
+ """finalize this transaction - send output to the http channel"""
+ ...
+
+ def log_date_string(self, when): # -> str:
+ ...
+
+ def log(self, bytes): # -> None:
+ ...
+
+ responses = ...
+ DEFAULT_ERROR_MESSAGE = ...
+ def log_info(self, msg, level): # -> None:
+ ...
+
+
+
+class http_channel(asynchat.async_chat):
+ ac_out_buffer_size = ...
+ current_request = ...
+ channel_counter = ...
+ def __init__(self, server, conn, addr) -> None:
+ ...
+
+ def __repr__(self): # -> str:
+ ...
+
+ maintenance_interval = ...
+ def check_maintenance(self): # -> None:
+ ...
+
+ def maintenance(self): # -> None:
+ ...
+
+ zombie_timeout = ...
+ def kill_zombies(self): # -> None:
+ ...
+
+ def send(self, data): # -> int:
+ ...
+
+ def recv(self, buffer_size): # -> bytes:
+ ...
+
+ def handle_error(self): # -> None:
+ ...
+
+ def log(self, *args): # -> None:
+ ...
+
+ def collect_incoming_data(self, data): # -> None:
+ ...
+
+ def found_terminator(self): # -> None:
+ ...
+
+ def writable_for_proxy(self): # -> bool | Literal[1] | None:
+ ...
+
+
+
+class http_server(asyncore.dispatcher):
+ SERVER_IDENT = ...
+ channel_class = http_channel
+ def __init__(self, ip, port, resolver=..., logger_object=...) -> None:
+ ...
+
+ def writable(self): # -> Literal[0]:
+ ...
+
+ def handle_read(self): # -> None:
+ ...
+
+ def readable(self): # -> bool:
+ ...
+
+ def handle_connect(self): # -> None:
+ ...
+
+ def handle_accept(self): # -> None:
+ ...
+
+ def install_handler(self, handler, back=...): # -> None:
+ ...
+
+ def remove_handler(self, handler): # -> None:
+ ...
+
+ def status(self): # -> composite_producer:
+ ...
+
+
+
+def maybe_status(thing): # -> None:
+ ...
+
+CONNECTION = ...
+def join_headers(headers): # -> list[Unknown]:
+ ...
+
+def get_header(head_reg, lines, group=...): # -> Literal['']:
+ ...
+
+def get_header_match(head_reg, lines): # -> Literal['']:
+ ...
+
+REQUEST = ...
+def crack_request(r): # -> tuple[str | Any, str | Any, str | Any | None] | tuple[None, None, None]:
+ ...
+
+if __name__ == '__main__':
+ ...
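+
+# Illustrative sketch, not part of the generated stub: the classic medusa
+# handler contract is any object with match() and handle_request(), e.g.:
+#
+#     class hello_handler:
+#         def match(self, request):
+#             return request.split_uri()[0] == '/hello'
+#         def handle_request(self, request):
+#             body = b'hello\r\n'
+#             request['Content-Type'] = 'text/plain'
+#             request['Content-Length'] = len(body)
+#             request.push(body)
+#             request.done()
+#
+#     hs = http_server('127.0.0.1', 8080)
+#     hs.install_handler(hello_handler())
+#     asyncore.loop()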
diff --git a/manager/typings/supervisor/medusa/logger.pyi b/manager/typings/supervisor/medusa/logger.pyi
new file mode 100644
index 00000000..2a2a9d00
--- /dev/null
+++ b/manager/typings/supervisor/medusa/logger.pyi
@@ -0,0 +1,122 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+import supervisor.medusa.asynchat_25 as asynchat
+
+class file_logger:
+ def __init__(self, file, flush=..., mode=...) -> None:
+ ...
+
+ def __repr__(self): # -> str:
+ ...
+
+ def write(self, data): # -> None:
+ ...
+
+ def writeline(self, line): # -> None:
+ ...
+
+ def writelines(self, lines): # -> None:
+ ...
+
+ def maybe_flush(self): # -> None:
+ ...
+
+ def flush(self): # -> None:
+ ...
+
+ def softspace(self, *args): # -> None:
+ ...
+
+ def log(self, message): # -> None:
+ ...
+
+
+
+class rotating_file_logger(file_logger):
+ def __init__(self, file, freq=..., maxsize=..., flush=..., mode=...) -> None:
+ ...
+
+ def __repr__(self): # -> str:
+ ...
+
+ def next_backup(self, freq): # -> float | None:
+ ...
+
+ def maybe_flush(self): # -> None:
+ ...
+
+ def maybe_rotate(self): # -> None:
+ ...
+
+ def rotate(self): # -> None:
+ ...
+
+
+
+class socket_logger(asynchat.async_chat):
+ def __init__(self, address) -> None:
+ ...
+
+ def __repr__(self): # -> str:
+ ...
+
+ def log(self, message): # -> None:
+ ...
+
+
+
+class multi_logger:
+ def __init__(self, loggers) -> None:
+ ...
+
+ def __repr__(self): # -> str:
+ ...
+
+ def log(self, message): # -> None:
+ ...
+
+
+
+class resolving_logger:
+ """Feed (ip, message) combinations into this logger to get a
+ resolved hostname in front of the message. The message will not
+ be logged until the PTR request finishes (or fails)."""
+ def __init__(self, resolver, logger) -> None:
+ ...
+
+ class logger_thunk:
+ def __init__(self, message, logger) -> None:
+ ...
+
+ def __call__(self, host, ttl, answer): # -> None:
+ ...
+
+
+
+ def log(self, ip, message): # -> None:
+ ...
+
+
+
+class unresolving_logger:
+ """Just in case you don't want to resolve"""
+ def __init__(self, logger) -> None:
+ ...
+
+ def log(self, ip, message): # -> None:
+ ...
+
+
+
+def strip_eol(line):
+ ...
+
+class tail_logger:
+ """Keep track of the last <size> log messages"""
+ def __init__(self, logger, size=...) -> None:
+ ...
+
+ def log(self, message): # -> None:
+ ...
diff --git a/manager/typings/supervisor/medusa/producers.pyi b/manager/typings/supervisor/medusa/producers.pyi
new file mode 100644
index 00000000..00a005d7
--- /dev/null
+++ b/manager/typings/supervisor/medusa/producers.pyi
@@ -0,0 +1,155 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+RCS_ID = ...
+class simple_producer:
+ """producer for a string"""
+ def __init__(self, data, buffer_size=...) -> None:
+ ...
+
+ def more(self): # -> bytes | Unknown:
+ ...
+
+
+
+class scanning_producer:
+ """like simple_producer, but more efficient for large strings"""
+ def __init__(self, data, buffer_size=...) -> None:
+ ...
+
+ def more(self): # -> Literal[b'']:
+ ...
+
+
+
+class lines_producer:
+ """producer for a list of lines"""
+ def __init__(self, lines) -> None:
+ ...
+
+ def more(self): # -> str:
+ ...
+
+
+
+class buffer_list_producer:
+ """producer for a list of strings"""
+ def __init__(self, buffers) -> None:
+ ...
+
+ def more(self): # -> Literal[b'']:
+ ...
+
+
+
+class file_producer:
+ """producer wrapper for file[-like] objects"""
+ out_buffer_size = ...
+ def __init__(self, file) -> None:
+ ...
+
+ def more(self): # -> Literal[b'']:
+ ...
+
+
+
+class output_producer:
+ """Acts like an output file; suitable for capturing sys.stdout"""
+ def __init__(self) -> None:
+ ...
+
+ def write(self, data): # -> None:
+ ...
+
+ def writeline(self, line): # -> None:
+ ...
+
+ def writelines(self, lines): # -> None:
+ ...
+
+ def flush(self): # -> None:
+ ...
+
+ def softspace(self, *args): # -> None:
+ ...
+
+ def more(self): # -> bytes | Literal['']:
+ ...
+
+
+
+class composite_producer:
+ """combine a fifo of producers into one"""
+ def __init__(self, producers) -> None:
+ ...
+
+ def more(self): # -> Literal[b'']:
+ ...
+
+
+
+class globbing_producer:
+ """
+ 'glob' the output from a producer into a particular buffer size.
+ helps reduce the number of calls to send(). [this appears to
+ gain about 30% performance on requests to a single channel]
+ """
+ def __init__(self, producer, buffer_size=...) -> None:
+ ...
+
+ def more(self): # -> bytes:
+ ...
+
+
+
+class hooked_producer:
+ """
+ A producer that will call <function> when it empties,
+ with an argument of the number of bytes produced. Useful
+ for logging/instrumentation purposes.
+ """
+ def __init__(self, producer, function) -> None:
+ ...
+
+ def more(self): # -> Literal['']:
+ ...
+
+
+
+class chunked_producer:
+ """A producer that implements the 'chunked' transfer coding for HTTP/1.1.
+ Here is a sample usage:
+ request['Transfer-Encoding'] = 'chunked'
+ request.push (
+ producers.chunked_producer (your_producer)
+ )
+ request.done()
+ """
+ def __init__(self, producer, footers=...) -> None:
+ ...
+
+ def more(self): # -> bytes:
+ ...
+
+
+
+class compressed_producer:
+ """
+ Compress another producer on-the-fly, using ZLIB
+ """
+ def __init__(self, producer, level=...) -> None:
+ ...
+
+ def more(self): # -> bytes:
+ ...
+
+
+
+class escaping_producer:
+ """A producer that escapes a sequence of characters"""
+ def __init__(self, producer, esc_from=..., esc_to=...) -> None:
+ ...
+
+ def more(self):
+ ...
diff --git a/manager/typings/supervisor/medusa/util.pyi b/manager/typings/supervisor/medusa/util.pyi
new file mode 100644
index 00000000..9d54a35c
--- /dev/null
+++ b/manager/typings/supervisor/medusa/util.pyi
@@ -0,0 +1,18 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+def html_repr(object): # -> str:
+ ...
+
+def progressive_divide(n, parts): # -> list[Unknown]:
+ ...
+
+def split_by_units(n, units, dividers, format_string): # -> list[Unknown]:
+ ...
+
+def english_bytes(n): # -> list[str]:
+ ...
+
+def english_time(n): # -> list[str]:
+ ...
diff --git a/manager/typings/supervisor/medusa/xmlrpc_handler.pyi b/manager/typings/supervisor/medusa/xmlrpc_handler.pyi
new file mode 100644
index 00000000..52e60dc4
--- /dev/null
+++ b/manager/typings/supervisor/medusa/xmlrpc_handler.pyi
@@ -0,0 +1,42 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+VERSION = ...
+class xmlrpc_handler:
+ def match(self, request): # -> Literal[1, 0]:
+ ...
+
+ def handle_request(self, request): # -> None:
+ ...
+
+ def continue_request(self, data, request): # -> None:
+ ...
+
+ def call(self, method, params): # -> NoReturn:
+ ...
+
+
+
+class collector:
+ """gathers input for POST and PUT requests"""
+ def __init__(self, handler, request) -> None:
+ ...
+
+ def collect_incoming_data(self, data): # -> None:
+ ...
+
+ def found_terminator(self): # -> None:
+ ...
+
+
+
+if __name__ == '__main__':
+ class rpc_demo(xmlrpc_handler):
+ def call(self, method, params): # -> Literal['Sure, that works']:
+ ...
+
+
+
+ hs = ...
+ rpc = ...
diff --git a/manager/typings/supervisor/options.pyi b/manager/typings/supervisor/options.pyi
new file mode 100644
index 00000000..3363b96e
--- /dev/null
+++ b/manager/typings/supervisor/options.pyi
@@ -0,0 +1,527 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+import warnings
+
+from supervisor.compat import ConfigParser
+
+VERSION = ...
+def normalize_path(v):
+ ...
+
+class Dummy:
+ ...
+
+
+class Options:
+ stderr = ...
+ stdout = ...
+ exit = ...
+ warnings = warnings
+ uid = ...
+ progname = ...
+ configfile = ...
+ schemadir = ...
+ configroot = ...
+ here = ...
+ positional_args_allowed = ...
+ def __init__(self, require_configfile=...) -> None:
+ """Constructor.
+
+ Params:
+ require_configfile -- whether we should fail on no config file.
+ """
+ ...
+
+ def default_configfile(self): # -> str | None:
+ """Return the name of the found config file or print usage/exit."""
+ ...
+
+ def help(self, dummy): # -> None:
+ """Print a long help message to stdout and exit(0).
+
+ Occurrences of "%s" in are replaced by self.progname.
+ """
+ ...
+
+ def usage(self, msg): # -> None:
+ """Print a brief error message to stderr and exit(2)."""
+ ...
+
+ def add(self, name=..., confname=..., short=..., long=..., handler=..., default=..., required=..., flag=..., env=...): # -> None:
+ """Add information about a configuration option.
+
+ This can take several forms:
+
+ add(name, confname)
+ Configuration option 'confname' maps to attribute 'name'
+ add(name, None, short, long)
+ Command line option '-short' or '--long' maps to 'name'
+ add(None, None, short, long, handler)
+ Command line option calls handler
+ add(name, None, short, long, handler)
+ Assign handler return value to attribute 'name'
+
+ In addition, one of the following keyword arguments may be given:
+
+ default=... -- if not None, the default value
+ required=... -- if nonempty, an error message if no value provided
+ flag=... -- if not None, flag value for command line option
+ env=... -- if not None, name of environment variable that
+ overrides the configuration file or default
+ """
+ ...
+
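+# Illustrative sketch, not part of the generated stub (patterned on how
+# supervisor registers its own options; `opts` is a hypothetical instance):
+#
+#     opts.add(None, None, 'h', 'help', opts.help)          # -h / --help
+#     opts.add('configfile', None, 'c:', 'configuration=')  # -c FILE
+#     opts.add('nodaemon', 'supervisord.nodaemon', 'n', 'nodaemon',
+#              flag=1, default=0)                           # boolean flag
+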
+ def realize(self, args=..., doc=..., progname=...): # -> None:
+ """Realize a configuration.
+
+ Optional arguments:
+
+ args -- the command line arguments, less the program name
+ (default is sys.argv[1:])
+
+ doc -- usage message (default is __main__.__doc__)
+ """
+ ...
+
+ def process_config(self, do_usage=...): # -> None:
+ """Process configuration data structure.
+
+ This includes reading config file if necessary, setting defaults etc.
+ """
+ ...
+
+ def process_config_file(self, do_usage): # -> None:
+ ...
+
+ def exists(self, path): # -> bool:
+ ...
+
+ def open(self, fn, mode=...): # -> TextIOWrapper:
+ ...
+
+ def get_plugins(self, parser, factory_key, section_prefix): # -> list[Unknown]:
+ ...
+
+ def import_spec(self, spec): # -> Any:
+ ...
+
+
+
+class ServerOptions(Options):
+ user = ...
+ sockchown = ...
+ sockchmod = ...
+ logfile = ...
+ loglevel = ...
+ pidfile = ...
+ passwdfile = ...
+ nodaemon = ...
+ silent = ...
+ httpservers = ...
+ unlink_pidfile = ...
+ unlink_socketfiles = ...
+ mood = ...
+ def __init__(self) -> None:
+ ...
+
+ def version(self, dummy): # -> None:
+ """Print version to stdout and exit(0).
+ """
+ ...
+
+ def getLogger(self, *args, **kwargs): # -> Logger:
+ ...
+
+ def default_configfile(self): # -> str | None:
+ ...
+
+ def realize(self, *arg, **kw): # -> None:
+ ...
+
+ def process_config(self, do_usage=...): # -> None:
+ ...
+
+ def read_config(self, fp):
+ ...
+
+ def process_groups_from_parser(self, parser): # -> list[Unknown]:
+ ...
+
+ def parse_fcgi_socket(self, sock, proc_uid, socket_owner, socket_mode, socket_backlog): # -> UnixStreamSocketConfig | InetStreamSocketConfig:
+ ...
+
+ def processes_from_section(self, parser, section, group_name, klass=...): # -> list[Unknown]:
+ ...
+
+ def server_configs_from_parser(self, parser): # -> list[Unknown]:
+ ...
+
+ def daemonize(self): # -> None:
+ ...
+
+ def write_pidfile(self): # -> None:
+ ...
+
+ def cleanup(self): # -> None:
+ ...
+
+ def close_httpservers(self): # -> None:
+ ...
+
+ def close_logger(self): # -> None:
+ ...
+
+ def setsignals(self): # -> None:
+ ...
+
+ def get_signal(self): # -> None:
+ ...
+
+ def openhttpservers(self, supervisord): # -> None:
+ ...
+
+ def get_autochildlog_name(self, name, identifier, channel):
+ ...
+
+ def clear_autochildlogdir(self): # -> None:
+ ...
+
+ def get_socket_map(self): # -> dict[Unknown, Unknown]:
+ ...
+
+ def cleanup_fds(self): # -> None:
+ ...
+
+ def kill(self, pid, signal): # -> None:
+ ...
+
+ def waitpid(self): # -> tuple[int | None, int | None]:
+ ...
+
+ def drop_privileges(self, user): # -> str | None:
+ """Drop privileges to become the specified user, which may be a
+ username or uid. Called for supervisord startup and when spawning
+ subprocesses. Returns None on success or a string error message if
+ privileges could not be dropped."""
+ ...
+
+ def set_uid_or_exit(self): # -> None:
+ """Set the uid of the supervisord process. Called during supervisord
+ startup only. No return value. Exits the process via usage() if
+ privileges could not be dropped."""
+ ...
+
+ def set_rlimits_or_exit(self): # -> None:
+ """Set the rlimits of the supervisord process. Called during
+ supervisord startup only. No return value. Exits the process via
+ usage() if any rlimits could not be set."""
+ ...
+
+ def make_logger(self): # -> None:
+ ...
+
+ def make_http_servers(self, supervisord): # -> list[Unknown]:
+ ...
+
+ def close_fd(self, fd): # -> None:
+ ...
+
+ def fork(self): # -> int:
+ ...
+
+ def dup2(self, frm, to): # -> None:
+ ...
+
+ def setpgrp(self): # -> None:
+ ...
+
+ def stat(self, filename): # -> stat_result:
+ ...
+
+ def write(self, fd, data): # -> int:
+ ...
+
+ def execve(self, filename, argv, env): # -> NoReturn:
+ ...
+
+ def mktempfile(self, suffix, prefix, dir):
+ ...
+
+ def remove(self, path): # -> None:
+ ...
+
+ def setumask(self, mask): # -> None:
+ ...
+
+ def get_path(self): # -> List[str]:
+ """Return a list corresponding to $PATH, or a default."""
+ ...
+
+ def get_pid(self): # -> int:
+ ...
+
+ def check_execv_args(self, filename, argv, st): # -> None:
+ ...
+
+ def reopenlogs(self): # -> None:
+ ...
+
+ def readfd(self, fd): # -> bytes:
+ ...
+
+ def chdir(self, dir): # -> None:
+ ...
+
+ def make_pipes(self, stderr=...): # -> dict[str, None]:
+ """ Create pipes for parent to child stdin/stdout/stderr
+ communications. Open fds in non-blocking mode so we can read them
+ in the mainloop without blocking. If stderr is False, don't
+ create a pipe for stderr. """
+ ...
+
+ def close_parent_pipes(self, pipes): # -> None:
+ ...
+
+ def close_child_pipes(self, pipes): # -> None:
+ ...
+
+
+
+class ClientOptions(Options):
+ positional_args_allowed = ...
+ interactive = ...
+ prompt = ...
+ serverurl = ...
+ username = ...
+ password = ...
+ history_file = ...
+ def __init__(self) -> None:
+ ...
+
+ def realize(self, *arg, **kw): # -> None:
+ ...
+
+ def read_config(self, fp):
+ ...
+
+ def getServerProxy(self):
+ ...
+
+
+
+_marker = ...
+class UnhosedConfigParser(ConfigParser.RawConfigParser):
+ mysection = ...
+ def __init__(self, *args, **kwargs) -> None:
+ ...
+
+ def read_string(self, string, source=...): # -> None:
+ '''Parse configuration data from a string. This is intended
+ to be used in tests only. We add this method for Py 2/3 compat.'''
+ ...
+
+ def read(self, filenames, **kwargs): # -> list[Unknown]:
+ '''Attempt to read and parse a list of filenames, returning a list
+ of filenames which were successfully parsed. This is a method of
+ RawConfigParser that is overridden to build self.section_to_file,
+ which is a mapping of section names to the files they came from.
+ '''
+ ...
+
+ def saneget(self, section, option, default=..., do_expand=..., expansions=...): # -> str:
+ ...
+
+ def getdefault(self, option, default=..., expansions=..., **kwargs): # -> str:
+ ...
+
+ def expand_here(self, here): # -> None:
+ ...
+
+
+
+class Config:
+ def __ne__(self, other) -> bool:
+ ...
+
+ def __lt__(self, other) -> bool:
+ ...
+
+ def __le__(self, other) -> bool:
+ ...
+
+ def __gt__(self, other) -> bool:
+ ...
+
+ def __ge__(self, other) -> bool:
+ ...
+
+ def __repr__(self): # -> str:
+ ...
+
+
+
+class ProcessConfig(Config):
+ req_param_names = ...
+ optional_param_names = ...
+ def __init__(self, options, **params) -> None:
+ ...
+
+ def __eq__(self, other) -> bool:
+ ...
+
+ def get_path(self):
+ '''Return a list corresponding to $PATH that is configured to be set
+ in the process environment, or the system default.'''
+ ...
+
+ def create_autochildlogs(self): # -> None:
+ ...
+
+ def make_process(self, group=...): # -> Subprocess:
+ ...
+
+ def make_dispatchers(self, proc): # -> tuple[dict[Unknown, Unknown], Unknown]:
+ ...
+
+
+
+class EventListenerConfig(ProcessConfig):
+ def make_dispatchers(self, proc): # -> tuple[dict[Unknown, Unknown], Unknown]:
+ ...
+
+
+
+class FastCGIProcessConfig(ProcessConfig):
+ def make_process(self, group=...): # -> FastCGISubprocess:
+ ...
+
+ def make_dispatchers(self, proc): # -> tuple[dict[Unknown, Unknown], Unknown]:
+ ...
+
+
+
+class ProcessGroupConfig(Config):
+ def __init__(self, options, name, priority, process_configs) -> None:
+ ...
+
+ def __eq__(self, other) -> bool:
+ ...
+
+ def after_setuid(self): # -> None:
+ ...
+
+ def make_group(self): # -> ProcessGroup:
+ ...
+
+
+
+class EventListenerPoolConfig(Config):
+ def __init__(self, options, name, priority, process_configs, buffer_size, pool_events, result_handler) -> None:
+ ...
+
+ def __eq__(self, other) -> bool:
+ ...
+
+ def after_setuid(self): # -> None:
+ ...
+
+ def make_group(self): # -> EventListenerPool:
+ ...
+
+
+
+class FastCGIGroupConfig(ProcessGroupConfig):
+ def __init__(self, options, name, priority, process_configs, socket_config) -> None:
+ ...
+
+ def __eq__(self, other) -> bool:
+ ...
+
+ def make_group(self): # -> FastCGIProcessGroup:
+ ...
+
+
+
+def readFile(filename, offset, length): # -> bytes:
+ """ Read length bytes from the file named by filename starting at
+ offset """
+ ...
+
+def tailFile(filename, offset, length): # -> list[str | int | bool] | list[str | Unknown | bool]:
+ """
+ Read length bytes from the file named by filename starting at
+ offset, automatically increasing offset and setting overflow
+ flag if log size has grown beyond (offset + length). If length
+ bytes are not available, as many bytes as are available are returned.
+ """
+ ...
+
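+# Illustrative call, not part of the generated stub (names are hypothetical):
+#
+#     data, new_offset, overflow = tailFile('/var/log/myapp.log', 0, 1024)
+#     # data: up to 1024 bytes of log text; new_offset: where to resume;
+#     # overflow: True if the log grew past offset + length meanwhile
+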
+def decode_wait_status(sts): # -> tuple[int, str] | tuple[Literal[-1], Unknown | str] | tuple[Literal[-1], Unknown]:
+ """Decode the status returned by wait() or waitpid().
+
+ Return a tuple (exitstatus, message) where exitstatus is the exit
+ status, or -1 if the process was killed by a signal; and message
+ is a message telling what happened. It is the caller's
+ responsibility to display the message.
+ """
+ ...
+
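+# Illustrative call, not part of the generated stub, mirroring how supervisord
+# reaps children:
+#
+#     import os
+#     pid, sts = os.waitpid(-1, os.WNOHANG)
+#     if pid:
+#         exitstatus, message = decode_wait_status(sts)
+#         # exitstatus >= 0 for a normal exit, -1 if killed by a signal
+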
+_signames = ...
+def signame(sig):
+ """Return a symbolic name for a signal.
+
+ Return "signal NNN" if there is no corresponding SIG name in the
+ signal module.
+ """
+ ...
+
+class SignalReceiver:
+ def __init__(self) -> None:
+ ...
+
+ def receive(self, sig, frame): # -> None:
+ ...
+
+ def get_signal(self): # -> None:
+ ...
+
+
+
+def expand(s, expansions, name):
+ ...
+
+def make_namespec(group_name, process_name): # -> str:
+ ...
+
+def split_namespec(namespec): # -> tuple[Unknown, Unknown | None]:
+ ...
+
+class ProcessException(Exception):
+ """ Specialized exceptions used when attempting to start a process """
+ ...
+
+
+class BadCommand(ProcessException):
+ """ Indicates the command could not be parsed properly. """
+ ...
+
+
+class NotExecutable(ProcessException):
+ """ Indicates that the filespec cannot be executed because its path
+ resolves to a file which is not executable, or which is a directory. """
+ ...
+
+
+class NotFound(ProcessException):
+ """ Indicates that the filespec cannot be executed because it could not
+ be found """
+ ...
+
+
+class NoPermission(ProcessException):
+ """ Indicates that the file cannot be executed because the supervisor
+ process does not possess the appropriate UNIX filesystem permission
+ to execute the file. """
+ ...
diff --git a/manager/typings/supervisor/pidproxy.pyi b/manager/typings/supervisor/pidproxy.pyi
new file mode 100644
index 00000000..21a4d3f4
--- /dev/null
+++ b/manager/typings/supervisor/pidproxy.pyi
@@ -0,0 +1,33 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+""" An executable which proxies for a subprocess; upon a signal, it sends that
+signal to the process identified by a pidfile. """
+class PidProxy:
+ pid = ...
+ def __init__(self, args) -> None:
+ ...
+
+ def go(self): # -> None:
+ ...
+
+ def usage(self): # -> None:
+ ...
+
+ def setsignals(self): # -> None:
+ ...
+
+ def reap(self, sig, frame): # -> None:
+ ...
+
+ def passtochild(self, sig, frame): # -> None:
+ ...
+
+
+
+def main(): # -> None:
+ ...
+
+if __name__ == '__main__':
+ ...
diff --git a/manager/typings/supervisor/poller.pyi b/manager/typings/supervisor/poller.pyi
new file mode 100644
index 00000000..f4b0d992
--- /dev/null
+++ b/manager/typings/supervisor/poller.pyi
@@ -0,0 +1,127 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+class BasePoller:
+ def __init__(self, options) -> None:
+ ...
+
+ def initialize(self): # -> None:
+ ...
+
+ def register_readable(self, fd):
+ ...
+
+ def register_writable(self, fd):
+ ...
+
+ def unregister_readable(self, fd):
+ ...
+
+ def unregister_writable(self, fd):
+ ...
+
+ def poll(self, timeout):
+ ...
+
+ def before_daemonize(self): # -> None:
+ ...
+
+ def after_daemonize(self): # -> None:
+ ...
+
+ def close(self): # -> None:
+ ...
+
+
+
+class SelectPoller(BasePoller):
+ def initialize(self): # -> None:
+ ...
+
+ def register_readable(self, fd): # -> None:
+ ...
+
+ def register_writable(self, fd): # -> None:
+ ...
+
+ def unregister_readable(self, fd): # -> None:
+ ...
+
+ def unregister_writable(self, fd): # -> None:
+ ...
+
+ def unregister_all(self): # -> None:
+ ...
+
+ def poll(self, timeout): # -> tuple[list[Unknown], list[Unknown]] | tuple[List[Any], List[Any]]:
+ ...
+
+
+
+class PollPoller(BasePoller):
+ def initialize(self): # -> None:
+ ...
+
+ def register_readable(self, fd): # -> None:
+ ...
+
+ def register_writable(self, fd): # -> None:
+ ...
+
+ def unregister_readable(self, fd): # -> None:
+ ...
+
+ def unregister_writable(self, fd): # -> None:
+ ...
+
+ def poll(self, timeout): # -> tuple[list[Unknown], list[Unknown]]:
+ ...
+
+
+
+class KQueuePoller(BasePoller):
+ '''
+ Wrapper for select.kqueue()/kevent()
+ '''
+ max_events = ...
+ def initialize(self): # -> None:
+ ...
+
+ def register_readable(self, fd): # -> None:
+ ...
+
+ def register_writable(self, fd): # -> None:
+ ...
+
+ def unregister_readable(self, fd): # -> None:
+ ...
+
+ def unregister_writable(self, fd): # -> None:
+ ...
+
+ def poll(self, timeout): # -> tuple[list[Unknown], list[Unknown]]:
+ ...
+
+ def before_daemonize(self): # -> None:
+ ...
+
+ def after_daemonize(self): # -> None:
+ ...
+
+ def close(self): # -> None:
+ ...
+
+
+
+def implements_poll(): # -> bool:
+ ...
+
+def implements_kqueue(): # -> bool:
+ ...
+
+if implements_kqueue():
+ Poller = ...
+elif implements_poll():
+ Poller = ...
+else:
+ Poller = ...
diff --git a/manager/typings/supervisor/process.pyi b/manager/typings/supervisor/process.pyi
new file mode 100644
index 00000000..df784aa3
--- /dev/null
+++ b/manager/typings/supervisor/process.pyi
@@ -0,0 +1,224 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+import functools
+
+@functools.total_ordering
+class Subprocess:
+ """A class to manage a subprocess."""
+ pid = ...
+ config = ...
+ state = ...
+ listener_state = ...
+ event = ...
+ laststart = ...
+ laststop = ...
+ laststopreport = ...
+ delay = ...
+ administrative_stop = ...
+ system_stop = ...
+ killing = ...
+ backoff = ...
+ dispatchers = ...
+ pipes = ...
+ exitstatus = ...
+ spawnerr = ...
+ group = ...
+ def __init__(self, config) -> None:
+ """Constructor.
+
+ Argument is a ProcessConfig instance.
+ """
+ ...
+
+ def removelogs(self): # -> None:
+ ...
+
+ def reopenlogs(self): # -> None:
+ ...
+
+ def drain(self): # -> None:
+ ...
+
+ def write(self, chars): # -> None:
+ ...
+
+ def get_execv_args(self): # -> tuple[str | None, List[str]]:
+ """Internal: turn a program name into a file name, using $PATH,
+ make sure it exists / is executable, raising a ProcessException
+ if not """
+ ...
+
+ event_map = ...
+ def change_state(self, new_state, expected=...): # -> Literal[False] | None:
+ ...
+
+ def record_spawnerr(self, msg): # -> None:
+ ...
+
+ def spawn(self): # -> None:
+ """Start the subprocess. It must not be running already.
+
+ Return the process id. If the fork() call fails, return None.
+ """
+ ...
+
+ def stop(self): # -> str | None:
+ """ Administrative stop """
+ ...
+
+ def stop_report(self): # -> None:
+ """ Log a 'waiting for x to stop' message with throttling. """
+ ...
+
+ def give_up(self): # -> None:
+ ...
+
+ def kill(self, sig): # -> str | None:
+ """Send a signal to the subprocess with the intention to kill
+ it (to make it exit). This may or may not actually kill it.
+
+ Return None if the signal was sent, or an error message string
+ if an error occurred or if the subprocess is not running.
+ """
+ ...
+
+ def signal(self, sig): # -> str | None:
+ """Send a signal to the subprocess, without intending to kill it.
+
+ Return None if the signal was sent, or an error message string
+ if an error occurred or if the subprocess is not running.
+ """
+ ...
+
+ def finish(self, pid, sts): # -> None:
+ """ The process was reaped and we need to report and manage its state
+ """
+ ...
+
+ def set_uid(self): # -> None:
+ ...
+
+ def __lt__(self, other) -> bool:
+ ...
+
+ def __eq__(self, other) -> bool:
+ ...
+
+ def __repr__(self): # -> str:
+ ...
+
+ def get_state(self): # -> int | None:
+ ...
+
+ def transition(self): # -> None:
+ ...
+
+
+
+class FastCGISubprocess(Subprocess):
+ """Extends Subprocess class to handle FastCGI subprocesses"""
+ def __init__(self, config) -> None:
+ ...
+
+ def before_spawn(self): # -> None:
+ """
+ The FastCGI socket needs to be created by the parent before we fork
+ """
+ ...
+
+ def spawn(self): # -> None:
+ """
+ Overrides Subprocess.spawn() so we can hook in before it happens
+ """
+ ...
+
+ def after_finish(self): # -> None:
+ """
+ Releases reference to FastCGI socket when process is reaped
+ """
+ ...
+
+ def finish(self, pid, sts): # -> None:
+ """
+ Overrides Subprocess.finish() so we can hook in after it happens
+ """
+ ...
+
+
+
+@functools.total_ordering
+class ProcessGroupBase:
+ def __init__(self, config) -> None:
+ ...
+
+ def __lt__(self, other) -> bool:
+ ...
+
+ def __eq__(self, other) -> bool:
+ ...
+
+ def __repr__(self): # -> str:
+ ...
+
+ def removelogs(self): # -> None:
+ ...
+
+ def reopenlogs(self): # -> None:
+ ...
+
+ def stop_all(self): # -> None:
+ ...
+
+ def get_unstopped_processes(self): # -> list[Unknown]:
+ """ Processes which aren't in a state that is considered 'stopped' """
+ ...
+
+ def get_dispatchers(self): # -> dict[Unknown, Unknown]:
+ ...
+
+ def before_remove(self): # -> None:
+ ...
+
+
+
+class ProcessGroup(ProcessGroupBase):
+ def transition(self): # -> None:
+ ...
+
+
+
+class FastCGIProcessGroup(ProcessGroup):
+ def __init__(self, config, **kwargs) -> None:
+ ...
+
+
+
+class EventListenerPool(ProcessGroupBase):
+ def __init__(self, config) -> None:
+ ...
+
+ def handle_rejected(self, event): # -> None:
+ ...
+
+ def transition(self): # -> None:
+ ...
+
+ def before_remove(self): # -> None:
+ ...
+
+ def dispatch(self): # -> None:
+ ...
+
+
+
+class GlobalSerial:
+ def __init__(self) -> None:
+ ...
+
+
+
+GlobalSerial = ...
+def new_serial(inst):
+ ...
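
Both Subprocess and ProcessGroupBase above are decorated with @functools.total_ordering, which derives the remaining comparison operators from the declared __eq__ and __lt__. A self-contained illustration (the Named class is hypothetical, not part of supervisor):

    import functools

    @functools.total_ordering
    class Named:
        def __init__(self, name: str) -> None:
            self.name = name

        def __eq__(self, other) -> bool:
            return self.name == other.name

        def __lt__(self, other) -> bool:
            return self.name < other.name

    # <=, > and >= are derived by the decorator from __lt__ and __eq__
    assert Named("a") <= Named("b")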
diff --git a/manager/typings/supervisor/rpcinterface.pyi b/manager/typings/supervisor/rpcinterface.pyi
new file mode 100644
index 00000000..df40334a
--- /dev/null
+++ b/manager/typings/supervisor/rpcinterface.pyi
@@ -0,0 +1,336 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+API_VERSION = ...
+class SupervisorNamespaceRPCInterface:
+ def __init__(self, supervisord) -> None:
+ ...
+
+ def getAPIVersion(self): # -> Literal['3.0']:
+ """ Return the version of the RPC API used by supervisord
+
+ @return string version version id
+ """
+ ...
+
+ getVersion = ...
+ def getSupervisorVersion(self): # -> str:
+ """ Return the version of the supervisor package in use by supervisord
+
+ @return string version version id
+ """
+ ...
+
+ def getIdentification(self):
+ """ Return identifying string of supervisord
+
+ @return string identifier identifying string
+ """
+ ...
+
+ def getState(self): # -> dict[str, Unknown | None]:
+ """ Return current state of supervisord as a struct
+
+ @return struct A struct with keys int statecode, string statename
+ """
+ ...
+
+ def getPID(self):
+ """ Return the PID of supervisord
+
+ @return int PID
+ """
+ ...
+
+ def readLog(self, offset, length): # -> str:
+ """ Read length bytes from the main log starting at offset
+
+ @param int offset offset to start reading from.
+ @param int length number of bytes to read from the log.
+ @return string result Bytes of log
+ """
+ ...
+
+ readMainLog = ...
+ def clearLog(self): # -> Literal[True]:
+ """ Clear the main log.
+
+ @return boolean result always returns True unless error
+ """
+ ...
+
+ def shutdown(self): # -> Literal[True]:
+ """ Shut down the supervisor process
+
+ @return boolean result always returns True unless error
+ """
+ ...
+
+ def restart(self): # -> Literal[True]:
+ """ Restart the supervisor process
+
+ @return boolean result always returns True unless error
+ """
+ ...
+
+ def reloadConfig(self): # -> list[list[list[Unknown]]]:
+ """
+ Reload the configuration.
+
+ The result contains three arrays containing names of process
+ groups:
+
+ * `added` gives the process groups that have been added
+ * `changed` gives the process groups whose contents have
+ changed
+ * `removed` gives the process groups that are no longer
+ in the configuration
+
+ @return array result [[added, changed, removed]]
+
+ """
+ ...
+
+ def addProcessGroup(self, name): # -> Literal[True]:
+ """ Update the config for a running process from config file.
+
+ @param string name name of process group to add
+ @return boolean result true if successful
+ """
+ ...
+
+ def removeProcessGroup(self, name): # -> Literal[True]:
+ """ Remove a stopped process from the active configuration.
+
+ @param string name name of process group to remove
+ @return boolean result Indicates whether the removal was successful
+ """
+ ...
+
+ def startProcess(self, name, wait=...): # -> () -> (Type[NOT_DONE_YET] | Literal[True]) | Literal[True]:
+ """ Start a process
+
+ @param string name Process name (or ``group:name``, or ``group:*``)
+ @param boolean wait Wait for process to be fully started
+ @return boolean result Always true unless error
+
+ """
+ ...
+
+ def startProcessGroup(self, name, wait=...): # -> (processes: Unknown = processes, predicate: Unknown = predicate, func: Unknown = func, extra_kwargs: Unknown = extra_kwargs, callbacks: Unknown = callbacks, results: Unknown = results) -> (Unknown | Type[NOT_DONE_YET]):
+ """ Start all processes in the group named 'name'
+
+ @param string name The group name
+ @param boolean wait Wait for each process to be fully started
+ @return array result An array of process status info structs
+ """
+ ...
+
+ def startAllProcesses(self, wait=...): # -> (processes: Unknown = processes, predicate: Unknown = predicate, func: Unknown = func, extra_kwargs: Unknown = extra_kwargs, callbacks: Unknown = callbacks, results: Unknown = results) -> (Unknown | Type[NOT_DONE_YET]):
+ """ Start all processes listed in the configuration file
+
+ @param boolean wait Wait for each process to be fully started
+ @return array result An array of process status info structs
+ """
+ ...
+
+ def stopProcess(self, name, wait=...): # -> () -> (Type[NOT_DONE_YET] | Literal[True]) | Literal[True]:
+ """ Stop a process named by name
+
+ @param string name The name of the process to stop (or 'group:name')
+ @param boolean wait Wait for the process to be fully stopped
+ @return boolean result Always returns True unless error
+ """
+ ...
+
+ def stopProcessGroup(self, name, wait=...): # -> (processes: Unknown = processes, predicate: Unknown = predicate, func: Unknown = func, extra_kwargs: Unknown = extra_kwargs, callbacks: Unknown = callbacks, results: Unknown = results) -> (Unknown | Type[NOT_DONE_YET]):
+ """ Stop all processes in the process group named 'name'
+
+ @param string name The group name
+ @param boolean wait Wait for each process to be fully stopped
+ @return array result An array of process status info structs
+ """
+ ...
+
+ def stopAllProcesses(self, wait=...): # -> (processes: Unknown = processes, predicate: Unknown = predicate, func: Unknown = func, extra_kwargs: Unknown = extra_kwargs, callbacks: Unknown = callbacks, results: Unknown = results) -> (Unknown | Type[NOT_DONE_YET]):
+ """ Stop all processes in the process list
+
+ @param boolean wait Wait for each process to be fully stopped
+ @return array result An array of process status info structs
+ """
+ ...
+
+ def signalProcess(self, name, signal): # -> Literal[True]:
+ """ Send an arbitrary UNIX signal to the process named by name
+
+ @param string name Name of the process to signal (or 'group:name')
+ @param string signal Signal to send, as name ('HUP') or number ('1')
+ @return boolean
+ """
+ ...
+
+ def signalProcessGroup(self, name, signal): # -> Type[NOT_DONE_YET] | list[Unknown]:
+ """ Send a signal to all processes in the group named 'name'
+
+ @param string name The group name
+ @param string signal Signal to send, as name ('HUP') or number ('1')
+ @return array
+ """
+ ...
+
+ def signalAllProcesses(self, signal): # -> Type[NOT_DONE_YET] | list[Unknown]:
+ """ Send a signal to all processes in the process list
+
+ @param string signal Signal to send, as name ('HUP') or number ('1')
+ @return array An array of process status info structs
+ """
+ ...
+
+ def getAllConfigInfo(self): # -> list[Unknown]:
+ """ Get info about all available process configurations. Each struct
+ represents a single process (i.e. groups get flattened).
+
+ @return array result An array of process config info structs
+ """
+ ...
+
+ def getProcessInfo(self, name): # -> dict[str, Unknown | int | str | None]:
+ """ Get info about a process named name
+
+ @param string name The name of the process (or 'group:name')
+ @return struct result A structure containing data about the process
+ """
+ ...
+
+ def getAllProcessInfo(self): # -> list[Unknown]:
+ """ Get info about all processes
+
+ @return array result An array of process status results
+ """
+ ...
+
+ def readProcessStdoutLog(self, name, offset, length): # -> str:
+ """ Read length bytes from name's stdout log starting at offset
+
+ @param string name the name of the process (or 'group:name')
+ @param int offset offset to start reading from.
+ @param int length number of bytes to read from the log.
+ @return string result Bytes of log
+ """
+ ...
+
+ readProcessLog = ...
+ def readProcessStderrLog(self, name, offset, length): # -> str:
+ """ Read length bytes from name's stderr log starting at offset
+
+ @param string name the name of the process (or 'group:name')
+ @param int offset offset to start reading from.
+ @param int length number of bytes to read from the log.
+ @return string result Bytes of log
+ """
+ ...
+
+ def tailProcessStdoutLog(self, name, offset, length): # -> list[str | int | bool]:
+ """
+ Provides a more efficient way to tail the (stdout) log than
+ readProcessStdoutLog(). Use readProcessStdoutLog() to read
+ chunks and tailProcessStdoutLog() to tail.
+
+ Requests (length) bytes from the (name)'s log, starting at
+ (offset). If the total log size is greater than (offset +
+ length), the overflow flag is set and the (offset) is
+ automatically increased to position the buffer at the end of
+ the log. If less than (length) bytes are available, the
+ maximum number of available bytes will be returned. The (offset)
+ returned is always the last offset in the log plus one.
+
+ @param string name the name of the process (or 'group:name')
+ @param int offset offset to start reading from
+ @param int length maximum number of bytes to return
+ @return array result [string bytes, int offset, bool overflow]
+ """
+ ...
+
+ tailProcessLog = ...
+ def tailProcessStderrLog(self, name, offset, length): # -> list[str | int | bool]:
+ """
+ Provides a more efficient way to tail the (stderr) log than
+ readProcessStderrLog(). Use readProcessStderrLog() to read
+ chunks and tailProcessStderrLog() to tail.
+
+ Requests (length) bytes from the (name)'s log, starting at
+ (offset). If the total log size is greater than (offset +
+ length), the overflow flag is set and the (offset) is
+ automatically increased to position the buffer at the end of
+ the log. If less than (length) bytes are available, the
+ maximum number of available bytes will be returned. The (offset)
+ returned is always the last offset in the log plus one.
+
+ @param string name the name of the process (or 'group:name')
+ @param int offset offset to start reading from
+ @param int length maximum number of bytes to return
+ @return array result [string bytes, int offset, bool overflow]
+ """
+ ...
+
+ def clearProcessLogs(self, name): # -> Literal[True]:
+ """ Clear the stdout and stderr logs for the named process and
+ reopen them.
+
+ @param string name The name of the process (or 'group:name')
+ @return boolean result Always True unless error
+ """
+ ...
+
+ clearProcessLog = ...
+ def clearAllProcessLogs(self): # -> () -> (Type[NOT_DONE_YET] | list[Unknown]):
+ """ Clear all process log files
+
+ @return array result An array of process status info structs
+ """
+ ...
+
+ def sendProcessStdin(self, name, chars): # -> Literal[True]:
+ """ Send a string of chars to the stdin of the process name.
+ If non-7-bit data is sent (unicode), it is encoded to utf-8
+ before being sent to the process' stdin. If chars is not a
+ string or is not unicode, raise INCORRECT_PARAMETERS. If the
+ process is not running, raise NOT_RUNNING. If the process'
+ stdin cannot accept input (e.g. it was closed by the child
+ process), raise NO_FILE.
+
+ @param string name The process name to send to (or 'group:name')
+ @param string chars The character data to send to the process
+ @return boolean result Always returns True unless error
+ """
+ ...
+
+ def sendRemoteCommEvent(self, type, data): # -> Literal[True]:
+ """ Send an event that will be received by event listener
+ subprocesses subscribing to the RemoteCommunicationEvent.
+
+ @param string type String for the "type" key in the event header
+ @param string data Data for the event body
+ @return boolean Always returns True unless error
+ """
+ ...
+
+
+
+def make_allfunc(processes, predicate, func, **extra_kwargs): # -> (processes: Unknown = processes, predicate: Unknown = predicate, func: Unknown = func, extra_kwargs: Unknown = extra_kwargs, callbacks: Unknown = callbacks, results: Unknown = results) -> (Unknown | Type[NOT_DONE_YET]):
+ """ Return a closure representing a function that calls a
+ function for every process, and returns a result """
+ ...
+
+def isRunning(process): # -> bool:
+ ...
+
+def isNotRunning(process): # -> bool:
+ ...
+
+def isSignallable(process): # -> Literal[True] | None:
+ ...
+
+def make_main_rpcinterface(supervisord): # -> SupervisorNamespaceRPCInterface:
+ ...
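
These stubs describe supervisord's XML-RPC surface, which is what the manager talks to at runtime. A hedged usage sketch with the standard-library client, assuming a supervisord instance listening on its default inet HTTP port (9001, per the supervisorctl usage text below) and a hypothetical program name 'kresd':

    from xmlrpc.client import ServerProxy

    proxy = ServerProxy("http://localhost:9001/RPC2")
    print(proxy.supervisor.getAPIVersion())          # e.g. '3.0'
    info = proxy.supervisor.getProcessInfo("kresd")  # hypothetical name
    print(info["statename"])                         # e.g. 'RUNNING'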
diff --git a/manager/typings/supervisor/socket_manager.pyi b/manager/typings/supervisor/socket_manager.pyi
new file mode 100644
index 00000000..05b26f0b
--- /dev/null
+++ b/manager/typings/supervisor/socket_manager.pyi
@@ -0,0 +1,59 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+class Proxy:
+ """ Class for wrapping a shared resource object and getting
+ notified when it's deleted
+ """
+ def __init__(self, object, **kwargs) -> None:
+ ...
+
+ def __del__(self): # -> None:
+ ...
+
+ def __getattr__(self, name): # -> Any:
+ ...
+
+
+
+class ReferenceCounter:
+ """ Class for tracking references to a shared resource
+ """
+ def __init__(self, **kwargs) -> None:
+ ...
+
+ def get_count(self): # -> int:
+ ...
+
+ def increment(self): # -> None:
+ ...
+
+ def decrement(self): # -> None:
+ ...
+
+
+
+class SocketManager:
+ """ Class for managing sockets in servers that create/bind/listen
+ before forking multiple child processes to accept().
+ Sockets are managed at the process group level and reference counted
+ at the process level because that's really the only place to hook in.
+ """
+ def __init__(self, socket_config, **kwargs) -> None:
+ ...
+
+ def __repr__(self): # -> str:
+ ...
+
+ def config(self): # -> Unknown:
+ ...
+
+ def is_prepared(self): # -> bool:
+ ...
+
+ def get_socket(self): # -> Proxy:
+ ...
+
+ def get_socket_ref_count(self): # -> int:
+ ...
diff --git a/manager/typings/supervisor/states.pyi b/manager/typings/supervisor/states.pyi
new file mode 100644
index 00000000..d0a21f5c
--- /dev/null
+++ b/manager/typings/supervisor/states.pyi
@@ -0,0 +1,44 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+class ProcessStates:
+ STOPPED = ...
+ STARTING = ...
+ RUNNING = ...
+ BACKOFF = ...
+ STOPPING = ...
+ EXITED = ...
+ FATAL = ...
+ UNKNOWN = ...
+
+
+STOPPED_STATES = ...
+RUNNING_STATES = ...
+SIGNALLABLE_STATES = ...
+def getProcessStateDescription(code): # -> None:
+ ...
+
+class SupervisorStates:
+ FATAL = ...
+ RUNNING = ...
+ RESTARTING = ...
+ SHUTDOWN = ...
+
+
+def getSupervisorStateDescription(code): # -> None:
+ ...
+
+class EventListenerStates:
+ READY = ...
+ BUSY = ...
+ ACKNOWLEDGED = ...
+ UNKNOWN = ...
+
+
+def getEventListenerStateDescription(code): # -> None:
+ ...
+
+_process_states_by_code = ...
+_supervisor_states_by_code = ...
+_eventlistener_states_by_code = ...
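
The stub elides the state constants. For orientation, supervisor's own source defines them as small integers and groups them into the STOPPED_STATES/RUNNING_STATES tuples declared above (values reproduced from supervisor, not from this stub):

    class ProcessStates:
        STOPPED = 0
        STARTING = 10
        RUNNING = 20
        BACKOFF = 30
        STOPPING = 40
        EXITED = 100
        FATAL = 200
        UNKNOWN = 1000

    STOPPED_STATES = (ProcessStates.STOPPED, ProcessStates.EXITED,
                      ProcessStates.FATAL, ProcessStates.UNKNOWN)
    RUNNING_STATES = (ProcessStates.RUNNING, ProcessStates.BACKOFF,
                      ProcessStates.STARTING)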
diff --git a/manager/typings/supervisor/supervisorctl.pyi b/manager/typings/supervisor/supervisorctl.pyi
new file mode 100644
index 00000000..3034638a
--- /dev/null
+++ b/manager/typings/supervisor/supervisorctl.pyi
@@ -0,0 +1,280 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+import cmd
+import threading
+
+"""supervisorctl -- control applications run by supervisord from the cmd line.
+
+Usage: %s [options] [action [arguments]]
+
+Options:
+-c/--configuration FILENAME -- configuration file path (searches if not given)
+-h/--help -- print usage message and exit
+-i/--interactive -- start an interactive shell after executing commands
+-s/--serverurl URL -- URL on which supervisord server is listening
+ (default "http://localhost:9001").
+-u/--username USERNAME -- username to use for authentication with server
+-p/--password PASSWORD -- password to use for authentication with server
+-r/--history-file -- keep a readline history (if readline is available)
+
+action [arguments] -- see below
+
+Actions are commands like "tail" or "stop". If -i is specified or no action is
+specified on the command line, a "shell" interpreting actions typed
+interactively is started. Use the action "help" to find out about available
+actions.
+"""
+class LSBInitExitStatuses:
+ SUCCESS = ...
+ GENERIC = ...
+ INVALID_ARGS = ...
+ UNIMPLEMENTED_FEATURE = ...
+ INSUFFICIENT_PRIVILEGES = ...
+ NOT_INSTALLED = ...
+ NOT_RUNNING = ...
+
+
+class LSBStatusExitStatuses:
+ NOT_RUNNING = ...
+ UNKNOWN = ...
+
+
+DEAD_PROGRAM_FAULTS = ...
+class fgthread(threading.Thread):
+ """ A subclass of threading.Thread, with a kill() method.
+ To be used for foreground output/error streaming.
+ http://mail.python.org/pipermail/python-list/2004-May/260937.html
+ """
+ def __init__(self, program, ctl) -> None:
+ ...
+
+ def start(self): # -> None:
+ ...
+
+ def run(self): # -> None:
+ ...
+
+ def globaltrace(self, frame, why, arg): # -> (frame: Unknown, why: Unknown, arg: Unknown) -> (frame: Unknown, why: Unknown, arg: Unknown) -> Unknown | None:
+ ...
+
+ def localtrace(self, frame, why, arg): # -> (frame: Unknown, why: Unknown, arg: Unknown) -> Unknown:
+ ...
+
+ def kill(self): # -> None:
+ ...
+
+
+
+class Controller(cmd.Cmd):
+ def __init__(self, options, completekey=..., stdin=..., stdout=...) -> None:
+ ...
+
+ def emptyline(self): # -> None:
+ ...
+
+ def default(self, line): # -> None:
+ ...
+
+ def exec_cmdloop(self, args, options): # -> None:
+ ...
+
+ def set_exitstatus_from_xmlrpc_fault(self, faultcode, ignored_faultcode=...): # -> None:
+ ...
+
+ def onecmd(self, line): # -> Any | None:
+ """ Override the onecmd method to:
+ - catch and print all exceptions
+ - call 'do_foo' on plugins rather than ourself
+ """
+ ...
+
+ def output(self, message): # -> None:
+ ...
+
+ def get_supervisor(self): # -> Any:
+ ...
+
+ def get_server_proxy(self, namespace=...): # -> Any:
+ ...
+
+ def upcheck(self): # -> bool:
+ ...
+
+ def complete(self, text, state, line=...): # -> str | None:
+ """Completer function that Cmd will register with readline using
+ readline.set_completer(). This function will be called by readline
+ as complete(text, state) where text is a fragment to complete and
+ state is an integer (0..n). Each call returns a string with a new
+ completion. When no more are available, None is returned."""
+ ...
+
+ def do_help(self, arg): # -> None:
+ ...
+
+ def help_help(self): # -> None:
+ ...
+
+ def do_EOF(self, arg): # -> Literal[1]:
+ ...
+
+ def help_EOF(self): # -> None:
+ ...
+
+
+
+def get_names(inst): # -> List[Unknown]:
+ ...
+
+class ControllerPluginBase:
+ name = ...
+ def __init__(self, controller) -> None:
+ ...
+
+ doc_header = ...
+ def do_help(self, arg): # -> None:
+ ...
+
+
+
+def not_all_langs(): # -> str | None:
+ ...
+
+def check_encoding(ctl): # -> None:
+ ...
+
+class DefaultControllerPlugin(ControllerPluginBase):
+ name = ...
+ listener = ...
+ def do_tail(self, arg): # -> None:
+ ...
+
+ def help_tail(self): # -> None:
+ ...
+
+ def do_maintail(self, arg): # -> None:
+ ...
+
+ def help_maintail(self): # -> None:
+ ...
+
+ def do_quit(self, arg):
+ ...
+
+ def help_quit(self): # -> None:
+ ...
+
+ do_exit = ...
+ def help_exit(self): # -> None:
+ ...
+
+ def do_status(self, arg): # -> None:
+ ...
+
+ def help_status(self): # -> None:
+ ...
+
+ def do_pid(self, arg): # -> None:
+ ...
+
+ def help_pid(self): # -> None:
+ ...
+
+ def do_start(self, arg): # -> None:
+ ...
+
+ def help_start(self): # -> None:
+ ...
+
+ def do_stop(self, arg): # -> None:
+ ...
+
+ def help_stop(self): # -> None:
+ ...
+
+ def do_signal(self, arg): # -> None:
+ ...
+
+ def help_signal(self): # -> None:
+ ...
+
+ def do_restart(self, arg): # -> None:
+ ...
+
+ def help_restart(self): # -> None:
+ ...
+
+ def do_shutdown(self, arg): # -> None:
+ ...
+
+ def help_shutdown(self): # -> None:
+ ...
+
+ def do_reload(self, arg): # -> None:
+ ...
+
+ def help_reload(self): # -> None:
+ ...
+
+ def do_avail(self, arg): # -> None:
+ ...
+
+ def help_avail(self): # -> None:
+ ...
+
+ def do_reread(self, arg): # -> None:
+ ...
+
+ def help_reread(self): # -> None:
+ ...
+
+ def do_add(self, arg): # -> None:
+ ...
+
+ def help_add(self): # -> None:
+ ...
+
+ def do_remove(self, arg): # -> None:
+ ...
+
+ def help_remove(self): # -> None:
+ ...
+
+ def do_update(self, arg): # -> None:
+ ...
+
+ def help_update(self): # -> None:
+ ...
+
+ def do_clear(self, arg): # -> None:
+ ...
+
+ def help_clear(self): # -> None:
+ ...
+
+ def do_open(self, arg): # -> None:
+ ...
+
+ def help_open(self): # -> None:
+ ...
+
+ def do_version(self, arg): # -> None:
+ ...
+
+ def help_version(self): # -> None:
+ ...
+
+ def do_fg(self, arg): # -> None:
+ ...
+
+ def help_fg(self, args=...): # -> None:
+ ...
+
+
+
+def main(args=..., options=...): # -> None:
+ ...
+
+if __name__ == "__main__":
+ ...
diff --git a/manager/typings/supervisor/supervisord.pyi b/manager/typings/supervisor/supervisord.pyi
new file mode 100644
index 00000000..5990f964
--- /dev/null
+++ b/manager/typings/supervisor/supervisord.pyi
@@ -0,0 +1,102 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+"""supervisord -- run a set of applications as daemons.
+
+Usage: %s [options]
+
+Options:
+-c/--configuration FILENAME -- configuration file path (searches if not given)
+-n/--nodaemon -- run in the foreground (same as 'nodaemon=true' in config file)
+-s/--silent -- no logs to stdout (maps to 'silent=true' in config file)
+-h/--help -- print this usage message and exit
+-v/--version -- print supervisord version number and exit
+-u/--user USER -- run supervisord as this user (or numeric uid)
+-m/--umask UMASK -- use this umask for daemon subprocess (default is 022)
+-d/--directory DIRECTORY -- directory to chdir to when daemonized
+-l/--logfile FILENAME -- use FILENAME as logfile path
+-y/--logfile_maxbytes BYTES -- use BYTES to limit the max size of logfile
+-z/--logfile_backups NUM -- number of backups to keep when max bytes reached
+-e/--loglevel LEVEL -- use LEVEL as log level (debug,info,warn,error,critical)
+-j/--pidfile FILENAME -- write a pid file for the daemon process to FILENAME
+-i/--identifier STR -- identifier used for this instance of supervisord
+-q/--childlogdir DIRECTORY -- the log directory for child process logs
+-k/--nocleanup -- prevent the process from performing cleanup (removal of
+ old automatic child log files) at startup.
+-a/--minfds NUM -- the minimum number of file descriptors for start success
+-t/--strip_ansi -- strip ansi escape codes from process output
+--minprocs NUM -- the minimum number of processes available for start success
+--profile_options OPTIONS -- run supervisord under profiler and output
+ results based on OPTIONS, which is a comma-sep'd
+ list of 'cumulative', 'calls', and/or 'callers',
+ e.g. 'cumulative,callers')
+"""
+class Supervisor:
+ stopping = ...
+ lastshutdownreport = ...
+ process_groups = ...
+ stop_groups = ...
+ def __init__(self, options) -> None:
+ ...
+
+ def main(self): # -> None:
+ ...
+
+ def run(self): # -> None:
+ ...
+
+ def diff_to_active(self): # -> tuple[list[Unknown], list[Unknown], list[Unknown]]:
+ ...
+
+ def add_process_group(self, config): # -> bool:
+ ...
+
+ def remove_process_group(self, name): # -> bool:
+ ...
+
+ def get_process_map(self): # -> dict[Unknown, Unknown]:
+ ...
+
+ def shutdown_report(self): # -> list[Unknown]:
+ ...
+
+ def ordered_stop_groups_phase_1(self): # -> None:
+ ...
+
+ def ordered_stop_groups_phase_2(self): # -> None:
+ ...
+
+ def runforever(self): # -> None:
+ ...
+
+ def tick(self, now=...): # -> None:
+ """ Send one or more 'tick' events when the timeslice related to
+ the period for the event type rolls over """
+ ...
+
+ def reap(self, once=..., recursionguard=...): # -> None:
+ ...
+
+ def handle_signal(self): # -> None:
+ ...
+
+ def get_state(self):
+ ...
+
+
+
+def timeslice(period, when): # -> int:
+ ...
+
+def profile(cmd, globals, locals, sort_order, callers): # -> None:
+ ...
+
+def main(args=..., test=...): # -> None:
+ ...
+
+def go(options): # -> None:
+ ...
+
+if __name__ == "__main__":
+ ...
diff --git a/manager/typings/supervisor/templating.pyi b/manager/typings/supervisor/templating.pyi
new file mode 100644
index 00000000..e1228263
--- /dev/null
+++ b/manager/typings/supervisor/templating.pyi
@@ -0,0 +1,476 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+from xml.etree.ElementTree import TreeBuilder
+
+from supervisor.compat import PY2, HTMLParser
+
+AUTOCLOSE = ...
+IGNOREEND = ...
+_BLANK = ...
+_SPACE = ...
+_EQUAL = ...
+_QUOTE = ...
+_OPEN_TAG_START = ...
+_CLOSE_TAG_START = ...
+_OPEN_TAG_END = ...
+_SELF_CLOSE = ...
+_OMITTED_TEXT = ...
+_COMMENT_START = ...
+_COMMENT_END = ...
+_PI_START = ...
+_PI_END = ...
+_AMPER_ESCAPED = ...
+_LT = ...
+_LT_ESCAPED = ...
+_QUOTE_ESCAPED = ...
+_XML_PROLOG_BEGIN = ...
+_ENCODING = ...
+_XML_PROLOG_END = ...
+_DOCTYPE_BEGIN = ...
+_PUBLIC = ...
+_DOCTYPE_END = ...
+if PY2:
+ def encode(text, encoding):
+ ...
+
+else:
+ def encode(text, encoding): # -> bytes:
+ ...
+
+def Replace(text, structure=...): # -> _MeldElementInterface:
+ ...
+
+class PyHelper:
+ def findmeld(self, node, name, default=...):
+ ...
+
+ def clone(self, node, parent=...): # -> _MeldElementInterface:
+ ...
+
+ def bfclone(self, node, parent=...): # -> _MeldElementInterface:
+ ...
+
+ def getiterator(self, node, tag=...): # -> list[Unknown]:
+ ...
+
+ def content(self, node, text, structure=...): # -> None:
+ ...
+
+
+
+helper = ...
+_MELD_NS_URL = ...
+_MELD_PREFIX = ...
+_MELD_LOCAL = ...
+_MELD_ID = ...
+_MELD_SHORT_ID = ...
+_XHTML_NS_URL = ...
+_XHTML_PREFIX = ...
+_XHTML_PREFIX_LEN = ...
+_marker = ...
+class doctype:
+ html_strict = ...
+ html = ...
+ xhtml_strict = ...
+ xhtml = ...
+
+
+class _MeldElementInterface:
+ parent = ...
+ attrib = ...
+ text = ...
+ tail = ...
+ structure = ...
+ def __init__(self, tag, attrib) -> None:
+ ...
+
+ def __repr__(self): # -> str:
+ ...
+
+ def __len__(self): # -> int:
+ ...
+
+ def __getitem__(self, index):
+ ...
+
+ def __getslice__(self, start, stop): # -> List[Unknown]:
+ ...
+
+ def getchildren(self): # -> list[Unknown]:
+ ...
+
+ def find(self, path):
+ ...
+
+ def findtext(self, path, default=...):
+ ...
+
+ def findall(self, path):
+ ...
+
+ def clear(self): # -> None:
+ ...
+
+ def get(self, key, default=...):
+ ...
+
+ def set(self, key, value): # -> None:
+ ...
+
+ def keys(self): # -> list[Unknown]:
+ ...
+
+ def items(self): # -> list[Unknown]:
+ ...
+
+ def getiterator(self, *ignored_args, **ignored_kw): # -> list[Unknown]:
+ ...
+
+ def __setitem__(self, index, element): # -> None:
+ ...
+
+ def __setslice__(self, start, stop, elements): # -> None:
+ ...
+
+ def append(self, element): # -> None:
+ ...
+
+ def insert(self, index, element): # -> None:
+ ...
+
+ def __delitem__(self, index): # -> None:
+ ...
+
+ def __delslice__(self, start, stop): # -> None:
+ ...
+
+ def remove(self, element): # -> None:
+ ...
+
+ def makeelement(self, tag, attrib): # -> _MeldElementInterface:
+ ...
+
+ def __mod__(self, other): # -> list[Unknown]:
+ """ Fill in the text values of meld nodes in tree; only
+ support dictionarylike operand (sequence operand doesn't seem
+ to make sense here)"""
+ ...
+
+ def fillmelds(self, **kw): # -> list[Unknown]:
+ """ Fill in the text values of meld nodes in tree using the
+ keyword arguments passed in; use the keyword keys as meld ids
+ and the keyword values as text that should fill in the node
+ text on which that meld id is found. Return a list of keys
+ from **kw that were not able to be found anywhere in the tree.
+ Never raises an exception. """
+ ...
+
+ def fillmeldhtmlform(self, **kw): # -> list[Unknown]:
+ """ Perform magic to 'fill in' HTML form element values from a
+ dictionary. Unlike 'fillmelds', the type of element being
+ 'filled' is taken into consideration.
+
+ Perform a 'findmeld' on each key in the dictionary and use the
+ value that corresponds to the key to perform mutation of the
+ tree, changing data in what is presumed to be one or more HTML
+ form elements according to the following rules::
+
+ If the found element is an 'input group' (its meld id ends
+ with the string ':inputgroup'), set the 'checked' attribute
+ on the appropriate subelement which has a 'value' attribute
+ which matches the dictionary value. Also remove the
+ 'checked' attribute from every other 'input' subelement of
+ the input group. If no input subelement's value matches the
+ dictionary value, this key is treated as 'unfilled'.
+
+ If the found element is an 'input type=text', 'input
+ type=hidden', 'input type=submit', 'input type=password',
+ 'input type=reset' or 'input type=file' element, replace its
+ 'value' attribute with the value.
+
+ If the found element is an 'input type=checkbox' or 'input
+ type='radio' element, set its 'checked' attribute to true if
+ the dict value is true, or remove its 'checked' attribute if
+ the dict value is false.
+
+ If the found element is a 'select' element and the value
+ exists in the 'value=' attribute of one of its 'option'
+ subelements, change that option's 'selected' attribute to
+ true and mark all other option elements as unselected. If
+ the select element does not contain an option with a value
+ that matches the dictionary value, do nothing and return
+ this key as unfilled.
+
+ If the found element is a 'textarea' or any other kind of
+ element, replace its text with the value.
+
+ If the element corresponding to the key is not found,
+ do nothing and treat the key as 'unfilled'.
+
+ Return a list of 'unfilled' keys, representing meld ids
+ present in the dictionary but not present in the element tree
+ or meld ids which could not be filled due to the lack of any
+ matching subelements for 'select' nodes or 'inputgroup' nodes.
+ """
+ ...
+
+ def findmeld(self, name, default=...):
+ """ Find a node in the tree that has a 'meld id' corresponding
+ to 'name'. Iterate over all subnodes recursively looking for a
+ node which matches. If we can't find the node, return None."""
+ ...
+
+ def findmelds(self): # -> list[Unknown]:
+ """ Find all nodes that have a meld id attribute and return
+ the found nodes in a list"""
+ ...
+
+ def findwithattrib(self, attrib, value=...): # -> list[Unknown]:
+ """ Find all nodes that have an attribute named 'attrib'. If
+ 'value' is not None, omit nodes on which the attribute value
+ does not compare equally to 'value'. Return the found nodes in
+ a list."""
+ ...
+
+ def repeat(self, iterable, childname=...): # -> list[Unknown]:
+ """repeats an element with values from an iterable. If
+ 'childname' is not None, repeat the element on which the
+ repeat is called, otherwise find the child element with a
+ 'meld:id' matching 'childname' and repeat that. The element
+ is repeated within its parent element (nodes that are created
+ as a result of a repeat share the same parent). This method
+ returns an iterable; the value of each iteration is a
+ two-sequence in the form (newelement, data). 'newelement' is
+ a clone of the template element (including clones of its
+ children) which has already been seated in its parent element
+ in the template. 'data' is a value from the passed in
+ iterable. Changing 'newelement' (typically based on values
+ from 'data') mutates the element 'in place'."""
+ ...
+
+ def replace(self, text, structure=...): # -> None:
+ """ Replace this element with a Replace node in our parent with
+ the text 'text' and return the index of our position in
+ our parent. If we have no parent, do nothing, and return None.
+ Pass the 'structure' flag to the replace node so it can do the right
+ thing at render time. """
+ ...
+
+ def content(self, text, structure=...): # -> None:
+ """ Delete this node's children and append a Replace node that
+ contains text. Always return None. Pass the 'structure' flag
+ to the replace node so it can do the right thing at render
+ time."""
+ ...
+
+ def attributes(self, **kw): # -> None:
+ """ Set attributes on this node. """
+ ...
+
+ def write_xmlstring(self, encoding=..., doctype=..., fragment=..., declaration=..., pipeline=...): # -> bytes:
+ ...
+
+ def write_xml(self, file, encoding=..., doctype=..., fragment=..., declaration=..., pipeline=...): # -> None:
+ """ Write XML to 'file' (which can be a filename or filelike object)
+
+ encoding - encoding string (if None, 'utf-8' encoding is assumed)
+ Must be a recognizable Python encoding type.
+ doctype - 3-tuple indicating name, pubid, system of doctype.
+ The default is to prevent a doctype from being emitted.
+ fragment - True if a 'fragment' should be emitted for this node (no
+ declaration, no doctype). This causes both the
+ 'declaration' and 'doctype' parameters to become ignored
+ if provided.
+ declaration - emit an xml declaration header (including an encoding
+ if it's not None). The default is to emit the
+ doctype.
+ pipeline - preserve 'meld' namespace identifiers in output
+ for use in pipelining
+ """
+ ...
+
+ def write_htmlstring(self, encoding=..., doctype=..., fragment=...): # -> bytes:
+ ...
+
+ def write_html(self, file, encoding=..., doctype=..., fragment=...): # -> None:
+ """ Write HTML to 'file' (which can be a filename or filelike object)
+
+ encoding - encoding string (if None, 'utf-8' encoding is assumed).
+ Unlike XML output, this is not used in a declaration,
+ but it is used to do actual character encoding during
+ output. Must be a recognizable Python encoding type.
+ doctype - 3-tuple indicating name, pubid, system of doctype.
+ The default is the value of doctype.html (HTML 4.0
+ 'loose')
+ fragment - True if a "fragment" should be emitted (no doctype).
+ This overrides the "doctype" parameter if one is
+ provided.
+
+ Namespace'd elements and attributes have their namespaces removed
+ during output when writing HTML, so pipelining cannot be performed.
+
+ HTML is not valid XML, so an XML declaration header is never emitted.
+ """
+ ...
+
+ def write_xhtmlstring(self, encoding=..., doctype=..., fragment=..., declaration=..., pipeline=...): # -> bytes:
+ ...
+
+ def write_xhtml(self, file, encoding=..., doctype=..., fragment=..., declaration=..., pipeline=...): # -> None:
+ """ Write XHTML to 'file' (which can be a filename or filelike object)
+
+ encoding - encoding string (if None, 'utf-8' encoding is assumed)
+ Must be a recognizable Python encoding type.
+ doctype - 3-tuple indicating name, pubid, system of doctype.
+ The default is the value of doctype.xhtml (XHTML
+ 'loose').
+ fragment - True if a 'fragment' should be emitted for this node (no
+ declaration, no doctype). This causes both the
+ 'declaration' and 'doctype' parameters to be ignored.
+ declaration - emit an xml declaration header (including an encoding
+ string if 'encoding' is not None)
+ pipeline - preserve 'meld' namespace identifiers in output
+ for use in pipelining
+ """
+ ...
+
+ def clone(self, parent=...): # -> _MeldElementInterface:
+ """ Create a clone of an element. If parent is not None,
+ append the element to the parent. Recurse as necessary to create
+ a deep clone of the element. """
+ ...
+
+ def deparent(self): # -> None:
+ """ Remove ourselves from our parent node (de-parent) and return
+ the index of the parent which was deleted. """
+ ...
+
+ def parentindex(self): # -> None:
+ """ Return the parent node index in which we live """
+ ...
+
+ def shortrepr(self, encoding=...): # -> bytes:
+ ...
+
+ def diffmeld(self, other): # -> dict[str, dict[str, list[Unknown]]]:
+ """ Compute the meld element differences from this node (the
+ source) to 'other' (the target). Return a dictionary of
+ sequences in the form {'unreduced:
+ {'added':[], 'removed':[], 'moved':[]},
+ 'reduced':
+ {'added':[], 'removed':[], 'moved':[]},}
+ """
+ ...
+
+ def meldid(self):
+ ...
+
+ def lineage(self): # -> list[Unknown]:
+ ...
+
+
+
+class MeldTreeBuilder(TreeBuilder):
+ def __init__(self) -> None:
+ ...
+
+ def start(self, tag, attrs): # -> Element:
+ ...
+
+ def comment(self, data): # -> None:
+ ...
+
+ def doctype(self, name, pubid, system): # -> None:
+ ...
+
+
+
+class HTMLXMLParser(HTMLParser):
+ """ A mostly-cut-and-paste of ElementTree's HTMLTreeBuilder that
+ does special meld3 things (like preserve comments and munge meld
+ ids). Subclassing is not possible due to private attributes. :-("""
+ def __init__(self, builder=..., encoding=...) -> None:
+ ...
+
+ def close(self): # -> Element:
+ ...
+
+ def handle_starttag(self, tag, attrs): # -> None:
+ ...
+
+ def handle_endtag(self, tag): # -> None:
+ ...
+
+ def handle_charref(self, char): # -> None:
+ ...
+
+ def handle_entityref(self, name): # -> None:
+ ...
+
+ def handle_data(self, data): # -> None:
+ ...
+
+ def unknown_entityref(self, name): # -> None:
+ ...
+
+ def handle_comment(self, data): # -> None:
+ ...
+
+
+
+def do_parse(source, parser): # -> Element:
+ ...
+
+def parse_xml(source): # -> Element:
+ """ Parse source (a filelike object) into an element tree. If
+ html is true, use a parser that can resolve somewhat ambiguous
+ HTML into XHTML. Otherwise use a 'normal' parser only."""
+ ...
+
+def parse_html(source, encoding=...): # -> Element:
+ ...
+
+def parse_xmlstring(text): # -> Element:
+ ...
+
+def parse_htmlstring(text, encoding=...): # -> Element:
+ ...
+
+attrib_needs_escaping = ...
+cdata_needs_escaping = ...
+_HTMLTAGS_UNBALANCED = ...
+_HTMLTAGS_NOESCAPE = ...
+_HTMLATTRS_BOOLEAN = ...
+_NONENTITY_RE = ...
+_XML_DECL_RE = ...
+_BEGIN_TAG_RE = ...
+def insert_doctype(data, doctype=...):
+ ...
+
+def insert_meld_ns_decl(data):
+ ...
+
+def prefeed(data, doctype=...):
+ ...
+
+def sharedlineage(srcelement, tgtelement): # -> bool:
+ ...
+
+def diffreduce(elements): # -> list[Unknown]:
+ ...
+
+def intersection(S1, S2): # -> list[Unknown]:
+ ...
+
+def melditerator(element, meldid=..., _MELD_ID=...): # -> Generator[Unknown, None, None]:
+ ...
+
+_NON_ASCII_MIN = ...
+_NON_ASCII_MAX = ...
+_escape_map = ...
+_namespace_map = ...
+_pattern = ...
+def fixtag(tag, namespaces): # -> tuple[str, tuple[str, str | Unknown] | None]:
+ ...
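
The repeat()/content() protocol documented above is easiest to see in a tiny end-to-end run. A hedged sketch: the meld:id attribute must live in supervisor's meld namespace, whose URL the stub elides as _MELD_NS_URL, so the sketch reads it from the module rather than hard-coding it:

    from supervisor import templating

    xml = ('<ul xmlns:meld="%s"><li meld:id="item">x</li></ul>'
           % templating._MELD_NS_URL)
    root = templating.parse_xmlstring(xml)
    # repeat() clones the <li> once per value; content() fills each clone
    for element, value in root.findmeld('item').repeat(['a', 'b']):
        element.content(value)
    print(root.write_xmlstring().decode())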
diff --git a/manager/typings/supervisor/tests/__init__.pyi b/manager/typings/supervisor/tests/__init__.pyi
new file mode 100644
index 00000000..cea7ef96
--- /dev/null
+++ b/manager/typings/supervisor/tests/__init__.pyi
@@ -0,0 +1,3 @@
+"""
+This type stub file was generated by pyright.
+"""
diff --git a/manager/typings/supervisor/web.pyi b/manager/typings/supervisor/web.pyi
new file mode 100644
index 00000000..3ac195f5
--- /dev/null
+++ b/manager/typings/supervisor/web.pyi
@@ -0,0 +1,87 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+class DeferredWebProducer:
+ """ A medusa producer that implements a deferred callback; requires
+ a subclass of asynchat.async_chat that handles NOT_DONE_YET sentinel """
+ CONNECTION = ...
+ def __init__(self, request, callback) -> None:
+ ...
+
+ def more(self): # -> Type[NOT_DONE_YET] | Literal[''] | None:
+ ...
+
+ def sendresponse(self, response): # -> None:
+ ...
+
+
+
+class ViewContext:
+ def __init__(self, **kw) -> None:
+ ...
+
+
+
+class MeldView:
+ content_type = ...
+ delay = ...
+ def __init__(self, context) -> None:
+ ...
+
+ def __call__(self): # -> Type[NOT_DONE_YET]:
+ ...
+
+ def render(self): # -> None:
+ ...
+
+ def clone(self):
+ ...
+
+
+
+class TailView(MeldView):
+ def render(self): # -> str:
+ ...
+
+
+
+class StatusView(MeldView):
+ def actions_for_process(self, process): # -> list[dict[str, str | Unknown | None] | dict[str, str | Unknown]] | list[dict[str, str | Unknown | None] | dict[str, str | Unknown] | None]:
+ ...
+
+ def css_class_for_state(self, state): # -> Literal['statusrunning', 'statuserror', 'statusnominal']:
+ ...
+
+ def make_callback(self, namespec, action): # -> () -> str | () -> (Type[NOT_DONE_YET] | str) | () -> Unknown | () -> (str | Type[NOT_DONE_YET] | Unknown) | () -> (Type[NOT_DONE_YET] | Unknown):
+ ...
+
+ def render(self): # -> Type[NOT_DONE_YET] | str:
+ ...
+
+
+
+class OKView:
+ delay = ...
+ def __init__(self, context) -> None:
+ ...
+
+ def __call__(self): # -> dict[str, str]:
+ ...
+
+
+
+VIEWS = ...
+class supervisor_ui_handler:
+ IDENT = ...
+ def __init__(self, supervisord) -> None:
+ ...
+
+ def match(self, request): # -> bool | None:
+ ...
+
+ def handle_request(self, request): # -> None:
+ ...
+
+ def continue_request(self, data, request): # -> None:
+ ...
diff --git a/manager/typings/supervisor/xmlrpc.pyi b/manager/typings/supervisor/xmlrpc.pyi
new file mode 100644
index 00000000..879c49b9
--- /dev/null
+++ b/manager/typings/supervisor/xmlrpc.pyi
@@ -0,0 +1,174 @@
+"""
+This type stub file was generated by pyright.
+"""
+
+from supervisor.compat import httplib, xmlrpclib
+from supervisor.medusa.xmlrpc_handler import xmlrpc_handler
+
+class Faults:
+ UNKNOWN_METHOD = ...
+ INCORRECT_PARAMETERS = ...
+ BAD_ARGUMENTS = ...
+ SIGNATURE_UNSUPPORTED = ...
+ SHUTDOWN_STATE = ...
+ BAD_NAME = ...
+ BAD_SIGNAL = ...
+ NO_FILE = ...
+ NOT_EXECUTABLE = ...
+ FAILED = ...
+ ABNORMAL_TERMINATION = ...
+ SPAWN_ERROR = ...
+ ALREADY_STARTED = ...
+ NOT_RUNNING = ...
+ SUCCESS = ...
+ ALREADY_ADDED = ...
+ STILL_RUNNING = ...
+ CANT_REREAD = ...
+
+
+def getFaultDescription(code): # -> str:
+ ...
+
+class RPCError(Exception):
+ def __init__(self, code, extra=...) -> None:
+ ...
+
+ def __str__(self) -> str:
+ ...
+
+
+
+class DeferredXMLRPCResponse:
+ """ A medusa producer that implements a deferred callback; requires
+ a subclass of asynchat.async_chat that handles NOT_DONE_YET sentinel """
+ CONNECTION = ...
+ def __init__(self, request, callback) -> None:
+ ...
+
+ def more(self): # -> Type[NOT_DONE_YET] | Literal[''] | None:
+ ...
+
+ def getresponse(self, body): # -> None:
+ ...
+
+
+
+def xmlrpc_marshal(value):
+ ...
+
+class SystemNamespaceRPCInterface:
+ def __init__(self, namespaces) -> None:
+ ...
+
+ def listMethods(self): # -> list[Unknown]:
+ """ Return an array listing the available method names
+
+ @return array result An array of method names available (strings).
+ """
+ ...
+
+ def methodHelp(self, name):
+ """ Return a string showing the method's documentation
+
+ @param string name The name of the method.
+ @return string result The documentation for the method name.
+ """
+ ...
+
+ def methodSignature(self, name): # -> List[Unknown]:
+ """ Return an array describing the method signature in the
+ form [rtype, ptype, ptype...] where rtype is the return data type
+ of the method, and ptypes are the parameter data types that the
+ method accepts in method argument order.
+
+ @param string name The name of the method.
+ @return array result The result.
+ """
+ ...
+
+ def multicall(self, calls): # -> (remaining_calls: Unknown = remaining_calls, callbacks: Unknown = callbacks, results: Unknown = results) -> (Type[NOT_DONE_YET] | Unknown) | Type[NOT_DONE_YET] | list[Unknown]:
+ """Process an array of calls, and return an array of
+ results. Calls should be structs of the form {'methodName':
+ string, 'params': array}. Each result will either be a
+ single-item array containing the result value, or a struct of
+ the form {'faultCode': int, 'faultString': string}. This is
+ useful when you need to make lots of small calls without lots
+ of round trips.
+
+ @param array calls An array of call requests
+ @return array result An array of results
+ """
+ ...
+
+
+
+class AttrDict(dict):
+ def __getattr__(self, name): # -> None:
+ ...
+
+
+
+class RootRPCInterface:
+ def __init__(self, subinterfaces) -> None:
+ ...
+
+
+
+def capped_int(value): # -> int:
+ ...
+
+def make_datetime(text): # -> datetime:
+ ...
+
+class supervisor_xmlrpc_handler(xmlrpc_handler):
+ path = ...
+ IDENT = ...
+ unmarshallers = ...
+ def __init__(self, supervisord, subinterfaces) -> None:
+ ...
+
+ def loads(self, data): # -> tuple[tuple[Any, ...] | None, Any | None]:
+ ...
+
+ def match(self, request):
+ ...
+
+ def continue_request(self, data, request): # -> None:
+ ...
+
+ def call(self, method, params): # -> Any:
+ ...
+
+
+
+def traverse(ob, method, params): # -> Any:
+ ...
+
+class SupervisorTransport(xmlrpclib.Transport):
+ """
+ Provides a Transport for xmlrpclib that uses
+ httplib.HTTPConnection in order to support persistent
+ connections. Also support basic auth and UNIX domain socket
+ servers.
+ """
+ connection = ...
+ def __init__(self, username=..., password=..., serverurl=...) -> None:
+ ...
+
+ def close(self): # -> None:
+ ...
+
+ def request(self, host, handler, request_body, verbose=...):
+ ...
+
+
+
+class UnixStreamHTTPConnection(httplib.HTTPConnection):
+ def connect(self): # -> None:
+ ...
+
+
+
+def gettags(comment): # -> list[Unknown]:
+ """ Parse documentation strings into JavaDoc-like tokens """
+ ...
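
SupervisorTransport is the piece that lets an xmlrpclib client reach supervisord over a UNIX domain socket, which is presumably how the manager connects to its supervisord instance. A hedged sketch (the socket path is hypothetical, and the http:// host is a dummy that the transport ignores):

    from xmlrpc.client import ServerProxy
    from supervisor.xmlrpc import SupervisorTransport

    transport = SupervisorTransport(
        None, None,  # no basic-auth credentials
        serverurl="unix:///run/knot-resolver/supervisor.sock",  # hypothetical
    )
    proxy = ServerProxy("http://localhost", transport=transport)
    print(proxy.supervisor.getState())  # e.g. {'statecode': 1, 'statename': 'RUNNING'}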
diff --git a/meson.build b/meson.build
index 3f301e5a..21a89e3e 100644
--- a/meson.build
+++ b/meson.build
@@ -4,7 +4,7 @@ project(
'knot-resolver',
['c', 'cpp'],
license: 'GPLv3+',
- version: '5.6.0',
+ version: '6.0.0a1',
default_options: ['c_std=gnu11', 'b_ndebug=true'],
meson_version: '>=0.49',
)
@@ -52,12 +52,14 @@ etc_dir = prefix / get_option('sysconfdir') / 'knot-resolver'
lib_dir = prefix / get_option('libdir') / 'knot-resolver'
modules_dir = lib_dir / 'kres_modules'
sbin_dir = prefix / get_option('sbindir')
+bin_dir = prefix / get_option('bindir')
run_dir = '/run' / 'knot-resolver'
systemd_work_dir = prefix / get_option('localstatedir') / 'lib' / 'knot-resolver'
systemd_cache_dir = prefix / get_option('localstatedir') / 'cache' / 'knot-resolver'
systemd_unit_dir = prefix / 'lib' / 'systemd' / 'system'
systemd_tmpfiles_dir = prefix / 'lib' / 'tmpfiles.d'
systemd_sysusers_dir = prefix / 'lib' / 'sysusers.d'
+completion_dir = prefix / 'share'
## Trust anchors
managed_ta = get_option('managed_ta') == 'enabled'
@@ -212,6 +214,7 @@ subdir('lib')
## Remaining code
subdir('daemon')
subdir('modules')
+subdir('manager')
subdir('utils')
if get_option('bench') == 'enabled'
subdir('bench')
@@ -297,6 +300,7 @@ run_target(
# https://github.com/mesonbuild/meson/issues/2404
s_managed_ta = managed_ta ? 'enabled' : 'disabled'
s_install_root_keys = install_root_keys ? 'enabled' : 'disabled'
+s_build_manager = build_manager ? 'enabled' : 'disabled'
s_build_client = build_client ? 'enabled' : 'disabled'
s_build_utils = build_utils ? 'enabled' : 'disabled'
s_build_dnstap = build_dnstap ? 'enabled' : 'disabled'
@@ -332,6 +336,7 @@ message('''
cache_dir: @0@'''.format(systemd_cache_dir) + '''
optional components
+ manager: @0@'''.format(s_build_manager) + '''
client: @0@'''.format(s_build_client) + '''
utils: @0@'''.format(s_build_utils) + '''
dnstap: @0@'''.format(s_build_dnstap) + '''
diff --git a/meson_options.txt b/meson_options.txt
index 576d385a..b1af4478 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -116,6 +116,18 @@ option(
)
option(
+ 'manager',
+ type: 'combo',
+ choices: [
+ 'auto',
+ 'enabled',
+ 'disabled',
+ ],
+ value: 'disabled',
+ description: 'build manager and its features',
+)
+
+option(
'client',
type: 'combo',
choices: [
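
Since the new 'manager' option defaults to 'disabled', the component presumably has to be requested explicitly at configure time, e.g. with `meson setup build -Dmanager=enabled` (exact invocation depends on the build setup; any meson combo option is set the same way).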
diff --git a/modules/bogus_log/.packaging/test.config b/modules/bogus_log/.packaging/test.config
deleted file mode 100644
index bf1c8219..00000000
--- a/modules/bogus_log/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('bogus_log')
-assert(bogus_log)
-quit()
diff --git a/modules/daf/.packaging/test.config b/modules/daf/.packaging/test.config
deleted file mode 100644
index 2fa1d8cb..00000000
--- a/modules/daf/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('daf')
-assert(daf)
-quit()
diff --git a/modules/daf/daf.test.lua b/modules/daf/daf.test.lua
index 2a46393b..557c1a99 100644
--- a/modules/daf/daf.test.lua
+++ b/modules/daf/daf.test.lua
@@ -37,7 +37,9 @@ local function test_basic_actions()
daf.add('qname = deny. deny')
daf.add('qname = drop. drop')
- check_answer('daf pass action', 'pass.', kres.type.A, kres.rcode.NOERROR)
+ -- This one won't work anymore, as PASS(THRU) now also skips hints.
+ --check_answer('daf pass action', 'pass.', kres.type.A, kres.rcode.NOERROR)
+
check_answer('daf deny action', 'deny.', kres.type.A, kres.rcode.NXDOMAIN)
check_answer('daf drop action', 'drop.', kres.type.A, kres.rcode.SERVFAIL)
end
diff --git a/modules/detect_time_jump/.packaging/test.config b/modules/detect_time_jump/.packaging/test.config
deleted file mode 100644
index 7ed0e602..00000000
--- a/modules/detect_time_jump/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('detect_time_jump')
-assert(detect_time_jump)
-quit()
diff --git a/modules/detect_time_skew/.packaging/test.config b/modules/detect_time_skew/.packaging/test.config
deleted file mode 100644
index 3a379071..00000000
--- a/modules/detect_time_skew/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('detect_time_skew')
-assert(detect_time_skew)
-quit()
diff --git a/modules/dns64/.packaging/test.config b/modules/dns64/.packaging/test.config
deleted file mode 100644
index 5abf524c..00000000
--- a/modules/dns64/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('dns64')
-assert(dns64)
-quit()
diff --git a/modules/dns64/dns64.test.lua b/modules/dns64/dns64.test.lua
index 45956a4c..0686ecc0 100644
--- a/modules/dns64/dns64.test.lua
+++ b/modules/dns64/dns64.test.lua
@@ -3,9 +3,9 @@ local condition = require('cqueues.condition')
-- setup resolver
modules = { 'hints', 'dns64' }
-hints['dns64.example'] = '192.168.1.1'
hints.use_nodata(true) -- Respond NODATA to AAAA query
hints.ttl(60)
+hints['dns64.example'] = '192.168.1.1'
dns64.config('fe80::21b:77ff:0:0')
-- helper to wait for query resolution
diff --git a/modules/dnstap/.packaging/centos/7/builddeps b/modules/dnstap/.packaging/centos/7/builddeps
deleted file mode 100644
index d3ab3547..00000000
--- a/modules/dnstap/.packaging/centos/7/builddeps
+++ /dev/null
@@ -1,3 +0,0 @@
-fstrm-devel
-protobuf-c-devel
-protobuf-c-compiler
diff --git a/modules/dnstap/.packaging/centos/7/rundeps b/modules/dnstap/.packaging/centos/7/rundeps
deleted file mode 100644
index 06c2792f..00000000
--- a/modules/dnstap/.packaging/centos/7/rundeps
+++ /dev/null
@@ -1,2 +0,0 @@
-fstrm
-protobuf-c
diff --git a/modules/dnstap/.packaging/centos/8/builddeps b/modules/dnstap/.packaging/centos/8/builddeps
deleted file mode 100644
index d3ab3547..00000000
--- a/modules/dnstap/.packaging/centos/8/builddeps
+++ /dev/null
@@ -1,3 +0,0 @@
-fstrm-devel
-protobuf-c-devel
-protobuf-c-compiler
diff --git a/modules/dnstap/.packaging/centos/8/rundeps b/modules/dnstap/.packaging/centos/8/rundeps
deleted file mode 100644
index 06c2792f..00000000
--- a/modules/dnstap/.packaging/centos/8/rundeps
+++ /dev/null
@@ -1,2 +0,0 @@
-fstrm
-protobuf-c
diff --git a/modules/dnstap/.packaging/debian/10/builddeps b/modules/dnstap/.packaging/debian/10/builddeps
deleted file mode 100644
index 417dc04a..00000000
--- a/modules/dnstap/.packaging/debian/10/builddeps
+++ /dev/null
@@ -1,3 +0,0 @@
-libfstrm-dev
-libprotobuf-c-dev
-protobuf-c-compiler
diff --git a/modules/dnstap/.packaging/debian/10/rundeps b/modules/dnstap/.packaging/debian/10/rundeps
deleted file mode 100644
index a726e120..00000000
--- a/modules/dnstap/.packaging/debian/10/rundeps
+++ /dev/null
@@ -1,2 +0,0 @@
-libfstrm0
-libprotobuf-c1
diff --git a/modules/dnstap/.packaging/debian/9/builddeps b/modules/dnstap/.packaging/debian/9/builddeps
deleted file mode 100644
index 417dc04a..00000000
--- a/modules/dnstap/.packaging/debian/9/builddeps
+++ /dev/null
@@ -1,3 +0,0 @@
-libfstrm-dev
-libprotobuf-c-dev
-protobuf-c-compiler
diff --git a/modules/dnstap/.packaging/debian/9/rundeps b/modules/dnstap/.packaging/debian/9/rundeps
deleted file mode 100644
index a726e120..00000000
--- a/modules/dnstap/.packaging/debian/9/rundeps
+++ /dev/null
@@ -1,2 +0,0 @@
-libfstrm0
-libprotobuf-c1
diff --git a/modules/dnstap/.packaging/fedora/31/builddeps b/modules/dnstap/.packaging/fedora/31/builddeps
deleted file mode 100644
index d3ab3547..00000000
--- a/modules/dnstap/.packaging/fedora/31/builddeps
+++ /dev/null
@@ -1,3 +0,0 @@
-fstrm-devel
-protobuf-c-devel
-protobuf-c-compiler
diff --git a/modules/dnstap/.packaging/fedora/31/rundeps b/modules/dnstap/.packaging/fedora/31/rundeps
deleted file mode 100644
index 06c2792f..00000000
--- a/modules/dnstap/.packaging/fedora/31/rundeps
+++ /dev/null
@@ -1,2 +0,0 @@
-fstrm
-protobuf-c
diff --git a/modules/dnstap/.packaging/fedora/32/builddeps b/modules/dnstap/.packaging/fedora/32/builddeps
deleted file mode 100644
index d3ab3547..00000000
--- a/modules/dnstap/.packaging/fedora/32/builddeps
+++ /dev/null
@@ -1,3 +0,0 @@
-fstrm-devel
-protobuf-c-devel
-protobuf-c-compiler
diff --git a/modules/dnstap/.packaging/fedora/32/rundeps b/modules/dnstap/.packaging/fedora/32/rundeps
deleted file mode 100644
index 06c2792f..00000000
--- a/modules/dnstap/.packaging/fedora/32/rundeps
+++ /dev/null
@@ -1,2 +0,0 @@
-fstrm
-protobuf-c
diff --git a/modules/dnstap/.packaging/leap/15.2/builddeps b/modules/dnstap/.packaging/leap/15.2/builddeps
deleted file mode 100644
index 30f8d9e1..00000000
--- a/modules/dnstap/.packaging/leap/15.2/builddeps
+++ /dev/null
@@ -1,3 +0,0 @@
-fstrm-devel
-libprotobuf-c-devel
-protobuf-c
diff --git a/modules/dnstap/.packaging/leap/15.2/rundeps b/modules/dnstap/.packaging/leap/15.2/rundeps
deleted file mode 100644
index 06c2792f..00000000
--- a/modules/dnstap/.packaging/leap/15.2/rundeps
+++ /dev/null
@@ -1,2 +0,0 @@
-fstrm
-protobuf-c
diff --git a/modules/dnstap/.packaging/test.config b/modules/dnstap/.packaging/test.config
deleted file mode 100644
index 5966860f..00000000
--- a/modules/dnstap/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('dnstap')
-assert(dnstap)
-quit()
diff --git a/modules/dnstap/.packaging/ubuntu/16.04/builddeps b/modules/dnstap/.packaging/ubuntu/16.04/builddeps
deleted file mode 100644
index 417dc04a..00000000
--- a/modules/dnstap/.packaging/ubuntu/16.04/builddeps
+++ /dev/null
@@ -1,3 +0,0 @@
-libfstrm-dev
-libprotobuf-c-dev
-protobuf-c-compiler
diff --git a/modules/dnstap/.packaging/ubuntu/16.04/rundeps b/modules/dnstap/.packaging/ubuntu/16.04/rundeps
deleted file mode 100644
index a726e120..00000000
--- a/modules/dnstap/.packaging/ubuntu/16.04/rundeps
+++ /dev/null
@@ -1,2 +0,0 @@
-libfstrm0
-libprotobuf-c1
diff --git a/modules/dnstap/.packaging/ubuntu/18.04/builddeps b/modules/dnstap/.packaging/ubuntu/18.04/builddeps
deleted file mode 100644
index 417dc04a..00000000
--- a/modules/dnstap/.packaging/ubuntu/18.04/builddeps
+++ /dev/null
@@ -1,3 +0,0 @@
-libfstrm-dev
-libprotobuf-c-dev
-protobuf-c-compiler
diff --git a/modules/dnstap/.packaging/ubuntu/18.04/rundeps b/modules/dnstap/.packaging/ubuntu/18.04/rundeps
deleted file mode 100644
index a726e120..00000000
--- a/modules/dnstap/.packaging/ubuntu/18.04/rundeps
+++ /dev/null
@@ -1,2 +0,0 @@
-libfstrm0
-libprotobuf-c1
diff --git a/modules/dnstap/.packaging/ubuntu/20.04/builddeps b/modules/dnstap/.packaging/ubuntu/20.04/builddeps
deleted file mode 100644
index 417dc04a..00000000
--- a/modules/dnstap/.packaging/ubuntu/20.04/builddeps
+++ /dev/null
@@ -1,3 +0,0 @@
-libfstrm-dev
-libprotobuf-c-dev
-protobuf-c-compiler
diff --git a/modules/dnstap/.packaging/ubuntu/20.04/rundeps b/modules/dnstap/.packaging/ubuntu/20.04/rundeps
deleted file mode 100644
index a726e120..00000000
--- a/modules/dnstap/.packaging/ubuntu/20.04/rundeps
+++ /dev/null
@@ -1,2 +0,0 @@
-libfstrm0
-libprotobuf-c1
diff --git a/modules/edns_keepalive/.packaging/test.config b/modules/edns_keepalive/.packaging/test.config
deleted file mode 100644
index 5c71c797..00000000
--- a/modules/edns_keepalive/.packaging/test.config
+++ /dev/null
@@ -1,10 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('edns_keepalive')
-
-for _,item in ipairs(modules.list()) do
- if item == "edns_keepalive" then
- os.exit(0)
- end
-end
-
-os.exit(1)
diff --git a/modules/etcd/.packaging/centos/7/pre-test.sh b/modules/etcd/.packaging/centos/7/pre-test.sh
deleted file mode 100755
index 4df79d99..00000000
--- a/modules/etcd/.packaging/centos/7/pre-test.sh
+++ /dev/null
@@ -1 +0,0 @@
-luarocks install etcd --from=https://mah0x211.github.io/rocks/
diff --git a/modules/etcd/.packaging/centos/7/rundeps b/modules/etcd/.packaging/centos/7/rundeps
deleted file mode 100644
index 795a3c44..00000000
--- a/modules/etcd/.packaging/centos/7/rundeps
+++ /dev/null
@@ -1,6 +0,0 @@
-openssl-devel
-lua-devel
-luarocks
-git
-gcc
-make
diff --git a/modules/etcd/.packaging/centos/8/NOTSUPPORTED b/modules/etcd/.packaging/centos/8/NOTSUPPORTED
deleted file mode 100644
index e69de29b..00000000
--- a/modules/etcd/.packaging/centos/8/NOTSUPPORTED
+++ /dev/null
diff --git a/modules/etcd/.packaging/debian/10/pre-test.sh b/modules/etcd/.packaging/debian/10/pre-test.sh
deleted file mode 100755
index 20073dc8..00000000
--- a/modules/etcd/.packaging/debian/10/pre-test.sh
+++ /dev/null
@@ -1 +0,0 @@
-luarocks --lua-version 5.1 install etcd --from=https://mah0x211.github.io/rocks/
diff --git a/modules/etcd/.packaging/debian/10/rundeps b/modules/etcd/.packaging/debian/10/rundeps
deleted file mode 100644
index 02d3fcf5..00000000
--- a/modules/etcd/.packaging/debian/10/rundeps
+++ /dev/null
@@ -1,4 +0,0 @@
-libssl-dev
-luarocks
-git
-make
diff --git a/modules/etcd/.packaging/debian/9/pre-test.sh b/modules/etcd/.packaging/debian/9/pre-test.sh
deleted file mode 100755
index 4df79d99..00000000
--- a/modules/etcd/.packaging/debian/9/pre-test.sh
+++ /dev/null
@@ -1 +0,0 @@
-luarocks install etcd --from=https://mah0x211.github.io/rocks/
diff --git a/modules/etcd/.packaging/debian/9/rundeps b/modules/etcd/.packaging/debian/9/rundeps
deleted file mode 100644
index 02d3fcf5..00000000
--- a/modules/etcd/.packaging/debian/9/rundeps
+++ /dev/null
@@ -1,4 +0,0 @@
-libssl-dev
-luarocks
-git
-make
diff --git a/modules/etcd/.packaging/fedora/31/NOTSUPPORTED b/modules/etcd/.packaging/fedora/31/NOTSUPPORTED
deleted file mode 100644
index b912289a..00000000
--- a/modules/etcd/.packaging/fedora/31/NOTSUPPORTED
+++ /dev/null
@@ -1,16 +0,0 @@
-Error installing etcd using luarocks:
-
-
-
-Missing dependencies for process 1.9.0-1:
- luarocks-fetch-gitrec >= 0.2 (not installed)
-
-process 1.9.0-1 depends on luarocks-fetch-gitrec >= 0.2 (not installed)
-Installing https://luarocks.org/luarocks-fetch-gitrec-0.2-1.src.rock
-
-No existing manifest. Attempting to rebuild...
-luarocks-fetch-gitrec 0.2-1 is now installed in /root/.luarocks (license: MIT)
-
-
-Error: Unknown protocol gitrec
-
diff --git a/modules/etcd/.packaging/fedora/32/NOTSUPPORTED b/modules/etcd/.packaging/fedora/32/NOTSUPPORTED
deleted file mode 100644
index b912289a..00000000
--- a/modules/etcd/.packaging/fedora/32/NOTSUPPORTED
+++ /dev/null
@@ -1,16 +0,0 @@
-Error installing etcd using luarocks:
-
-
-
-Missing dependencies for process 1.9.0-1:
- luarocks-fetch-gitrec >= 0.2 (not installed)
-
-process 1.9.0-1 depends on luarocks-fetch-gitrec >= 0.2 (not installed)
-Installing https://luarocks.org/luarocks-fetch-gitrec-0.2-1.src.rock
-
-No existing manifest. Attempting to rebuild...
-luarocks-fetch-gitrec 0.2-1 is now installed in /root/.luarocks (license: MIT)
-
-
-Error: Unknown protocol gitrec
-
diff --git a/modules/etcd/.packaging/leap/15.2/pre-test.sh b/modules/etcd/.packaging/leap/15.2/pre-test.sh
deleted file mode 100755
index 20073dc8..00000000
--- a/modules/etcd/.packaging/leap/15.2/pre-test.sh
+++ /dev/null
@@ -1 +0,0 @@
-luarocks --lua-version 5.1 install etcd --from=https://mah0x211.github.io/rocks/
diff --git a/modules/etcd/.packaging/leap/15.2/rundeps b/modules/etcd/.packaging/leap/15.2/rundeps
deleted file mode 100644
index e8df59ba..00000000
--- a/modules/etcd/.packaging/leap/15.2/rundeps
+++ /dev/null
@@ -1,6 +0,0 @@
-libopenssl-devel
-lua51-devel
-lua51-luarocks
-git
-gcc
-make
diff --git a/modules/etcd/.packaging/test.config b/modules/etcd/.packaging/test.config
deleted file mode 100644
index 1cc7e5aa..00000000
--- a/modules/etcd/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('etcd')
-assert(etcd)
-quit()
diff --git a/modules/etcd/.packaging/ubuntu/16.04/pre-test.sh b/modules/etcd/.packaging/ubuntu/16.04/pre-test.sh
deleted file mode 100755
index 4df79d99..00000000
--- a/modules/etcd/.packaging/ubuntu/16.04/pre-test.sh
+++ /dev/null
@@ -1 +0,0 @@
-luarocks install etcd --from=https://mah0x211.github.io/rocks/
diff --git a/modules/etcd/.packaging/ubuntu/16.04/rundeps b/modules/etcd/.packaging/ubuntu/16.04/rundeps
deleted file mode 100644
index a355a9f8..00000000
--- a/modules/etcd/.packaging/ubuntu/16.04/rundeps
+++ /dev/null
@@ -1,3 +0,0 @@
-libssl-dev
-luarocks
-git
diff --git a/modules/etcd/.packaging/ubuntu/18.04/pre-test.sh b/modules/etcd/.packaging/ubuntu/18.04/pre-test.sh
deleted file mode 100755
index 4df79d99..00000000
--- a/modules/etcd/.packaging/ubuntu/18.04/pre-test.sh
+++ /dev/null
@@ -1 +0,0 @@
-luarocks install etcd --from=https://mah0x211.github.io/rocks/
diff --git a/modules/etcd/.packaging/ubuntu/18.04/rundeps b/modules/etcd/.packaging/ubuntu/18.04/rundeps
deleted file mode 100644
index a355a9f8..00000000
--- a/modules/etcd/.packaging/ubuntu/18.04/rundeps
+++ /dev/null
@@ -1,3 +0,0 @@
-libssl-dev
-luarocks
-git
diff --git a/modules/etcd/.packaging/ubuntu/20.04/pre-test.sh b/modules/etcd/.packaging/ubuntu/20.04/pre-test.sh
deleted file mode 100755
index 20073dc8..00000000
--- a/modules/etcd/.packaging/ubuntu/20.04/pre-test.sh
+++ /dev/null
@@ -1 +0,0 @@
-luarocks --lua-version 5.1 install etcd --from=https://mah0x211.github.io/rocks/
diff --git a/modules/etcd/.packaging/ubuntu/20.04/rundeps b/modules/etcd/.packaging/ubuntu/20.04/rundeps
deleted file mode 100644
index 02d3fcf5..00000000
--- a/modules/etcd/.packaging/ubuntu/20.04/rundeps
+++ /dev/null
@@ -1,4 +0,0 @@
-libssl-dev
-luarocks
-git
-make
diff --git a/modules/experimental_dot_auth/.packaging/centos/7/rundeps b/modules/experimental_dot_auth/.packaging/centos/7/rundeps
deleted file mode 100644
index 36b83e18..00000000
--- a/modules/experimental_dot_auth/.packaging/centos/7/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-basexx
diff --git a/modules/experimental_dot_auth/.packaging/centos/8/rundeps b/modules/experimental_dot_auth/.packaging/centos/8/rundeps
deleted file mode 100644
index 984c7cec..00000000
--- a/modules/experimental_dot_auth/.packaging/centos/8/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua5.1-basexx
diff --git a/modules/experimental_dot_auth/.packaging/debian/10/rundeps b/modules/experimental_dot_auth/.packaging/debian/10/rundeps
deleted file mode 100644
index 36b83e18..00000000
--- a/modules/experimental_dot_auth/.packaging/debian/10/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-basexx
diff --git a/modules/experimental_dot_auth/.packaging/debian/9/rundeps b/modules/experimental_dot_auth/.packaging/debian/9/rundeps
deleted file mode 100644
index 36b83e18..00000000
--- a/modules/experimental_dot_auth/.packaging/debian/9/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-basexx
diff --git a/modules/experimental_dot_auth/.packaging/fedora/31/rundeps b/modules/experimental_dot_auth/.packaging/fedora/31/rundeps
deleted file mode 100644
index 984c7cec..00000000
--- a/modules/experimental_dot_auth/.packaging/fedora/31/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua5.1-basexx
diff --git a/modules/experimental_dot_auth/.packaging/fedora/32/rundeps b/modules/experimental_dot_auth/.packaging/fedora/32/rundeps
deleted file mode 100644
index 984c7cec..00000000
--- a/modules/experimental_dot_auth/.packaging/fedora/32/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua5.1-basexx
diff --git a/modules/experimental_dot_auth/.packaging/leap/15.2/NOTSUPPORTED b/modules/experimental_dot_auth/.packaging/leap/15.2/NOTSUPPORTED
deleted file mode 100644
index 682eff05..00000000
--- a/modules/experimental_dot_auth/.packaging/leap/15.2/NOTSUPPORTED
+++ /dev/null
@@ -1,6 +0,0 @@
-
-ERROR:test_packaging:Installing https://luarocks.org/basexx-0.4.1-1.rockspec
-Error: Failed extracting v0.4.1.tar.gz
-
-Doesn't work on GitLab CI/CD, but works on localhost.
-gzip and tar packages are installed, and all packages have the same versions as the packages in the localhost docker container.
diff --git a/modules/experimental_dot_auth/.packaging/leap/15.2/pre-test.sh b/modules/experimental_dot_auth/.packaging/leap/15.2/pre-test.sh
deleted file mode 100755
index df5d7845..00000000
--- a/modules/experimental_dot_auth/.packaging/leap/15.2/pre-test.sh
+++ /dev/null
@@ -1 +0,0 @@
-luarocks --lua-version 5.1 install basexx --from=https://mah0x211.github.io/rocks/
diff --git a/modules/experimental_dot_auth/.packaging/leap/15.2/rundeps b/modules/experimental_dot_auth/.packaging/leap/15.2/rundeps
deleted file mode 100644
index 9e636d84..00000000
--- a/modules/experimental_dot_auth/.packaging/leap/15.2/rundeps
+++ /dev/null
@@ -1,4 +0,0 @@
-lua51-luarocks
-git
-tar
-gzip
diff --git a/modules/experimental_dot_auth/.packaging/test.config b/modules/experimental_dot_auth/.packaging/test.config
deleted file mode 100644
index 39e9aed8..00000000
--- a/modules/experimental_dot_auth/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('experimental_dot_auth')
-assert(experimental_dot_auth)
-quit()
diff --git a/modules/experimental_dot_auth/.packaging/ubuntu/16.04/NOTSUPPORTED b/modules/experimental_dot_auth/.packaging/ubuntu/16.04/NOTSUPPORTED
deleted file mode 100644
index e69de29b..00000000
--- a/modules/experimental_dot_auth/.packaging/ubuntu/16.04/NOTSUPPORTED
+++ /dev/null
diff --git a/modules/experimental_dot_auth/.packaging/ubuntu/18.04/rundeps b/modules/experimental_dot_auth/.packaging/ubuntu/18.04/rundeps
deleted file mode 100644
index 36b83e18..00000000
--- a/modules/experimental_dot_auth/.packaging/ubuntu/18.04/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-basexx
diff --git a/modules/experimental_dot_auth/.packaging/ubuntu/20.04/rundeps b/modules/experimental_dot_auth/.packaging/ubuntu/20.04/rundeps
deleted file mode 100644
index 36b83e18..00000000
--- a/modules/experimental_dot_auth/.packaging/ubuntu/20.04/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-basexx
diff --git a/modules/graphite/.packaging/centos/7/rundeps b/modules/graphite/.packaging/centos/7/rundeps
deleted file mode 100644
index 3da806bd..00000000
--- a/modules/graphite/.packaging/centos/7/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-cqueues
diff --git a/modules/graphite/.packaging/centos/8/rundeps b/modules/graphite/.packaging/centos/8/rundeps
deleted file mode 100644
index 182251d9..00000000
--- a/modules/graphite/.packaging/centos/8/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua5.1-cqueues
diff --git a/modules/graphite/.packaging/debian/10/rundeps b/modules/graphite/.packaging/debian/10/rundeps
deleted file mode 100644
index 3da806bd..00000000
--- a/modules/graphite/.packaging/debian/10/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-cqueues
diff --git a/modules/graphite/.packaging/debian/9/rundeps b/modules/graphite/.packaging/debian/9/rundeps
deleted file mode 100644
index 3da806bd..00000000
--- a/modules/graphite/.packaging/debian/9/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-cqueues
diff --git a/modules/graphite/.packaging/fedora/31/rundeps b/modules/graphite/.packaging/fedora/31/rundeps
deleted file mode 100644
index 182251d9..00000000
--- a/modules/graphite/.packaging/fedora/31/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua5.1-cqueues
diff --git a/modules/graphite/.packaging/fedora/32/rundeps b/modules/graphite/.packaging/fedora/32/rundeps
deleted file mode 100644
index 182251d9..00000000
--- a/modules/graphite/.packaging/fedora/32/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua5.1-cqueues
diff --git a/modules/graphite/.packaging/leap/15.2/NOTSUPPORTED b/modules/graphite/.packaging/leap/15.2/NOTSUPPORTED
deleted file mode 100644
index b1ae77d0..00000000
--- a/modules/graphite/.packaging/leap/15.2/NOTSUPPORTED
+++ /dev/null
@@ -1,6 +0,0 @@
-
-ERROR:test_packaging:Installing https://luarocks.org/cqueues-20190813.51-0.src.rock
-164 Error: Failed extracting rel-20190813.tar.gz
-
-Doesn't work on GitLab CI/CD, but works on localhost.
-gzip and tar packages are installed, and all packages have the same versions as the packages in the localhost docker container.
diff --git a/modules/graphite/.packaging/leap/15.2/pre-test.sh b/modules/graphite/.packaging/leap/15.2/pre-test.sh
deleted file mode 100755
index 9614066a..00000000
--- a/modules/graphite/.packaging/leap/15.2/pre-test.sh
+++ /dev/null
@@ -1 +0,0 @@
-luarocks --lua-version 5.1 install cqueues --from=https://mah0x211.github.io/rocks/
diff --git a/modules/graphite/.packaging/leap/15.2/rundeps b/modules/graphite/.packaging/leap/15.2/rundeps
deleted file mode 100644
index 83238871..00000000
--- a/modules/graphite/.packaging/leap/15.2/rundeps
+++ /dev/null
@@ -1,6 +0,0 @@
-libopenssl-devel
-lua51-luarocks
-git
-tar
-gzip
-m4
diff --git a/modules/graphite/.packaging/test.config b/modules/graphite/.packaging/test.config
deleted file mode 100644
index c23033b1..00000000
--- a/modules/graphite/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('graphite')
-assert(graphite)
-quit()
diff --git a/modules/graphite/.packaging/ubuntu/16.04/rundeps b/modules/graphite/.packaging/ubuntu/16.04/rundeps
deleted file mode 100644
index 3da806bd..00000000
--- a/modules/graphite/.packaging/ubuntu/16.04/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-cqueues
diff --git a/modules/graphite/.packaging/ubuntu/18.04/rundeps b/modules/graphite/.packaging/ubuntu/18.04/rundeps
deleted file mode 100644
index 3da806bd..00000000
--- a/modules/graphite/.packaging/ubuntu/18.04/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-cqueues
diff --git a/modules/graphite/.packaging/ubuntu/20.04/rundeps b/modules/graphite/.packaging/ubuntu/20.04/rundeps
deleted file mode 100644
index 3da806bd..00000000
--- a/modules/graphite/.packaging/ubuntu/20.04/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-cqueues
diff --git a/modules/hints/.packaging/test.config b/modules/hints/.packaging/test.config
deleted file mode 100644
index d89c7f0c..00000000
--- a/modules/hints/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('hints')
-assert(hints)
-quit()
diff --git a/modules/hints/README.rst b/modules/hints/README.rst
index 97d24ddc..7d775188 100644
--- a/modules/hints/README.rst
+++ b/modules/hints/README.rst
@@ -8,8 +8,7 @@ Static hints
This is a module providing static hints for forward records (A/AAAA) and reverse records (PTR).
The records can be loaded from ``/etc/hosts``-like files and/or added directly.
-You can also use the module to change the root hints; they are used as a safety belt or if the root NS
-drops out of cache.
+You can also use the module to change fallback addresses for the root servers.
.. tip::
@@ -110,6 +109,9 @@ Properties
Replace current root hints and return the current table of root hints.
+ These root hints are only used as fallback when addresses of ``NS .`` aren't available,
+ e.g. when cache is completely clear.
+
.. tip:: If no parameters are passed, it only returns current root hints set without changing anything.
Example:
@@ -127,8 +129,6 @@ Properties
[1] => 202.12.27.33
}
- .. tip:: A good rule of thumb is to select only a few fastest root hints. The server learns RTT and NS quality over time, and thus tries all servers available. You can help it by preselecting the candidates.
-
.. function:: hints.use_nodata(toggle)
:param bool toggle: true if enabling NODATA synthesis, false if disabling
@@ -136,6 +136,8 @@ Properties
If set to true (the default), NODATA will be synthesised for matching hint name, but mismatching type (e.g. AAAA query when only A hint exists).
+ The setting is (now) per-entry, so you should set it before adding any address-name pairs.
+
.. function:: hints.ttl([new_ttl])
:param int new_ttl: new TTL to set (optional)
@@ -143,3 +145,5 @@ Properties
This function allows reading and writing the TTL value used for records generated by the hints module.
+ The setting is (now) per-entry, so you should set it before adding any address-name pairs.
+
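Since both settings above now apply per-entry, ordering matters in the config; a minimal Lua sketch (the hostname, address and file path are made-up examples):

    -- set TTL and NODATA synthesis *before* adding any pairs,
    -- since both values are baked into each entry at insertion time
    hints.ttl(300)
    hints.use_nodata(true)
    hints.set('gw.example.net 192.0.2.1') -- gets ttl=300 and a NODATA rule
    hints.add_hosts('/etc/hosts.custom')  -- same for every pair in the file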
diff --git a/modules/hints/hints.c b/modules/hints/hints.c
index 25d41c90..ccbc9880 100644
--- a/modules/hints/hints.c
+++ b/modules/hints/hints.c
@@ -20,6 +20,7 @@
#include "lib/zonecut.h"
#include "lib/module.h"
#include "lib/layer.h"
+#include "lib/rules/api.h"
#include <inttypes.h>
#include <math.h>
@@ -29,8 +30,6 @@
#define ERR_MSG(...) kr_log_error(HINT, "[ ]" __VA_ARGS__)
struct hints_data {
- struct kr_zonecut hints;
- struct kr_zonecut reverse_hints;
bool use_nodata; /**< See hint_use_nodata() description, exposed via lua. */
uint32_t ttl; /**< TTL used for the hints, exposed via lua. */
};
@@ -45,130 +44,6 @@ static char * bool2jsonstr(bool val)
return result;
}
-static int put_answer(knot_pkt_t *pkt, struct kr_query *qry, knot_rrset_t *rr, bool use_nodata)
-{
- int ret = 0;
- if (!knot_rrset_empty(rr) || use_nodata) {
- /* Update packet question */
- if (!knot_dname_is_equal(knot_pkt_qname(pkt), rr->owner)) {
- kr_pkt_recycle(pkt);
- knot_pkt_put_question(pkt, qry->sname, qry->sclass, qry->stype);
- }
- if (!knot_rrset_empty(rr)) {
- /* Append to packet */
- ret = knot_pkt_put_rotate(pkt, KNOT_COMPR_HINT_QNAME, rr,
- qry->reorder, KNOT_PF_FREE);
- } else {
- /* Return empty answer if name exists, but type doesn't match */
- knot_wire_set_aa(pkt->wire);
- }
- } else {
- ret = kr_error(ENOENT);
- }
- /* Clear RR if failed */
- if (ret != 0) {
- knot_rrset_clear(rr, &pkt->mm);
- }
- return ret;
-}
-
-static int satisfy_reverse(/*const*/ struct hints_data *data,
- knot_pkt_t *pkt, struct kr_query *qry)
-{
- /* Find a matching name */
- pack_t *addr_set = kr_zonecut_find(&data->reverse_hints, qry->sname);
- if (!addr_set || addr_set->len == 0) {
- return kr_error(ENOENT);
- }
- knot_dname_t *qname = knot_dname_copy(qry->sname, &pkt->mm);
- knot_rrset_t rr;
- knot_rrset_init(&rr, qname, KNOT_RRTYPE_PTR, KNOT_CLASS_IN, data->ttl);
-
- /* Append address records from hints */
- uint8_t *addr = pack_last(*addr_set);
- if (addr != NULL) {
- size_t len = pack_obj_len(addr);
- void *addr_val = pack_obj_val(addr);
- knot_rrset_add_rdata(&rr, addr_val, len, &pkt->mm);
- }
-
- return put_answer(pkt, qry, &rr, data->use_nodata);
-}
-
-static int satisfy_forward(/*const*/ struct hints_data *data,
- knot_pkt_t *pkt, struct kr_query *qry)
-{
- /* Find a matching name */
- pack_t *addr_set = kr_zonecut_find(&data->hints, qry->sname);
- if (!addr_set || addr_set->len == 0) {
- return kr_error(ENOENT);
- }
- knot_dname_t *qname = knot_dname_copy(qry->sname, &pkt->mm);
- knot_rrset_t rr;
- knot_rrset_init(&rr, qname, qry->stype, qry->sclass, data->ttl);
-
- size_t family_len;
- switch (rr.type) {
- case KNOT_RRTYPE_A:
- family_len = sizeof(struct in_addr);
- break;
- case KNOT_RRTYPE_AAAA:
- family_len = sizeof(struct in6_addr);
- break;
- default:
- goto finish;
- };
-
- /* Append address records from hints */
- uint8_t *addr = pack_head(*addr_set);
- while (addr != pack_tail(*addr_set)) {
- size_t len = pack_obj_len(addr);
- void *addr_val = pack_obj_val(addr);
- if (len == family_len) {
- knot_rrset_add_rdata(&rr, addr_val, len, &pkt->mm);
- }
- addr = pack_obj_next(addr);
- }
-finish:
- return put_answer(pkt, qry, &rr, data->use_nodata);
-}
-
-static int query(kr_layer_t *ctx, knot_pkt_t *pkt)
-{
- struct kr_query *qry = ctx->req->current_query;
- if (!qry || (ctx->state & KR_STATE_FAIL)) {
- return ctx->state;
- }
-
- struct kr_module *module = ctx->api->data;
- struct hints_data *data = module->data;
- if (!data) { /* No valid file. */
- return ctx->state;
- }
- /* We can optimize for early return like this: */
- if (!data->use_nodata && qry->stype != KNOT_RRTYPE_A
- && qry->stype != KNOT_RRTYPE_AAAA && qry->stype != KNOT_RRTYPE_PTR) {
- return ctx->state;
- }
- /* FIXME: putting directly into packet breaks ordering in case the hint
- * is applied after a CNAME jump. */
- if (knot_dname_in_bailiwick(qry->sname, (const uint8_t *)"\4arpa\0") >= 0) {
- if (satisfy_reverse(data, pkt, qry) != 0)
- return ctx->state;
- } else {
- if (satisfy_forward(data, pkt, qry) != 0)
- return ctx->state;
- }
-
- VERBOSE_MSG(qry, "<= answered from hints\n");
- qry->flags.DNSSEC_WANT = false; /* Never authenticated */
- qry->flags.CACHED = true;
- qry->flags.NO_MINIMIZE = true;
- pkt->parsed = pkt->size;
- knot_wire_set_qr(pkt->wire);
- return KR_STATE_DONE;
-}
-
static int parse_addr_str(union kr_sockaddr *sa, const char *addr)
{
int family = strchr(addr, ':') ? AF_INET6 : AF_INET;
@@ -228,7 +103,7 @@ static const knot_dname_t * addr2reverse(const char *addr)
kr_inaddr_family(&ia.ip));
}
-static int add_pair(struct kr_zonecut *hints, const char *name, const char *addr)
+static int add_pair_root(struct kr_zonecut *hints, const char *name, const char *addr)
{
/* Build key */
knot_dname_t key[KNOT_DNAME_MAXLEN];
@@ -241,73 +116,107 @@ static int add_pair(struct kr_zonecut *hints, const char *name, const char *addr
if (parse_addr_str(&ia, addr) != 0) {
return kr_error(EINVAL);
}
-
return kr_zonecut_add(hints, key, kr_inaddr(&ia.ip), kr_inaddr_len(&ia.ip));
}
-static int add_reverse_pair(struct kr_zonecut *hints, const char *name, const char *addr)
+static int add_pair(const struct hints_data *data, const char *name, const char *addr)
{
- const knot_dname_t *key = addr2reverse(addr);
+ /* Build key */
+ knot_dname_t key[KNOT_DNAME_MAXLEN];
+ if (!knot_dname_from_str(key, name, sizeof(key))) {
+ return kr_error(EINVAL);
+ }
+ knot_dname_to_lower(key);
- if (key == NULL) {
+ union kr_sockaddr ia;
+ if (parse_addr_str(&ia, addr) != 0) {
return kr_error(EINVAL);
}
+ uint16_t rrtype = ia.ip.sa_family == AF_INET6 ? KNOT_RRTYPE_AAAA : KNOT_RRTYPE_A;
+ knot_rrset_t rrs;
+ knot_rrset_init(&rrs, key, rrtype, KNOT_CLASS_IN, data->ttl);
+ int ret;
+ if (ia.ip.sa_family == AF_INET6) {
+ ret = knot_rrset_add_rdata(&rrs, (const uint8_t *)&ia.ip6.sin6_addr, 16, NULL);
+ } else {
+ ret = knot_rrset_add_rdata(&rrs, (const uint8_t *)&ia.ip4.sin_addr, 4, NULL);
+ }
+ if (!ret) ret = kr_rule_local_data_ins(&rrs, NULL, KR_RULE_TAGS_ALL);
+ if (!ret && data->use_nodata) {
+ rrs.type = KNOT_RRTYPE_CNAME;
+ rrs.rrs.count = 0;
+ rrs.rrs.size = 0;
+ ret = kr_rule_local_data_ins(&rrs, NULL, KR_RULE_TAGS_ALL);
+ }
+
+ knot_rdataset_clear(&rrs.rrs, NULL);
+ return ret;
+}
+
+static int add_reverse_pair(const struct hints_data *data, const char *name, const char *addr)
+{
+ const knot_dname_t *key = addr2reverse(addr);
+ if (!key)
+ return kr_error(EINVAL);
+ knot_rrset_t rrs;
+ knot_rrset_init(&rrs, /*const-cast*/(knot_dname_t *)key,
+ KNOT_RRTYPE_PTR, KNOT_CLASS_IN, data->ttl);
knot_dname_t ptr_name[KNOT_DNAME_MAXLEN];
- if (!knot_dname_from_str(ptr_name, name, sizeof(ptr_name))) {
+ if (!knot_dname_from_str(ptr_name, name, sizeof(ptr_name)))
return kr_error(EINVAL);
+ int ret = knot_rrset_add_rdata(&rrs, ptr_name, knot_dname_size(ptr_name), NULL);
+ if (!ret) {
+ ret = kr_rule_local_data_ins(&rrs, NULL, KR_RULE_TAGS_ALL);
+ knot_rdataset_clear(&rrs.rrs, NULL);
}
-
- return kr_zonecut_add(hints, key, ptr_name, knot_dname_size(ptr_name));
+ return ret;
}
/** For a given name, remove either one address or all of them (if == NULL).
*
* Also remove the corresponding reverse records.
*/
static int del_pair(struct hints_data *data, const char *name, const char *addr)
{
- /* Build key */
- knot_dname_t key[KNOT_DNAME_MAXLEN];
- if (!knot_dname_from_str(key, name, sizeof(key))) {
+ // Parse addr
+ if (!addr)
+ return kr_error(ENOSYS);
+ union kr_sockaddr ia;
+ if (parse_addr_str(&ia, addr) != 0)
return kr_error(EINVAL);
- }
- int key_len = knot_dname_size(key);
-
- if (addr) {
- /* Remove the pair. */
- union kr_sockaddr ia;
- if (parse_addr_str(&ia, addr) != 0) {
- return kr_error(EINVAL);
- }
-
- const knot_dname_t *reverse_key = addr2reverse(addr);
- kr_zonecut_del(&data->reverse_hints, reverse_key, key, key_len);
- return kr_zonecut_del(&data->hints, key,
- kr_inaddr(&ia.ip), kr_inaddr_len(&ia.ip));
- }
- /* We're removing everything for the name;
- * first find the name's pack */
- pack_t *addr_set = kr_zonecut_find(&data->hints, key);
- if (!addr_set || addr_set->len == 0) {
- return kr_error(ENOENT);
- }
-
- /* Remove address records in hints from reverse_hints. */
-
- for (uint8_t *a = pack_head(*addr_set); a != pack_tail(*addr_set);
- a = pack_obj_next(a)) {
- void *addr_val = pack_obj_val(a);
- int family = pack_obj_len(a) == kr_family_len(AF_INET)
- ? AF_INET : AF_INET6;
- const knot_dname_t *reverse_key = raw_addr2reverse(addr_val, family);
- if (reverse_key != NULL) {
- kr_zonecut_del(&data->reverse_hints, reverse_key, key, key_len);
- }
- }
- /* Remove the whole name. */
- return kr_zonecut_del_all(&data->hints, key);
+ // Remove the PTR
+ const knot_dname_t *reverse_key = addr2reverse(addr);
+ knot_rrset_t rrs;
+ knot_rrset_init(&rrs, /*const-cast*/(knot_dname_t *)reverse_key,
+ KNOT_RRTYPE_PTR, KNOT_CLASS_IN, data->ttl);
+ int ret = kr_rule_local_data_del(&rrs, KR_RULE_TAGS_ALL);
+ if (ret != 1)
+ VERBOSE_MSG(NULL, "del_pair PTR for %s; error: %s\n", addr, kr_strerror(ret));
+ if (ret != 1 && ret != kr_error(ENOENT)) // ignore ENOENT for PTR (duplicities)
+ return ret;
+
+ // Remove the forward entry
+ knot_dname_t key_buf[KNOT_DNAME_MAXLEN];
+ rrs.owner = knot_dname_from_str(key_buf, name, sizeof(key_buf));
+ if (!rrs.owner)
+ return kr_error(EINVAL);
+ rrs.type = ia.ip.sa_family == AF_INET6 ? KNOT_RRTYPE_AAAA : KNOT_RRTYPE_A;
+ ret = kr_rule_local_data_del(&rrs, KR_RULE_TAGS_ALL);
+ if (ret != 1)
+ VERBOSE_MSG(NULL, "del_pair for %s; error: %s\n", name, kr_strerror(ret));
+
+ // Remove the NODATA entry; again, not perfect matching,
+ // but we don't care much about this dynamic hints API.
+ if (ret == 1 && data->use_nodata) {
+ rrs.type = KNOT_RRTYPE_CNAME;
+ ret = kr_rule_local_data_del(&rrs, KR_RULE_TAGS_ALL);
+ if (ret != 1)
+ VERBOSE_MSG(NULL, "del_pair for NODATA %s; error: %s\n",
+ name, kr_strerror(ret));
+ }
+ return ret < 0 ? ret : kr_ok();
}
static int load_file(struct kr_module *module, const char *path)
@@ -343,31 +252,21 @@ static int load_file(struct kr_module *module, const char *path)
}
const char *canonical_name = strtok_r(NULL, " \t\n", &saveptr);
if (canonical_name == NULL) {
- ret = -1;
+ ret = kr_error(EINVAL);
goto error;
}
- /* Since the last added PTR records takes preference,
- * we add canonical name as the last one. */
const char *name_tok;
while ((name_tok = strtok_r(NULL, " \t\n", &saveptr)) != NULL) {
- ret = add_pair(&data->hints, name_tok, addr);
- if (!ret) {
- ret = add_reverse_pair(&data->reverse_hints, name_tok, addr);
- }
- if (ret) {
- ret = -1;
+ ret = add_pair(data, name_tok, addr);
+ if (ret)
goto error;
- }
count += 1;
}
- ret = add_pair(&data->hints, canonical_name, addr);
- if (!ret) {
- ret = add_reverse_pair(&data->reverse_hints, canonical_name, addr);
- }
- if (ret) {
- ret = -1;
+ ret = add_pair(data, canonical_name, addr);
+ if (!ret) // PTR only to the canonical name
+ ret = add_reverse_pair(data, canonical_name, addr);
+ if (ret)
goto error;
- }
count += 1;
}
error:
@@ -408,12 +307,9 @@ static char* hint_set(void *env, struct kr_module *module, const char *args)
if (addr) {
*addr = '\0';
++addr;
- ret = add_reverse_pair(&data->reverse_hints, args_copy, addr);
- if (ret) {
- del_pair(data, args_copy, addr);
- } else {
- ret = add_pair(&data->hints, args_copy, addr);
- }
+ ret = add_reverse_pair(data, args_copy, addr);
+ if (!ret)
+ ret = add_pair(data, args_copy, addr);
}
return bool2jsonstr(ret == 0);
@@ -435,6 +331,8 @@ static char* hint_del(void *env, struct kr_module *module, const char *args)
++addr;
}
ret = del_pair(data, args_copy, addr);
+ if (ret)
+ VERBOSE_MSG(NULL, "hints.del(%s) error: %s\n", args, kr_strerror(ret));
return bool2jsonstr(ret == 0);
}
@@ -457,7 +355,6 @@ static JsonNode *pack_addrs(pack_t *pack)
return root;
}
-static char* pack_hints(struct kr_zonecut *hints);
/**
* Retrieve address hints, either for given name or for all names.
*
@@ -466,30 +363,7 @@ static char* pack_hints(struct kr_zonecut *hints);
*/
static char* hint_get(void *env, struct kr_module *module, const char *args)
{
- struct kr_zonecut *hints = &((struct hints_data *) module->data)->hints;
- if (kr_fails_assert(hints))
- return NULL;
-
- if (!args) {
- return pack_hints(hints);
- }
-
- knot_dname_t key[KNOT_DNAME_MAXLEN];
- pack_t *pack = NULL;
- if (knot_dname_from_str(key, args, sizeof(key))) {
- pack = kr_zonecut_find(hints, key);
- }
- if (!pack || pack->len == 0) {
- return NULL;
- }
-
- char *result = NULL;
- JsonNode *root = pack_addrs(pack);
- if (root) {
- result = json_encode(root);
- json_delete(root);
- }
- return result;
+ return NULL;
}
/** @internal Pack all hints into serialized JSON. */
@@ -515,8 +389,12 @@ static void unpack_hint(struct kr_zonecut *root_hints, JsonNode *table, const ch
JsonNode *node = NULL;
json_foreach(node, table) {
switch(node->tag) {
- case JSON_STRING: add_pair(root_hints, name ? name : node->key, node->string_); break;
- case JSON_ARRAY: unpack_hint(root_hints, node, name ? name : node->key); break;
+ case JSON_STRING:
+ add_pair_root(root_hints, name ? name : node->key, node->string_);
+ break;
+ case JSON_ARRAY:
+ unpack_hint(root_hints, node, name ? name : node->key);
+ break;
default: continue;
}
}
@@ -597,14 +475,22 @@ static char* hint_ttl(void *env, struct kr_module *module, const char *args)
KR_EXPORT
int hints_init(struct kr_module *module)
{
- static kr_layer_api_t layer = {
- .produce = &query,
- };
+ static kr_layer_api_t layer = { 0 };
/* Store module reference */
layer.data = module;
module->layer = &layer;
static const struct kr_prop props[] = {
+ /* FIXME(decide): .set() and .del() used to work on individual RRs;
+ * now they overwrite or delete whole RRsets.
+ * Also, .get() doesn't work at all.
+ *
+ * It really depends what kind of config/API we'll be exposing to user.
+ * - Manipulating whole RRsets generally makes more sense to me.
+ * (But hints.set() currently can't even insert larger sets.)
+ * - We'll probably be deprecating access through these non-declarative
+ * commands (set, get, del) which are also usable dynamically.
+ */
{ &hint_set, "set", "Set {name, address} hint.", },
{ &hint_del, "del", "Delete one {name, address} hint or all addresses for the name.", },
{ &hint_get, "get", "Retrieve hint for given name.", },
@@ -617,17 +503,9 @@ int hints_init(struct kr_module *module)
};
module->props = props;
- knot_mm_t *pool = mm_ctx_mempool2(MM_DEFAULT_BLKSIZE);
- if (!pool) {
- return kr_error(ENOMEM);
- }
- struct hints_data *data = mm_alloc(pool, sizeof(struct hints_data));
- if (!data) {
- mp_delete(pool->ctx);
+ struct hints_data *data = malloc(sizeof(*data));
+ if (!data)
return kr_error(ENOMEM);
- }
- kr_zonecut_init(&data->hints, (const uint8_t *)(""), pool);
- kr_zonecut_init(&data->reverse_hints, (const uint8_t *)(""), pool);
data->use_nodata = true;
data->ttl = HINTS_TTL_DEFAULT;
module->data = data;
@@ -639,13 +517,8 @@ int hints_init(struct kr_module *module)
KR_EXPORT
int hints_deinit(struct kr_module *module)
{
- struct hints_data *data = module->data;
- if (data) {
- kr_zonecut_deinit(&data->hints);
- kr_zonecut_deinit(&data->reverse_hints);
- mp_delete(data->hints.pool->ctx);
- module->data = NULL;
- }
+ free(module->data);
+ module->data = NULL;
return kr_ok();
}
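The FIXME above changes the semantics of the dynamic hints API; a rough Lua sketch of the behaviour after this rewrite (names and addresses are illustrative):

    hints.set('foo.example 192.0.2.1') -- inserts A and PTR local-data rules
    hints.del('foo.example 192.0.2.1') -- now removes the whole RRset, not a single RR
    hints.get('foo.example')           -- currently returns nothing, as noted above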
diff --git a/modules/http/.packaging/centos/7/rundeps b/modules/http/.packaging/centos/7/rundeps
deleted file mode 100644
index c557cb28..00000000
--- a/modules/http/.packaging/centos/7/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-http
diff --git a/modules/http/.packaging/centos/8/rundeps b/modules/http/.packaging/centos/8/rundeps
deleted file mode 100644
index ed5aee15..00000000
--- a/modules/http/.packaging/centos/8/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua5.1-http
diff --git a/modules/http/.packaging/debian/10/rundeps b/modules/http/.packaging/debian/10/rundeps
deleted file mode 100644
index c557cb28..00000000
--- a/modules/http/.packaging/debian/10/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-http
diff --git a/modules/http/.packaging/debian/9/rundeps b/modules/http/.packaging/debian/9/rundeps
deleted file mode 100644
index c557cb28..00000000
--- a/modules/http/.packaging/debian/9/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-http
diff --git a/modules/http/.packaging/fedora/31/rundeps b/modules/http/.packaging/fedora/31/rundeps
deleted file mode 100644
index ed5aee15..00000000
--- a/modules/http/.packaging/fedora/31/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua5.1-http
diff --git a/modules/http/.packaging/fedora/32/rundeps b/modules/http/.packaging/fedora/32/rundeps
deleted file mode 100644
index ed5aee15..00000000
--- a/modules/http/.packaging/fedora/32/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua5.1-http
diff --git a/modules/http/.packaging/leap/15.2/NOTSUPPORTED b/modules/http/.packaging/leap/15.2/NOTSUPPORTED
deleted file mode 100644
index bb50260c..00000000
--- a/modules/http/.packaging/leap/15.2/NOTSUPPORTED
+++ /dev/null
@@ -1,5 +0,0 @@
-
-https://github.com/wahern/luaossl/issues/175
-
-
-Doesn't work with libopenssl-devel 1.1.0i-lp151.1.1
diff --git a/modules/http/.packaging/leap/15.2/pre-test.sh b/modules/http/.packaging/leap/15.2/pre-test.sh
deleted file mode 100755
index bb1e1311..00000000
--- a/modules/http/.packaging/leap/15.2/pre-test.sh
+++ /dev/null
@@ -1 +0,0 @@
-luarocks --lua-version 5.1 install http --from=https://mah0x211.github.io/rocks/
diff --git a/modules/http/.packaging/leap/15.2/rundeps b/modules/http/.packaging/leap/15.2/rundeps
deleted file mode 100644
index ab051889..00000000
--- a/modules/http/.packaging/leap/15.2/rundeps
+++ /dev/null
@@ -1,7 +0,0 @@
-libopenssl-devel
-lua51-devel
-lua51-luarocks
-git
-tar
-gzip
-m4
diff --git a/modules/http/.packaging/test.config b/modules/http/.packaging/test.config
deleted file mode 100644
index cb5e5dd5..00000000
--- a/modules/http/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('http')
-assert(http)
-quit()
diff --git a/modules/http/.packaging/ubuntu/16.04/NOTSUPPORTED b/modules/http/.packaging/ubuntu/16.04/NOTSUPPORTED
deleted file mode 100644
index e69de29b..00000000
--- a/modules/http/.packaging/ubuntu/16.04/NOTSUPPORTED
+++ /dev/null
diff --git a/modules/http/.packaging/ubuntu/18.04/rundeps b/modules/http/.packaging/ubuntu/18.04/rundeps
deleted file mode 100644
index c557cb28..00000000
--- a/modules/http/.packaging/ubuntu/18.04/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-http
diff --git a/modules/http/.packaging/ubuntu/20.04/rundeps b/modules/http/.packaging/ubuntu/20.04/rundeps
deleted file mode 100644
index c557cb28..00000000
--- a/modules/http/.packaging/ubuntu/20.04/rundeps
+++ /dev/null
@@ -1 +0,0 @@
-lua-http
diff --git a/modules/nsid/.packaging/test.config b/modules/nsid/.packaging/test.config
deleted file mode 100644
index de54cceb..00000000
--- a/modules/nsid/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('nsid')
-assert(nsid)
-quit()
diff --git a/modules/policy/.packaging/test.config b/modules/policy/.packaging/test.config
deleted file mode 100644
index 60c9ddc0..00000000
--- a/modules/policy/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('policy')
-assert(policy)
-quit()
diff --git a/modules/policy/policy.lua b/modules/policy/policy.lua
index 47e436f0..443fc0b0 100644
--- a/modules/policy/policy.lua
+++ b/modules/policy/policy.lua
@@ -5,8 +5,6 @@ local ffi = require('ffi')
local LOG_GRP_POLICY_TAG = ffi.string(ffi.C.kr_log_grp2name(ffi.C.LOG_GRP_POLICY))
local LOG_GRP_REQDBG_TAG = ffi.string(ffi.C.kr_log_grp2name(ffi.C.LOG_GRP_REQDBG))
-local todname = kres.str2dname -- not available during module load otherwise
-
-- Counter of unique rules
local nextid = 0
local function getruleid()
@@ -71,7 +69,8 @@ end
-- policy functions are defined below
local policy = {}
-function policy.PASS(state, _)
+function policy.PASS(state, req)
+ policy.FLAGS('PASSTHRU_LEGACY')(state, req)
return state
end
@@ -269,91 +268,6 @@ function policy.ANSWER(rtable, nodata)
end
end
-local dname_localhost = todname('localhost.')
-
--- Rule for localhost. zone; see RFC6303, sec. 3
-local function localhost(_, req)
- local qry = req:current()
- local answer = req:ensure_answer()
- if answer == nil then return nil end
- ffi.C.kr_pkt_make_auth_header(answer)
-
- local is_exact = ffi.C.knot_dname_is_equal(qry.sname, dname_localhost)
-
- answer:rcode(kres.rcode.NOERROR)
- answer:begin(kres.section.ANSWER)
- if qry.stype == kres.type.AAAA then
- answer:put(qry.sname, 900, answer:qclass(), kres.type.AAAA,
- '\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1')
- elseif qry.stype == kres.type.A then
- answer:put(qry.sname, 900, answer:qclass(), kres.type.A, '\127\0\0\1')
- elseif is_exact and qry.stype == kres.type.SOA then
- mkauth_soa(answer, dname_localhost)
- elseif is_exact and qry.stype == kres.type.NS then
- answer:put(dname_localhost, 900, answer:qclass(), kres.type.NS, dname_localhost)
- else
- answer:begin(kres.section.AUTHORITY)
- mkauth_soa(answer, dname_localhost)
- end
- return kres.DONE
-end
-
-local dname_rev4_localhost = todname('1.0.0.127.in-addr.arpa');
-local dname_rev4_localhost_apex = todname('127.in-addr.arpa');
-
--- Rule for reverse localhost.
--- Answer with locally served minimal 127.in-addr.arpa domain, only having
--- a PTR record in 1.0.0.127.in-addr.arpa, and with 1.0...0.ip6.arpa. zone.
--- TODO: much of this would better be left to the hints module (or coordinated).
-local function localhost_reversed(_, req)
- local qry = req:current()
- local answer = req:ensure_answer()
- if answer == nil then return nil end
-
- -- classify qry.sname:
- local is_exact -- exact dname for localhost
- local is_apex -- apex of a locally-served localhost zone
- local is_nonterm -- empty non-terminal name
- if ffi.C.knot_dname_in_bailiwick(qry.sname, todname('ip6.arpa.')) > 0 then
- -- exact ::1 query (relying on the calling rule)
- is_exact = true
- is_apex = true
- else
- -- within 127.in-addr.arpa.
- local labels = ffi.C.knot_dname_labels(qry.sname, nil)
- if labels == 3 then
- is_exact = false
- is_apex = true
- elseif labels == 4+2 and ffi.C.knot_dname_is_equal(
- qry.sname, dname_rev4_localhost) then
- is_exact = true
- else
- is_exact = false
- is_apex = false
- is_nonterm = ffi.C.knot_dname_in_bailiwick(dname_rev4_localhost, qry.sname) > 0
- end
- end
-
- ffi.C.kr_pkt_make_auth_header(answer)
- answer:rcode(kres.rcode.NOERROR)
- answer:begin(kres.section.ANSWER)
- if is_exact and qry.stype == kres.type.PTR then
- answer:put(qry.sname, 900, answer:qclass(), kres.type.PTR, dname_localhost)
- elseif is_apex and qry.stype == kres.type.SOA then
- mkauth_soa(answer, dname_rev4_localhost_apex, dname_localhost)
- elseif is_apex and qry.stype == kres.type.NS then
- answer:put(dname_rev4_localhost_apex, 900, answer:qclass(), kres.type.NS,
- dname_localhost)
- else
- if not is_nonterm then
- answer:rcode(kres.rcode.NXDOMAIN)
- end
- answer:begin(kres.section.AUTHORITY)
- mkauth_soa(answer, dname_rev4_localhost_apex, dname_localhost)
- end
- return kres.DONE
-end
-
-- All requests
function policy.all(action)
return function(_, _) return action end
@@ -916,172 +830,88 @@ function policy.todnames(names)
return names
end
--- RFC1918 Private, local, broadcast, test and special zones
--- Considerations: RFC6761, sec 6.1.
--- https://www.iana.org/assignments/locally-served-dns-zones
-local private_zones = {
- -- RFC6303
- '10.in-addr.arpa.',
- '16.172.in-addr.arpa.',
- '17.172.in-addr.arpa.',
- '18.172.in-addr.arpa.',
- '19.172.in-addr.arpa.',
- '20.172.in-addr.arpa.',
- '21.172.in-addr.arpa.',
- '22.172.in-addr.arpa.',
- '23.172.in-addr.arpa.',
- '24.172.in-addr.arpa.',
- '25.172.in-addr.arpa.',
- '26.172.in-addr.arpa.',
- '27.172.in-addr.arpa.',
- '28.172.in-addr.arpa.',
- '29.172.in-addr.arpa.',
- '30.172.in-addr.arpa.',
- '31.172.in-addr.arpa.',
- '168.192.in-addr.arpa.',
- '0.in-addr.arpa.',
- '254.169.in-addr.arpa.',
- '2.0.192.in-addr.arpa.',
- '100.51.198.in-addr.arpa.',
- '113.0.203.in-addr.arpa.',
- '255.255.255.255.in-addr.arpa.',
- -- RFC7793
- '64.100.in-addr.arpa.',
- '65.100.in-addr.arpa.',
- '66.100.in-addr.arpa.',
- '67.100.in-addr.arpa.',
- '68.100.in-addr.arpa.',
- '69.100.in-addr.arpa.',
- '70.100.in-addr.arpa.',
- '71.100.in-addr.arpa.',
- '72.100.in-addr.arpa.',
- '73.100.in-addr.arpa.',
- '74.100.in-addr.arpa.',
- '75.100.in-addr.arpa.',
- '76.100.in-addr.arpa.',
- '77.100.in-addr.arpa.',
- '78.100.in-addr.arpa.',
- '79.100.in-addr.arpa.',
- '80.100.in-addr.arpa.',
- '81.100.in-addr.arpa.',
- '82.100.in-addr.arpa.',
- '83.100.in-addr.arpa.',
- '84.100.in-addr.arpa.',
- '85.100.in-addr.arpa.',
- '86.100.in-addr.arpa.',
- '87.100.in-addr.arpa.',
- '88.100.in-addr.arpa.',
- '89.100.in-addr.arpa.',
- '90.100.in-addr.arpa.',
- '91.100.in-addr.arpa.',
- '92.100.in-addr.arpa.',
- '93.100.in-addr.arpa.',
- '94.100.in-addr.arpa.',
- '95.100.in-addr.arpa.',
- '96.100.in-addr.arpa.',
- '97.100.in-addr.arpa.',
- '98.100.in-addr.arpa.',
- '99.100.in-addr.arpa.',
- '100.100.in-addr.arpa.',
- '101.100.in-addr.arpa.',
- '102.100.in-addr.arpa.',
- '103.100.in-addr.arpa.',
- '104.100.in-addr.arpa.',
- '105.100.in-addr.arpa.',
- '106.100.in-addr.arpa.',
- '107.100.in-addr.arpa.',
- '108.100.in-addr.arpa.',
- '109.100.in-addr.arpa.',
- '110.100.in-addr.arpa.',
- '111.100.in-addr.arpa.',
- '112.100.in-addr.arpa.',
- '113.100.in-addr.arpa.',
- '114.100.in-addr.arpa.',
- '115.100.in-addr.arpa.',
- '116.100.in-addr.arpa.',
- '117.100.in-addr.arpa.',
- '118.100.in-addr.arpa.',
- '119.100.in-addr.arpa.',
- '120.100.in-addr.arpa.',
- '121.100.in-addr.arpa.',
- '122.100.in-addr.arpa.',
- '123.100.in-addr.arpa.',
- '124.100.in-addr.arpa.',
- '125.100.in-addr.arpa.',
- '126.100.in-addr.arpa.',
- '127.100.in-addr.arpa.',
-
- -- RFC6303
- -- localhost_reversed handles ::1
- '0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.',
- 'd.f.ip6.arpa.',
- '8.e.f.ip6.arpa.',
- '9.e.f.ip6.arpa.',
- 'a.e.f.ip6.arpa.',
- 'b.e.f.ip6.arpa.',
- '8.b.d.0.1.0.0.2.ip6.arpa.',
- -- RFC8375
- 'home.arpa.',
-}
-policy.todnames(private_zones)
-
-- @var Default rules
policy.rules = {}
policy.postrules = {}
-policy.special_names = {
- -- XXX: beware of special_names_optim() when modifying these filters
- {
- cb=policy.suffix_common(policy.DENY_MSG(
- 'Blocking is mandated by standards, see references on '
- .. 'https://www.iana.org/assignments/'
- .. 'locally-served-dns-zones/locally-served-dns-zones.xhtml',
- kres.extended_error.NOTSUP),
- private_zones, todname('arpa.')),
- count=0
- },
- {
- cb=policy.suffix(policy.DENY_MSG(
- 'Blocking is mandated by standards, see references on '
- .. 'https://www.iana.org/assignments/'
- .. 'special-use-domain-names/special-use-domain-names.xhtml',
- kres.extended_error.NOTSUP),
+
+-- This certainly isn't perfect, but it allows lua config like:
+-- kr_view_insert_action('127.0.0.0/24', policy.TAGS_ASSIGN({'t01', 't02'}))
+local kr_rule_tags_t = ffi.typeof('kr_rule_tags_t[1]')
+function policy.get_tagset(names)
+ local result = ffi.new(kr_rule_tags_t, 0)
+ for _, name in pairs(names) do
+ if ffi.C.kr_rule_tag_add(name, result) ~= 0 then
+ error('converting tagset failed')
+ end
+ end
+ return result[0] -- it's atomic value fortunately
+end
+function policy.tags_assign_bitmap(bitmap)
+ return function (_, req)
+ req.rule_tags = bitmap
+ end
+end
+function policy.TAGS_ASSIGN(names)
+ local bitmap = policy.get_tagset(names)
+ return 'policy.tags_assign_bitmap(' .. tostring(bitmap) .. ')'
+end
+
+--[[ Insert a forwarding rule, i.e. override upstream for one DNS subtree.
+
+Throws lua exceptions when detecting something fishy.
+
+\param subtree plain string
+\param options
+ .auth targets are authoritative (false by default = resolver)
+ .dnssec if overridden to false, don't validate DNSSEC locally
+ - for resolvers we still do *not* send CD=1 upstream,
+ i.e. we trust their DNSSEC validation.
+ - for auths this inserts a negative trust anchor
+ Beware that setting .set_insecure() *later* would override that.
+\param targets same format as policy.TLS_FORWARD() except that `tls = true`
+ can be specified for each address (defaults to false)
+--]]
+function policy.rule_forward_add(subtree, options, targets)
+ local targets_2 = {}
+ for _, target in ipairs(targets) do
+ local port_default = 53
+ if target.tls or false then
+ port_default = 853
+ -- lots of code; easiest to just call it this way; checks and throws
+ -- The extra .tls field gets ignored.
+ policy.TLS_FORWARD({target})
+ end
+
+ -- this also throws on failure
+ local sock = addr2sock(target[1], port_default)
+ if options.auth then
+ local port = ffi.C.kr_inaddr_port(sock)
+ assert(not options.tls and port == port_default)
+ end
+ table.insert(targets_2, sock)
+ end
+ local targets_3 = ffi.new('const struct sockaddr * [?]', #targets_2 + 1, targets_2)
+ targets_3[#targets_2] = nil
+
+ local subtree_dname = todname(subtree)
+ assert(ffi.C.kr_rule_forward(subtree_dname,
{
- todname('test.'),
- todname('onion.'),
- todname('invalid.'),
- todname('local.'), -- RFC 8375.4
- }),
- count=0
- },
- {
- cb=policy.suffix(localhost, {dname_localhost}),
- count=0
- },
- {
- cb=policy.suffix_common(localhost_reversed, {
- todname('127.in-addr.arpa.'),
- todname('1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.')},
- todname('arpa.')),
- count=0
- },
-}
+ is_nods = options.dnssec == false,
+ is_auth = options.auth,
+ },
+ targets_3
+ ) == 0)
--- Return boolean; false = no special name may apply, true = some might apply.
--- The point is to *efficiently* filter almost all QNAMEs that do not apply.
-local function special_names_optim(req, sname)
- local qname_size = req.qsource.packet.qname_size
- if qname_size < 9 then return true end -- don't want to special-case bad array access
- local root = sname + qname_size - 1
- return
- -- .a???. or .t???.
- (root[-5] == 4 and (root[-4] == 97 or root[-4] == 116))
- -- .on???. or .in?????. or lo???. or *ost.
- or (root[-6] == 5 and root[-5] == 111 and root[-4] == 110)
- or (root[-8] == 7 and root[-7] == 105 and root[-6] == 110)
- or (root[-6] == 5 and root[-5] == 108 and root[-4] == 111)
- or (root[-3] == 111 and root[-2] == 115 and root[-1] == 116)
+ -- Probably the best way to turn off DNSSEC validation for auth is negative TA.
+ if options.auth and options.dnssec == false then
+ local ntas = kres.context().negative_anchors
+ assert(ffi.C.kr_ta_add(ntas, subtree_dname, kres.type.DS, 0, nil, 0) == 0)
+ end
end
+
+local view_action_buf = ffi.new('knot_db_val_t[1]')
+
-- Top-down policy list walk until we hit a match
-- the caller is responsible for reordering policy list
-- from most specific to least specific.
@@ -1091,10 +921,14 @@ policy.layer = {
begin = function(state, req)
-- Don't act on "finished" cases.
if bit.band(state, bit.bor(kres.FAIL, kres.DONE)) ~= 0 then return state end
+
+ if ffi.C.kr_view_select_action(req, view_action_buf) == 0 then
+ local act_str = ffi.string(view_action_buf[0].data, view_action_buf[0].len)
+ return loadstring('return '..act_str)()(state, req)
+ end
+
local qry = req:initial() -- same as :current() but more descriptive
return policy.evaluate(policy.rules, req, qry, state)
- or (special_names_optim(req, qry.sname)
- and policy.evaluate(policy.special_names, req, qry, state))
or state
end,
finish = function(state, req)
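A usage sketch of the two new policy entry points (the upstream address and tag names are illustrative); note that TAGS_ASSIGN returns a string which the view-action lookup in policy.layer.begin above evaluates via loadstring:

    -- forward example.net. to a plain-DNS resolver, skipping local DNSSEC validation
    policy.rule_forward_add('example.net.', { dnssec = false }, { {'192.0.2.53'} })
    -- assign tags to queries matched by a view, per the comment above policy.get_tagset:
    -- kr_view_insert_action('127.0.0.0/24', policy.TAGS_ASSIGN({'t01', 't02'}))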
diff --git a/modules/predict/.packaging/test.config b/modules/predict/.packaging/test.config
deleted file mode 100644
index b8e706e3..00000000
--- a/modules/predict/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('predict')
-assert(predict)
-quit()
diff --git a/modules/prefill/.packaging/test.config b/modules/prefill/.packaging/test.config
deleted file mode 100644
index d0258b02..00000000
--- a/modules/prefill/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('prefill')
-assert(prefill)
-quit()
diff --git a/modules/priming/.packaging/test.config b/modules/priming/.packaging/test.config
deleted file mode 100644
index 63239f07..00000000
--- a/modules/priming/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('priming')
-assert(priming)
-quit()
diff --git a/modules/rebinding/.packaging/test.config b/modules/rebinding/.packaging/test.config
deleted file mode 100644
index 0a84b88b..00000000
--- a/modules/rebinding/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('rebinding')
-assert(rebinding)
-quit()
diff --git a/modules/refuse_nord/.packaging/test.config b/modules/refuse_nord/.packaging/test.config
deleted file mode 100644
index 8679e269..00000000
--- a/modules/refuse_nord/.packaging/test.config
+++ /dev/null
@@ -1,3 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-assert(modules.load('refuse_nord') == true)
-quit()
diff --git a/modules/renumber/.packaging/test.config b/modules/renumber/.packaging/test.config
deleted file mode 100644
index 37f136ab..00000000
--- a/modules/renumber/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('renumber')
-assert(renumber)
-quit()
diff --git a/modules/serve_stale/.packaging/test.config b/modules/serve_stale/.packaging/test.config
deleted file mode 100644
index 362c4ec8..00000000
--- a/modules/serve_stale/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('serve_stale')
-assert(serve_stale)
-quit()
diff --git a/modules/stats/.packaging/test.config b/modules/stats/.packaging/test.config
deleted file mode 100644
index fd25460d..00000000
--- a/modules/stats/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('stats')
-assert(stats)
-quit()
diff --git a/modules/stats/README.rst b/modules/stats/README.rst
index 7d423aa8..014c9f06 100644
--- a/modules/stats/README.rst
+++ b/modules/stats/README.rst
@@ -89,6 +89,8 @@ Built-in counters keep track of number of queries and answers matching specific
+-----------------+----------------------------------+
| answer.slow | completed in more than 1500ms |
+-----------------+----------------------------------+
+| answer.sum_ms | sum of all latencies in ms |
++-----------------+----------------------------------+
+-----------------+----------------------------------+
| **Answer flags** |
diff --git a/modules/stats/stats.c b/modules/stats/stats.c
index ebb28778..ca3a932c 100644
--- a/modules/stats/stats.c
+++ b/modules/stats/stats.c
@@ -42,6 +42,7 @@
X(answer,total) X(answer,noerror) X(answer,nodata) X(answer,nxdomain) X(answer,servfail) \
X(answer,cached) X(answer,1ms) X(answer,10ms) X(answer,50ms) X(answer,100ms) \
X(answer,250ms) X(answer,500ms) X(answer,1000ms) X(answer,1500ms) X(answer,slow) \
+ X(answer,sum_ms) \
X(answer,aa) X(answer,tc) X(answer,rd) X(answer,ra) X(answer, ad) X(answer,cd) \
X(answer,edns0) X(answer,do) \
X(query,edns) X(query,dnssec) \
@@ -220,6 +221,7 @@ static int collect(kr_layer_t *ctx)
/* Histogram of answer latency. */
struct kr_query *first = rplan->resolved.at[0];
uint64_t elapsed = kr_now() - first->timestamp_mono;
+ stat_const_add(data, metric_answer_sum_ms, elapsed);
if (elapsed <= 1) {
stat_const_add(data, metric_answer_1ms, 1);
} else if (elapsed <= 10) {
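The new answer.sum_ms counter accumulates the latency of every answer, so a mean latency can be derived from the existing answer.total; a hedged Lua snippet (assuming the stats module is loaded):

    local s = stats.list()
    if s['answer.total'] > 0 then
        -- mean answer latency in milliseconds since start-up
        print(s['answer.sum_ms'] / s['answer.total'])
    end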
diff --git a/modules/stats/test.integr/kresd_config.j2 b/modules/stats/test.integr/kresd_config.j2
index 4db7caab..872ce2e3 100644
--- a/modules/stats/test.integr/kresd_config.j2
+++ b/modules/stats/test.integr/kresd_config.j2
@@ -52,6 +52,7 @@ function reply_result(state, req)
local result = check_stats(got)
return result(state, req)
end
+policy.add(policy.all(policy.FLAGS('PASSTHRU_LEGACY'))) -- the test isn't written with this in mind
policy.add(policy.pattern(reply_result, 'stats.test.'))
policy.add(policy.all(FWD_TARGET)) -- avoid iteration
diff --git a/modules/ta_sentinel/.packaging/test.config b/modules/ta_sentinel/.packaging/test.config
deleted file mode 100644
index 4bb6ac9a..00000000
--- a/modules/ta_sentinel/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('ta_sentinel')
-assert(ta_sentinel)
-quit()
diff --git a/modules/ta_signal_query/.packaging/test.config b/modules/ta_signal_query/.packaging/test.config
deleted file mode 100644
index dfa7c2a5..00000000
--- a/modules/ta_signal_query/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('ta_signal_query')
-assert(ta_signal_query)
-quit()
diff --git a/modules/ta_update/.packaging/test.config b/modules/ta_update/.packaging/test.config
deleted file mode 100644
index 5fe55875..00000000
--- a/modules/ta_update/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('ta_update')
-assert(ta_update)
-quit()
diff --git a/modules/view/.packaging/test.config b/modules/view/.packaging/test.config
deleted file mode 100644
index b639fdaf..00000000
--- a/modules/view/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('view')
-assert(view)
-quit()
diff --git a/modules/watchdog/.packaging/test.config b/modules/watchdog/.packaging/test.config
deleted file mode 100644
index 9d1a291c..00000000
--- a/modules/watchdog/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('watchdog')
-assert(watchdog)
-quit()
diff --git a/modules/workarounds/.packaging/test.config b/modules/workarounds/.packaging/test.config
deleted file mode 100644
index c420810b..00000000
--- a/modules/workarounds/.packaging/test.config
+++ /dev/null
@@ -1,4 +0,0 @@
--- SPDX-License-Identifier: GPL-3.0-or-later
-modules.load('workarounds')
-assert(workarounds)
-quit()
diff --git a/scripts/enable-repo.py b/scripts/enable-repo.py
new file mode 100755
index 00000000..2b9319eb
--- /dev/null
+++ b/scripts/enable-repo.py
@@ -0,0 +1,132 @@
+#!/usr/bin/python3
+"""
+Enable Knot Resolver upstream repo on current system.
+
+Requires python3-distro.
+
+Run this as ROOT.
+"""
+
+import argparse
+import distro as distro_
+from pathlib import Path
+from subprocess import run, PIPE
+import sys
+
+
+REPO_CHOICES = ['latest', 'testing', 'build']
+
+
+def detect_distro():
+ return '%s-%s' % (distro_.id(), distro_.version())
+
+
+def parse_distro(distro):
+ id_, _, ver_ = distro.rpartition('-')
+ return id_, ver_
+
+
+def distro2obs(distro):
+ distro_id, distro_ver = parse_distro(distro)
+ if not str(distro_ver):
+ return None
+ if distro_id == 'debian':
+ return 'Debian_%s' % distro_ver
+ if distro_id == 'ubuntu':
+ return 'xUbuntu_%s' % distro_ver
+ if distro_id == 'opensuse-leap':
+ return 'openSUSE_Leap_%s' % distro_ver
+ return None
+
+
+def show_info():
+ print("distro ID: %s" % detect_distro())
+ print("distro name: %s %s" % (distro_.name(), distro_.version(pretty=True)))
+
+
+def enable_deb_repo(repo_id, distro):
+ obs_distro = distro2obs(distro)
+ if not obs_distro:
+ return fail('unsupported Debian-based distro: %s' % distro)
+
+ requires = ['python3-requests', 'gnupg']
+ print("installing required packages: %s" % ' '.join(requires))
+ p = run(['apt', 'install', '-y'] + requires)
+ import requests
+
+ sources_p = Path('/etc/apt/sources.list.d/%s.list' % repo_id)
+ sources_txt = 'deb http://download.opensuse.org/repositories/home:/CZ-NIC:/%s/%s/ /' % (repo_id, obs_distro)
+ key_url = 'https://download.opensuse.org/repositories/home:CZ-NIC:%s/%s/Release.key' % (repo_id, obs_distro)
+ print("writing sources list: %s" % sources_p)
+ with sources_p.open('wt') as f:
+ f.write(sources_txt + '\n')
+ print(sources_txt)
+ print("fetching key: %s" % key_url)
+ r = requests.get(key_url)
+ if not r.ok:
+ return fail('failed to fetch repo key: %s' % key_url)
+ key_txt = r.content.decode('utf-8')
+ print("adding key using `apt-key add`")
+ p = run(['apt-key', 'add', '-'], input=key_txt, encoding='utf-8')
+ if p.returncode != 0:
+ print('apt-key add failed :(')
+ run(['apt', 'update'])
+ print("%s repo added" % repo_id)
+
+
+def enable_suse_repo(repo_id, distro):
+ obs_distro = distro2obs(distro)
+ if not obs_distro:
+ return fail('unsupported SUSE distro: %s' % distro)
+
+ repo_url = 'https://download.opensuse.org/repositories/home:CZ-NIC:{repo}/{distro}/home:CZ-NIC:{repo}.repo'.format(
+ repo=repo_id, distro=obs_distro)
+ print("adding OBS repo: %s" % repo_url)
+ run(['zypper', 'addrepo', repo_url])
+ run(['zypper', '--no-gpg-checks', 'refresh'])
+
+
+def enable_repo(repo_id, distro):
+ distro_id, distro_ver = parse_distro(distro)
+ print("enable %s repo on %s" % (repo_id, distro))
+
+ if distro_id in ['debian', 'ubuntu']:
+ enable_deb_repo(repo_id, distro)
+ elif distro_id == 'opensuse-leap':
+ enable_suse_repo(repo_id, distro)
+ elif distro_id == 'arch':
+ print("no external repo needed on %s" % distro_id)
+ else:
+ fail("unsupported distro: %s" % distro_id)
+
+
+def fail(msg):
+ print(msg)
+ sys.exit(1)
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Enable Knot Resolver repo on this system")
+ parser.add_argument('repo', choices=REPO_CHOICES, nargs='?', default=REPO_CHOICES[0],
+ help="repo to enable")
+ parser.add_argument('-d', '--distro', type=str,
+ help="override target distro (DISTRO-VERSION format)")
+ parser.add_argument('-i', '--info', action='store_true',
+ help="show distro information and exit")
+
+ args = parser.parse_args()
+ if args.info:
+ show_info()
+ return
+
+ distro = args.distro
+ if not distro:
+ distro = detect_distro()
+
+ repo = 'knot-resolver-%s' % args.repo
+ enable_repo(repo, distro)
+
+
+if __name__ == '__main__':
+ main()
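
(Aside, not part of the diff: the script above maps a python3-distro identifier
of the form "<id>-<version>" to an OBS directory name. A minimal standalone
sketch of that mapping, with the function body mirroring the script's
parse_distro()/distro2obs():)

    # Sketch of the distro -> OBS path mapping implemented by enable-repo.py.
    def distro2obs(distro):
        id_, _, ver = distro.rpartition('-')  # 'opensuse-leap-15.2' -> ('opensuse-leap', '15.2')
        if not ver:
            return None
        return {'debian': 'Debian_%s' % ver,
                'ubuntu': 'xUbuntu_%s' % ver,
                'opensuse-leap': 'openSUSE_Leap_%s' % ver}.get(id_)

    assert distro2obs('debian-11') == 'Debian_11'
    assert distro2obs('opensuse-leap-15.2') == 'openSUSE_Leap_15.2'
    assert distro2obs('centos-8') is None  # not mapped; enable_repo() reports it as unsupported
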
diff --git a/scripts/make-doc.sh b/scripts/make-doc.sh
index d41e234c..c1ad6b5e 100755
--- a/scripts/make-doc.sh
+++ b/scripts/make-doc.sh
@@ -2,6 +2,20 @@
# SPDX-License-Identifier: GPL-3.0-or-later
cd "$(dirname ${0})/.."
+# generate JSON schema for the manager's declarative config
+pushd manager
+## the following python command should run without any dependencies beyond the Python standard library
+mkdir -p ../doc/_static/
+python3 -m knot_resolver_manager.cli schema > ../doc/_static/config.schema.json
+generate-schema-doc --config expand_buttons=true ../doc/_static/config.schema.json ../doc/_static/schema_doc.html
+
+# generate a readable version of the JSON schema
+# we could replace jsonschema2md with the following at some point in the future:
+#generate-schema-doc --config template_name=md --config show_toc=false ../doc/_static/config.schema.json ../doc/_static/schema_doc.md
+jsonschema2md ../doc/_static/config.schema.json /dev/stdout | sed 's/^#/###/' > ../doc/config-schema-body.md
+popd
+
+
pushd doc
doxygen
popd
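
(Aside, not part of the diff: the `sed 's/^#/###/'` step above demotes every
Markdown heading emitted by jsonschema2md by two levels, so the schema body
nests under the manual's existing section hierarchy. The same transformation
as a standalone sketch:)

    # Demote Markdown headings by two levels, equivalent to `sed 's/^#/###/'`.
    def demote_headings(md: str) -> str:
        return '\n'.join('##' + line if line.startswith('#') else line
                         for line in md.splitlines())

    assert demote_headings('# Top\n## Sub\nbody') == '### Top\n#### Sub\nbody'
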
diff --git a/systemd/knot-resolver.service.in b/systemd/knot-resolver.service.in
new file mode 100644
index 00000000..a6886700
--- /dev/null
+++ b/systemd/knot-resolver.service.in
@@ -0,0 +1,22 @@
+[Unit]
+Description=Knot Resolver Manager
+Documentation=man:knot-resolver.systemd(7)
+Wants=network-online.target
+After=network-online.target
+Before=nss-lookup.target
+Wants=nss-lookup.target
+
+[Service]
+Type=notify
+TimeoutStartSec=10s
+ExecStart=@bin_dir@/knot-resolver --config=@etc_dir@/config.yml
+ExecReload=@bin_dir@/kresctl --socket @run_dir@/manager.sock reload
+KillSignal=SIGINT
+WorkingDirectory=@systemd_work_dir@
+User=@user@
+Group=@group@
+CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_SETPCAP
+AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_SETPCAP
+
+[Install]
+WantedBy=multi-user.target
\ No newline at end of file
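
(Aside, not part of the diff: Type=notify together with TimeoutStartSec=10s
means the manager must report readiness to systemd within 10 seconds or
startup fails. The manager's side is not shown in this commit; a minimal
sketch of such a readiness notification, using only the Python standard
library, could look like this:)

    import os
    import socket

    def sd_notify_ready():
        # systemd passes the notification socket path in NOTIFY_SOCKET;
        # a leading '@' denotes an abstract-namespace socket.
        addr = os.environ.get('NOTIFY_SOCKET')
        if not addr:
            return  # not running under Type=notify
        if addr.startswith('@'):
            addr = '\0' + addr[1:]
        with socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) as s:
            s.connect(addr)
            s.send(b'READY=1')
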
diff --git a/systemd/meson.build b/systemd/meson.build
index 6ca0bac1..0948e69f 100644
--- a/systemd/meson.build
+++ b/systemd/meson.build
@@ -8,6 +8,7 @@ systemd_config.set('group', group)
systemd_config.set('systemd_work_dir', systemd_work_dir)
systemd_config.set('systemd_cache_dir', systemd_cache_dir)
systemd_config.set('sbin_dir', sbin_dir)
+systemd_config.set('bin_dir', bin_dir)
systemd_config.set('etc_dir', etc_dir)
systemd_config.set('run_dir', run_dir)
systemd_config.set('lib_dir', lib_dir)
@@ -30,6 +31,12 @@ if systemd_files == 'enabled'
sources: 'kresd.target',
install_dir: systemd_unit_dir,
)
+ manager_service = configure_file(
+ input: 'knot-resolver.service.in',
+ output: 'knot-resolver.service',
+ configuration: systemd_config,
+ install_dir: systemd_unit_dir,
+ )
## man page
kresd_systemd_man = configure_file(
diff --git a/tests/dnstap/src/dnstap-test/go.mod b/tests/dnstap/src/dnstap-test/go.mod
new file mode 100644
index 00000000..6b650889
--- /dev/null
+++ b/tests/dnstap/src/dnstap-test/go.mod
@@ -0,0 +1,9 @@
+module gitlab.nic.cz/knot/knot-resolver/tests/dnstap-test
+
+go 1.17
+
+require (
+ github.com/cloudflare/dns v0.0.0-20151007113418-e20ffa3da443
+ github.com/dnstap/golang-dnstap v0.4.0
+ google.golang.org/protobuf v1.30.0
+)
diff --git a/tests/dnstap/src/dnstap-test/go.sum b/tests/dnstap/src/dnstap-test/go.sum
new file mode 100644
index 00000000..1860f9ef
--- /dev/null
+++ b/tests/dnstap/src/dnstap-test/go.sum
@@ -0,0 +1,44 @@
+github.com/cloudflare/dns v0.0.0-20151007113418-e20ffa3da443 h1:dYR6/V5rx/uaHsy4m1JuWfKYZO0r+G89BLD+XN7s9AI=
+github.com/cloudflare/dns v0.0.0-20151007113418-e20ffa3da443/go.mod h1:pa4p3oKOxzbXjrV5AGD1v5xjL7skv9BvO4J0Llo3P+s=
+github.com/dnstap/golang-dnstap v0.4.0 h1:KRHBoURygdGtBjDI2w4HifJfMAhhOqDuktAokaSa234=
+github.com/dnstap/golang-dnstap v0.4.0/go.mod h1:FqsSdH58NAmkAvKcpyxht7i4FoBjKu8E4JUPt8ipSUs=
+github.com/farsightsec/golang-framestream v0.3.0 h1:/spFQHucTle/ZIPkYqrfshQqPe2VQEzesH243TjIwqA=
+github.com/farsightsec/golang-framestream v0.3.0/go.mod h1:eNde4IQyEiA5br02AouhEHCu3p3UzrCdFR4LuQHklMI=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/miekg/dns v1.1.31 h1:sJFOl9BgwbYAWOGEwr61FU28pqsBNdpRBnhGXtO06Oo=
+github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550 h1:ObdrDkeb4kJdCP557AjRjq69pTHfNouLtWZG7j9rPN8=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
diff --git a/tests/dnstap/src/dnstap-test/main.go b/tests/dnstap/src/dnstap-test/main.go
index c04b4c14..a9d6635c 100644
--- a/tests/dnstap/src/dnstap-test/main.go
+++ b/tests/dnstap/src/dnstap-test/main.go
@@ -6,7 +6,7 @@ import (
"fmt"
"github.com/cloudflare/dns"
dnstap "github.com/dnstap/golang-dnstap"
- "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/proto"
"io"
"io/ioutil"
"log"
diff --git a/tests/dnstap/src/dnstap-test/run.sh b/tests/dnstap/src/dnstap-test/run.sh
index 2f32ea18..37822b75 100755
--- a/tests/dnstap/src/dnstap-test/run.sh
+++ b/tests/dnstap/src/dnstap-test/run.sh
@@ -4,6 +4,7 @@ KRESD_CMD=$1
MESON_BUILD_ROOT=$(pwd)
mkdir -p tests/dnstap
export GOPATH=$MESON_BUILD_ROOT/tests/dnstap
+echo "$GOPATH"
cd "$(dirname $0)"
DNSTAP_TEST=dnstap-test
@@ -11,15 +12,17 @@ if [ -z "$GITLAB_CI" ]; then
type -P go >/dev/null || exit 77
echo "Building the dnstap test and its dependencies..."
# some packages may be missing on the system right now
- go get github.com/{FiloSottile/gvt,cloudflare/dns,dnstap/golang-dnstap,golang/protobuf/proto}
+ go get .
else
# In CI we've prebuilt dependencies into the default GOPATH.
# We're in a scratch container, so we just add the dnstap test inside.
export GOPATH=/root/go
fi
-DTAP=$GOPATH/src/$DNSTAP_TEST
+DTAP_DIR="$GOPATH/src"
+DTAP="$DTAP_DIR/$DNSTAP_TEST"
+mkdir -p "$DTAP_DIR"
rm -f $DTAP && ln -s $(realpath ..)/$DNSTAP_TEST $DTAP
-go install $DNSTAP_TEST
+go install .
CONFIG=$(realpath ./config)
diff --git a/tests/dnstap/src/dnstap-test/vendor/manifest b/tests/dnstap/src/dnstap-test/vendor/manifest
deleted file mode 100644
index 27c1dec1..00000000
--- a/tests/dnstap/src/dnstap-test/vendor/manifest
+++ /dev/null
@@ -1,55 +0,0 @@
-{
- "version": 0,
- "dependencies": [
- {
- "importpath": "github.com/cloudflare/dns",
- "repository": "https://github.com/cloudflare/dns",
- "vcs": "git",
- "revision": "e20ffa3da443071c7b3d164dec5b1f80dfb2ecf3",
- "branch": "master",
- "notests": true
- },
- {
- "importpath": "github.com/dnstap/golang-dnstap",
- "repository": "https://github.com/dnstap/golang-dnstap",
- "vcs": "git",
- "revision": "0145fd8482619f9c04788c7ba4e96cdeef64a041",
- "branch": "master",
- "notests": true
- },
- {
- "importpath": "github.com/farsightsec/golang-framestream",
- "repository": "https://github.com/farsightsec/golang-framestream",
- "vcs": "git",
- "revision": "b600ccf606747139c84b6d69b5c3988164db4d42",
- "branch": "master",
- "notests": true
- },
- {
- "importpath": "github.com/golang/protobuf/proto",
- "repository": "https://github.com/golang/protobuf",
- "vcs": "git",
- "revision": "8ee79997227bf9b34611aee7946ae64735e6fd93",
- "branch": "master",
- "path": "/proto",
- "notests": true
- },
- {
- "importpath": "github.com/golang/protobuf/ptypes/any",
- "repository": "https://github.com/golang/protobuf",
- "vcs": "git",
- "revision": "8ee79997227bf9b34611aee7946ae64735e6fd93",
- "branch": "master",
- "path": "ptypes/any",
- "notests": true
- },
- {
- "importpath": "github.com/miekg/dns",
- "repository": "https://github.com/miekg/dns",
- "vcs": "git",
- "revision": "f4d2b086946a624202dc59e6a43f72e8f3f02bc1",
- "branch": "master",
- "notests": true
- }
- ]
-}
\ No newline at end of file
diff --git a/tests/integration/deckard b/tests/integration/deckard
-Subproject 9ec5992515f7bad376692b85030ee2ee009d687
+Subproject b5b338678d48a9807097000afe03ebfdf705f7a
diff --git a/tests/packaging/README.rst b/tests/packaging/README.rst
deleted file mode 100644
index 997f6667..00000000
--- a/tests/packaging/README.rst
+++ /dev/null
@@ -1,87 +0,0 @@
-.. SPDX-License-Identifier: GPL-3.0-or-later
-
-Packaging tests
-===============
-
-Packaging tests use pytest and docker; each directory containing a *.packaging*
-subdirectory is called a *component*.
-
-Run tests for all components:
-
-.. code-block::
-
- pytest -r fEsxX tests/packaging
-
-List all components:
-
-.. code-block::
-
- pytest tests/packaging --collect-only
-
-Run tests for a specific component (*doc/.packaging*):
-
-.. code-block::
-
- pytest -r fEsxX tests/packaging -k test_collect[debian_10-doc/.packaging]
-
-.. note::
-
- For debugging, add the :code:`-s` argument.
-
-daemon/.packaging component
----------------------------
-
-This is a special component that is used by all other components.
-For each distribution and version, two docker images are created from this component:
-one with the build dependencies and one with the runtime dependencies.
-*Build docker image* is tagged as :code:`kr-packaging-tests-<distro><version>-build`
-and *Run docker image* is tagged as :code:`kr-packaging-tests-<distro><version>-run`.
-
-Other components
------------------
-
-All other components are based on the *daemon/.packaging* component (docker image).
-When a component needs new build dependencies, new runtime dependencies,
-or scripts that change the build or run phase (see `File structure of each component`_),
-a new docker image is created.
-*Build docker image* is tagged as :code:`kr-packaging-tests-<distro><version>-<component>-build`
-and *Run docker image* is tagged as :code:`kr-packaging-tests-<distro><version>-<component>-run`.
-
-File structure of each component
-------------------------------------
-
-* <distro>
- * <version>
- * builddeps - list of build dependencies
- * rundeps - list of runtime dependencies
- * pre-build.sh - script called before build phase
- * post-build.sh - script called after build phase
- * pre-run.sh - script called before run phase
- * post-run.sh - script called after run phase
- * install.sh and build.sh - scripts that override the standard commands for building and installing knot-resolver
- * pre-test.sh - script called immediately before testing
-* test.config or test.sh - kresd test config or shell script (one of them must exist)
-
-Commands order to create docker image
--------------------------------------
-
-For *build docker image*:
-
-#. run pre-build.sh
-#. install packages specified in the file *builddeps*
-#. run build.sh
-#. run install.sh
-#. run post-build.sh
-
-For *run docker image*:
-
-#. run pre-run.sh
-#. install packages specified in the file *rundeps*
-#. run pre-test.sh
-#. run test (:code:`kresd -c test.config` or :code:`test.sh`)
-#. run post-run.sh
-
-
-.. note::
-
- knot-resolver built in the *build docker image* is automatically copied to the *run docker image*.
diff --git a/tests/packaging/conftest.py b/tests/packaging/conftest.py
deleted file mode 100644
index 7279c15c..00000000
--- a/tests/packaging/conftest.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import pytest
-import os
-
-
-def pytest_configure():
- pytest.KR_PYTESTS_DIR = os.path.dirname(os.path.realpath(__file__))
- pytest.KR_ROOT_DIR = os.path.join(pytest.KR_PYTESTS_DIR, "..", "..")
- pytest.KR_PREFIX = "kr-packaging-tests-"
diff --git a/tests/packaging/test_packaging.py b/tests/packaging/test_packaging.py
deleted file mode 100644
index 1a9bc413..00000000
--- a/tests/packaging/test_packaging.py
+++ /dev/null
@@ -1,494 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import os
-import pytest
-import docker
-import logging
-from pathlib import Path
-from abc import ABC, abstractmethod
-
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-logger.setLevel(logging.DEBUG)
-client = docker.from_env()
-
-
-class DockerCmdError(Exception):
- """ Raised when shell command in Docker container failed """
- pass
-
-
-class ContainerHandler():
- def __init__(self, image):
- self.img_id = image
- self.container = None
-
- def run(self):
- self.container = client.containers.run(self.img_id, network_mode='host',
- tty=True, detach=True)
- logger.info('Run container ID={}'.format(self.container))
-
- def stop(self):
- self.container.kill()
-
- def exec_cmd(self, cmd, workdir):
- # workaround: when exec_run is called in GitLab CI/CD, the workdir argument doesn't work.
- inter_cmd = ''
- if workdir is not None:
- inter_cmd = 'cd {}; '.format(workdir)
-
- rcode, out = self.container.exec_run('/bin/sh -c \'' + inter_cmd + cmd + '\'')
- if rcode != 0:
- raise DockerCmdError(rcode, out)
-
- def getFiles(self, output, path):
- strm, stat = self.container.get_archive(path)
- with open(output, 'wb') as ofile:
- for data in strm:
- ofile.write(data)
-
-
-class DockerImages(ABC):
- def __init__(self, version):
- self.version = version
- self.module = None
- self.distro = None
- self.build_id = None
- self.run_id = None
-
- @abstractmethod
- def cmd_pkgs_install(self):
- raise NotImplementedError
-
- @abstractmethod
- def cmd_kresd_install(self):
- raise NotImplementedError
-
- @abstractmethod
- def cmd_kresd_build(self):
- raise NotImplementedError
-
- def readDependencies(self, deps_file):
- """Read dependencies from file"""
- listf = None
- try:
- with open(deps_file, 'r') as f:
- listf = f.read().splitlines()
- except FileNotFoundError:
- pass
-
- return listf
-
- def __genDockerFile(self, path, from_image=None):
- """Generate Dockerfile for build image"""
- if self.module is None:
- raise AttributeError
-
- if from_image is None:
- if os.path.isfile(os.path.join(self.module, self.distro, 'docker-image-name')):
- with open(os.path.join(self.module, self.distro, 'docker-image-name')) as f:
- from_image = f.read()
- else:
- from_image = '{0}:{1}'.format(self.distro, self.version)
-
- distro_dir = os.path.join(self.module, self.distro, self.version)
-
- dockerf = open(os.path.join(path, 'Dockerfile-build'), 'w')
-
- dockerf.write('FROM {}\n'.format(from_image))
- dockerf.write('WORKDIR /root/kresd\n')
- if self.module == 'daemon/.packaging':
- dockerf.write('COPY . /root/kresd\n')
- # when this file doesn't exist, tzdata needs user interaction
- dockerf.write('RUN if [ ! -f /etc/localtime ];' +
- 'then ln -fs /usr/share/zoneinfo/Europe/Prague /etc/localtime; fi\n')
- if os.path.isfile(os.path.join(distro_dir, 'pre-build.sh')):
- dockerf.write('RUN {}\n'.format(os.path.join(distro_dir, 'pre-build.sh')))
- if os.path.isfile(os.path.join(distro_dir, 'builddeps')):
- dockerf.write('RUN {0} {1}\n'.format(self.cmd_pkgs_install(),
- ' '.join(self.readDependencies(os.path.join(distro_dir, 'builddeps')))))
- if os.path.isfile(os.path.join(distro_dir, 'build.sh')):
- dockerf.write('RUN {}\n'.format(os.path.join(distro_dir, 'build.sh')))
- else:
- dockerf.write('RUN {}\n'.format(self.cmd_kresd_build()))
- if os.path.isfile(os.path.join(distro_dir, 'install.sh')):
- dockerf.write('RUN {}\n'.format(os.path.join(distro_dir, 'install.sh')))
- else:
- dockerf.write('RUN {}\n'.format(self.cmd_kresd_install()))
- if os.path.isfile(os.path.join(distro_dir, 'post-build.sh')):
- dockerf.write('RUN {}\n'.format(os.path.join(distro_dir, 'post-build.sh')))
-
- dockerf.close()
-
- def __genDockerFile_run(self, path, build_id, from_image=None):
- """Generate Dockerfile for run image"""
- if self.module is None:
- raise AttributeError
-
- if from_image is None:
- if os.path.isfile(os.path.join(self.module, self.distro, 'docker-image-name')):
- with open(os.path.join(self.module, self.distro, 'docker-image-name')) as f:
- from_image = f.read()
- else:
- from_image = '{0}:{1}'.format(self.distro, self.version)
-
- distro_dir = os.path.join(self.module, self.distro, self.version)
-
- dockerf = open(os.path.join(path, 'Dockerfile-run'), 'w')
-
- dockerf.write('FROM {}\n'.format(from_image))
- dockerf.write('COPY --from={} /root/kresd /root/kresd\n'.format(build_id))
- dockerf.write('WORKDIR /root/kresd\n')
- if os.path.isfile(os.path.join(distro_dir, 'pre-run.sh')):
- dockerf.write('RUN {}\n'.format(os.path.join(distro_dir, 'pre-run.sh')))
- if os.path.isfile(os.path.join(distro_dir, 'rundeps')):
- dockerf.write('RUN {0} {1}\n'.format(self.cmd_pkgs_install(),
- ' '.join(self.readDependencies(os.path.join(distro_dir, 'rundeps')))))
- if os.path.isfile(os.path.join(distro_dir, 'pre-test.sh')):
- dockerf.write('RUN {}\n'.format(os.path.join(distro_dir, 'pre-test.sh')))
-
- dockerf.close()
-
- def build_printing_errors(self, path, dockerfile, network_mode, tag, rm):
- try:
- return client.images.build(path=path, dockerfile=dockerfile,
- network_mode=network_mode, tag=tag, rm=rm)
- except docker.errors.BuildError as e:
- iterable = iter(e.build_log)
- while True:
- try:
- item = next(iterable)
- if item['stream']:
- for l in item['stream'].splitlines():
- stripped = l.strip()
- if stripped:
- logging.error(stripped)
- except StopIteration:
- break
- raise e
-
- def build(self, tmpdir, tag="", from_image=None):
- self.__genDockerFile(tmpdir, from_image=from_image)
-
- logger.debug('tmpdir={}'.format(tmpdir))
- logger.debug('datadir={}'.format(pytest.KR_ROOT_DIR))
- logger.debug('tag={}'.format(tag))
- image = self.build_printing_errors(path=str(pytest.KR_ROOT_DIR),
- dockerfile=os.path.join(tmpdir, 'Dockerfile-build'),
- network_mode='host', tag=tag, rm=True)
- logger.info('"Build image" ID={} created'.format(image[0].short_id))
- self.build_id = image[0].short_id
- return self.build_id
-
- def build_run(self, tmpdir, build_id, from_image=None, tag=""):
- self.__genDockerFile_run(tmpdir, build_id, from_image=from_image)
-
- logger.debug('tmpdir={}'.format(tmpdir))
- logger.debug('datadir={}'.format(tmpdir))
- logger.debug('tag={}'.format(tag))
- image = self.build_printing_errors(path=str(tmpdir),
- dockerfile=os.path.join(tmpdir, 'Dockerfile-run'),
- network_mode='host', tag=tag, rm=True)
- logger.info('"Run image" ID={} created'.format(image[0].short_id))
- self.run_id = image[0].short_id
- return self.run_id
-
-
-class DebianImage(DockerImages):
- def __init__(self, version):
- super().__init__(version)
- self.distro = 'debian'
-
- def cmd_pkgs_install(self):
- return 'apt-get install -y '
-
- def cmd_kresd_install(self):
- return 'ninja -C build_packaging install >/dev/null'
-
- def cmd_kresd_build(self):
- return """\\
- [ -d /root/kresd/build_packaging ] && rm -rf /root/kresd/build_packaging/; \\
- CFLAGS=\"$CFLAGS -Wall -pedantic -fno-omit-frame-pointer\"; \\
- LDFLAGS=\"$LDFLAGS -Wl,--as-needed\"; \\
- meson build_packaging \\
- --buildtype=plain \\
- --prefix=/root/kresd/install_packaging \\
- --libdir=lib \\
- --default-library=static \\
- -Dsystemd_files=enabled \\
- -Dclient=enabled \\
- -Dkeyfile_default=/usr/share/dns/root.key \\
- -Droot_hints=/usr/share/dns/root.hints \\
- -Dinstall_kresd_conf=enabled \\
- -Dunit_tests=enabled \\
- -Dc_args=\"${CFLAGS}\" \\
- -Dc_link_args=\"${LDFLAGS}\"; \\
- ninja -C build_packaging
- """
-
-
-class UbuntuImage(DebianImage):
- def __init__(self, version):
- super().__init__(version)
- self.distro = 'ubuntu'
-
-
-class CentosImage(DockerImages):
- def __init__(self, version):
- super().__init__(version)
- self.distro = 'centos'
-
- def cmd_pkgs_install(self):
- return "yum install -y "
-
- def cmd_kresd_install(self):
- return 'ninja-build -C build_packaging install'
-
- def cmd_kresd_build(self):
- return """\\
- [ -d /root/kresd/build_packaging ] && rm -rf /root/kresd/build_packaging/; \\
- CFLAGS=\"$CFLAGS -Wall -pedantic -fno-omit-frame-pointer\"; \\
- LDFLAGS=\"$LDFLAGS -Wl,--as-needed\"; \\
- meson build_packaging \\
- --buildtype=plain \\
- --prefix=/root/kresd/install_packaging \\
- --sbindir=sbin \\
- --libdir=lib \\
- --includedir=include \\
- --sysconfdir=etc \\
- --default-library=static \\
- -Dsystemd_files=enabled \\
- -Dclient=enabled \\
- -Dunit_tests=enabled \\
- -Dmanaged_ta=enabled \\
- -Dkeyfile_default=/root/kresd/install_packaging/var/lib/knot-resolver/root.keys \\
- -Dinstall_root_keys=enabled \\
- -Dinstall_kresd_conf=enabled; \\
- ninja-build -C build_packaging
- """
-
-
-class FedoraImage(DockerImages):
- def __init__(self, version):
- super().__init__(version)
- self.distro = 'fedora'
-
- def cmd_pkgs_install(self):
- return "dnf install -y "
-
- def cmd_kresd_install(self):
- return 'ninja -C build_packaging install >/dev/null'
-
- def cmd_kresd_build(self):
- return """\\
- [ -d /root/kresd/build_packaging ] && rm -rf /root/kresd/build_packaging/; \\
- CFLAGS=\"$CFLAGS -Wall -pedantic -fno-omit-frame-pointer\"; \\
- LDFLAGS=\"$LDFLAGS -Wl,--as-needed\"; \\
- meson build_packaging \\
- --buildtype=plain \\
- --prefix=/root/kresd/install_packaging \\
- --sbindir=sbin \\
- --libdir=lib \\
- --includedir=include \\
- --sysconfdir=etc \\
- --default-library=static \\
- -Dsystemd_files=enabled \\
- -Dclient=enabled \\
- -Dunit_tests=enabled \\
- -Dmanaged_ta=enabled \\
- -Dkeyfile_default=/root/kresd/install_packaging/var/lib/knot-resolver/root.keys \\
- -Dinstall_root_keys=enabled \\
- -Dinstall_kresd_conf=enabled; \\
- ninja -C build_packaging
- """
-
-
-class LeapImage(FedoraImage):
- def __init__(self, version):
- super().__init__(version)
- self.distro = 'leap'
-
- def cmd_pkgs_install(self):
- return "zypper install -y "
-
-
-def create_distro_image(name, version):
- img = None
-
- if (name == 'debian'):
- img = DebianImage(version)
- elif (name == 'ubuntu'):
- img = UbuntuImage(version)
- elif (name == 'centos'):
- img = CentosImage(version)
- elif (name == 'fedora'):
- img = FedoraImage(version)
- elif (name == 'leap'):
- img = LeapImage(version)
- else:
- img = None
-
- return img
-
-
-def list_dirs(path, exclude=None):
- """return all 'packaging' directories with full path"""
- filtered_dirs = []
-
- for rootpath, dirs, _ in os.walk(path):
-
- if (os.path.basename(rootpath) == '.packaging'):
- fdir = os.path.relpath(rootpath, path)
- if exclude is not None:
- if fdir not in exclude:
- filtered_dirs.append(fdir)
- else:
- filtered_dirs.append(fdir)
-
- return filtered_dirs
-
-
-def list_tests_dirs():
- """return all 'packaging' directories"""
- return list_dirs(pytest.KR_ROOT_DIR)
-
-
-def list_distro_vers(distro_root):
- """
- return list of {'name': distro_name, 'version': distro_version}
- pairs found in distro_root
- """
- # transform list of paths like TOP/debian/10 into (debian, 10)
- dist_ver = [{'name': p.parts[-2], 'version': p.parts[-1]} for p
- in Path(distro_root).glob('*/*') if p.is_dir()]
-
- return list(dist_ver)
-
-
-MODULES = list_tests_dirs()
-DISTROS = list_distro_vers(os.path.join(pytest.KR_ROOT_DIR, 'daemon/.packaging'))
-DISTROS_NAMES = ['{0}_{1}'.format(distro['name'], distro['version']) for distro in DISTROS]
-
-
-@pytest.fixture(scope='session', params=DISTROS, ids=DISTROS_NAMES)
-def buildenv(request, tmpdir_factory):
- distro = request.param
-
- logger.debug('Creating main images for "{0} {1}"'.format(distro['name'], distro['version']))
- img = create_distro_image(distro['name'], distro['version'])
- if img is None:
- logger.warning('Unknown distro {}'.format(distro['name']))
- else:
- img.module = 'daemon/.packaging'
- tmpdir = tmpdir_factory.mktemp(distro['name']+distro['version'])
- img.build(tmpdir, tag=pytest.KR_PREFIX+distro['name']+distro['version']+'-build')
- img.build_run(tmpdir, img.build_id,
- tag=pytest.KR_PREFIX+distro['name']+distro['version']+'-run')
-
- yield img
-# client.images.remove(img.run_id)
-# client.images.remove(img.build_id)
-
-
-@pytest.mark.parametrize('module', MODULES)
-def test_collect(module, buildenv, tmp_path):
- logger.info(' ### Run test {} ###'.format(module))
-
- if buildenv is None:
- logger.error('Distro "{0} {1}" isn\'t implemented'.format(buildenv.distro,
- buildenv.version))
- assert False
-
- rcode = None
- buildmod = None
- module_dir = os.path.join(pytest.KR_ROOT_DIR, module)
- distro_dir = os.path.join(module_dir, buildenv.distro, buildenv.version)
-
- if os.path.isfile(os.path.join(distro_dir, 'NOTSUPPORTED')):
- pytest.skip('Unsupported linux distribution ({0} {1}:{2})'.format(buildenv.distro, buildenv.version, module))
-
- try:
- if module == 'daemon/.packaging':
- # use main "run image" without changes
- logging.info('Use main "run image"')
- ch = ContainerHandler(buildenv.run_id)
- ch.run()
- elif buildenv is not None:
- if os.path.isfile(os.path.join(distro_dir, 'pre-build.sh')) \
- or os.path.isfile(os.path.join(distro_dir, 'builddeps')):
- # create module specific "build image"
- logger.info('Create new "build image"')
- buildmod = create_distro_image(buildenv.distro, buildenv.version)
- buildmod.module = module
- buildmod.build(tmp_path, from_image=buildenv.build_id,
- tag=pytest.KR_PREFIX+buildmod.distro+buildmod.version+'-' +
- module.replace('/.packaging', '')+'-build')
-
- if buildmod is not None:
- # new build image was made, create new module specific "run image"
- logger.info('Create module specific "run image" from Dockerfile')
- buildmod.build_run(tmp_path, buildmod.build_id,
- tag=pytest.KR_PREFIX+buildmod.distro+buildmod.version+'-' +
- module.replace('/.packaging', '')+'-run', from_image=buildenv.run_id)
- ch = ContainerHandler(buildmod.run_id)
- ch.run()
- elif os.path.isfile(os.path.join(distro_dir, 'pre-run.sh')) \
- or os.path.isfile(os.path.join(distro_dir, 'rundeps')):
- # use main "run image" and apply module specific changes
- logger.info('Apply module specific changes to "run image"')
- buildmod = buildenv
- ch = ContainerHandler(buildmod.run_id)
- ch.run()
-
- if os.path.isfile(os.path.join(distro_dir, 'pre-run.sh')):
- ch.exec_cmd(os.path.join(module, buildenv.distro, buildenv.version,
- 'pre-run.sh'), '/root/kresd/')
-
- if os.path.isfile(os.path.join(distro_dir, 'rundeps')):
- logger.debug(buildmod.cmd_pkgs_install() + ' '.join(
- buildmod.readDependencies(os.path.join(distro_dir, 'rundeps'))))
- ch.exec_cmd(buildmod.cmd_pkgs_install() + ' '.join(
- buildmod.readDependencies(os.path.join(distro_dir, 'rundeps'))),
- '/root/kresd/')
-
- if os.path.isfile(os.path.join(distro_dir, 'pre-test.sh')):
- ch.exec_cmd(os.path.join(module, buildenv.distro, buildenv.version,
- 'pre-test.sh'), '/root/kresd/')
- else:
- # use main "run image" without changes
- logging.info('Use main "run image"')
- ch = ContainerHandler(buildenv.run_id)
- ch.run()
-
- # run test
- if os.path.isfile(os.path.join(module_dir, 'test.config')):
- ch.exec_cmd('/root/kresd/install_packaging/sbin/kresd -n -c ' + os.path.join('..',
- module, 'test.config'), '/root/kresd/install_packaging/')
- elif os.path.isfile(os.path.join(module_dir, 'test.sh')):
- ch.exec_cmd(os.path.join('..', module, 'test.sh'),
- '/root/kresd/install_packaging/')
- else:
- ch.stop()
- ch.container.remove()
- logger.error('Test file (test.config or test.sh) not found')
- assert False
-
- rcode = 0
-
- if os.path.isfile(os.path.join(distro_dir, 'post-run.sh')):
- ch.exec_cmd(os.path.join(module, buildenv.distro, buildenv.version, 'post-run.sh'),
- '/root/kresd/')
-
- except DockerCmdError as err:
- rcode, out = err.args
- logger.debug('rcode: {}'.format(rcode))
- logger.error(out.decode('utf-8'))
- finally:
- ch.stop()
- ch.container.remove()
- if buildmod is not None and buildmod is not buildenv:
- client.images.remove(buildmod.run_id)
- client.images.remove(buildmod.build_id)
-
- assert(rcode == 0)
diff --git a/tests/pytests/pylintrc b/tests/pytests/pylintrc
index 2c406be2..5e2b50b5 100644
--- a/tests/pytests/pylintrc
+++ b/tests/pytests/pylintrc
@@ -11,7 +11,6 @@ disable=
line-too-long, # checked by flake8
invalid-name,
broad-except,
- bad-continuation,
global-statement,
no-else-return,
redefined-outer-name, # commonly used with pytest fixtures
diff --git a/tests/pytests/test_random_close.py b/tests/pytests/test_random_close.py
index a7cc8777..cadd8ef7 100644
--- a/tests/pytests/test_random_close.py
+++ b/tests/pytests/test_random_close.py
@@ -18,7 +18,7 @@ import utils
QPS = 500
-def random_string(size=32, chars=(string.ascii_lowercase + string.digits)):
+def random_string(size=32, chars=string.ascii_lowercase + string.digits):
return ''.join(random.choice(chars) for x in range(size))
diff --git a/utils/cache_gc/db.c b/utils/cache_gc/db.c
index fc4a2fdb..76a2b5fa 100644
--- a/utils/cache_gc/db.c
+++ b/utils/cache_gc/db.c
@@ -22,7 +22,11 @@ int kr_gc_cache_open(const char *cache_path, struct kr_cache *kres_db,
return -ENOENT;
}
- struct kr_cdb_opts opts = { .path = cache_path, .maxsize = 0/*don't resize*/ };
+ struct kr_cdb_opts opts = {
+ .is_cache = true,
+ .path = cache_path,
+ .maxsize = 0,/*don't resize*/
+ };
int ret = kr_cache_open(kres_db, NULL, &opts, NULL);
if (ret || kres_db->db == NULL) {