Diffstat (limited to 'doc/developer')
-rw-r--r--  doc/developer/bgpd.rst                    |  1
-rw-r--r--  doc/developer/conf.py                     | 43
-rw-r--r--  doc/developer/mgmtd-dev.rst               |  2
-rw-r--r--  doc/developer/northbound/yang-tools.rst   |  4
-rw-r--r--  doc/developer/ospf-ls-retrans.rst         | 69
-rw-r--r--  doc/developer/ospf.rst                    |  1
-rw-r--r--  doc/developer/packaging-debian.rst        |  2
-rw-r--r--  doc/developer/packaging-redhat.rst        | 31
-rw-r--r--  doc/developer/scripting.rst               |  5
-rw-r--r--  doc/developer/topotests.rst               | 68
-rw-r--r--  doc/developer/workflow.rst                | 77
11 files changed, 255 insertions, 48 deletions
diff --git a/doc/developer/bgpd.rst b/doc/developer/bgpd.rst
index a35fa614..f5263ff3 100644
--- a/doc/developer/bgpd.rst
+++ b/doc/developer/bgpd.rst
@@ -9,3 +9,4 @@ BGPD
next-hop-tracking
bgp-typecodes
+ bmp
diff --git a/doc/developer/conf.py b/doc/developer/conf.py
index 495c604a..a5b51481 100644
--- a/doc/developer/conf.py
+++ b/doc/developer/conf.py
@@ -18,6 +18,7 @@ import re
import pygments
from sphinx.highlighting import lexers
from sphinx.util import logging
+
logger = logging.getLogger(__name__)
# If extensions (or modules to document with autodoc) are in another directory,
@@ -53,18 +54,26 @@ source_suffix = ".rst"
master_doc = "index"
# General information about the project.
-project = u"FRR"
-copyright = u"2017, FRR"
-author = u"FRR authors"
+project = "FRR"
+copyright = "2017, FRR"
+author = "FRR authors"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
-version = u"?.?"
+version = "?.?"
# The full version, including alpha/beta/rc tags.
-release = u"?.?-?"
+release = "?.?-?"
+
+# Set canonical URL from the Read the Docs Domain
+html_baseurl = os.environ.get("READTHEDOCS_CANONICAL_URL", "")
+
+# Tell Jinja2 templates the build is running on Read the Docs
+html_context = {}
+if os.environ.get("READTHEDOCS", "") == "True":
+ html_context["READTHEDOCS"] = True
# -----------------------------------------------------------------------------
@@ -95,7 +104,7 @@ replace_vars = {
# extract version information, installation location, other stuff we need to
# use when building final documents
-val = re.compile('^S\["([^"]+)"\]="(.*)"$')
+val = re.compile(r'^S\["([^"]+)"\]="(.*)"$')
try:
with open("../../config.status", "r") as cfgstatus:
for ln in cfgstatus.readlines():
@@ -287,7 +296,7 @@ latex_elements = {
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, "FRR.tex", u"FRR Developer's Manual", u"FRR", "manual"),
+ (master_doc, "FRR.tex", "FRR Developer's Manual", "FRR", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
@@ -315,7 +324,7 @@ latex_logo = "../figures/frr-logo-medium.png"
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
-man_pages = [(master_doc, "frr", u"FRR Developer's Manual", [author], 1)]
+man_pages = [(master_doc, "frr", "FRR Developer's Manual", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
@@ -330,7 +339,7 @@ texinfo_documents = [
(
master_doc,
"frr",
- u"FRR Developer's Manual",
+ "FRR Developer's Manual",
author,
"FRR",
"One line description of project.",
@@ -358,27 +367,29 @@ texinfo_documents = [
with open("../extra/frrlexer.py", "rb") as lex:
frrlexerpy = lex.read()
-frrfmt_re = re.compile(r'^\s*%(?P<spec>[^\s]+)\s+\((?P<types>.*)\)\s*$')
+frrfmt_re = re.compile(r"^\s*%(?P<spec>[^\s]+)\s+\((?P<types>.*)\)\s*$")
+
def parse_frrfmt(env, text, node):
from sphinx import addnodes
m = frrfmt_re.match(text)
if not m:
- logger.warning('could not parse frrfmt:: %r' % (text), location=node)
+ logger.warning("could not parse frrfmt:: %r" % (text), location=node)
node += addnodes.desc_name(text, text)
return text
- spec, types = m.group('spec'), m.group('types')
+ spec, types = m.group("spec"), m.group("types")
- node += addnodes.desc_sig_operator('%', '%')
- node += addnodes.desc_name(spec + ' ', spec + ' ')
+ node += addnodes.desc_sig_operator("%", "%")
+ node += addnodes.desc_name(spec + " ", spec + " ")
plist = addnodes.desc_parameterlist()
- for typ in types.split(','):
+ for typ in types.split(","):
typ = typ.strip()
plist += addnodes.desc_parameter(typ, typ)
node += plist
- return '%' + spec
+ return "%" + spec
+
# custom extensions here
def setup(app):
diff --git a/doc/developer/mgmtd-dev.rst b/doc/developer/mgmtd-dev.rst
index b979af06..4c56cadb 100644
--- a/doc/developer/mgmtd-dev.rst
+++ b/doc/developer/mgmtd-dev.rst
@@ -147,7 +147,7 @@ Front-End Interface:
- change route_map_init() to route_map_init_new(false) and remove from
VTYSH_ROUTE_MAP_CONFIG (leave in VTYSH_ROUTE_MAP_SHOW).
- remove vrf_cmd_init(NULL) => remove from VTYSH_INTERFACE_SUBSET
- ...
+
Back-End Interface:
diff --git a/doc/developer/northbound/yang-tools.rst b/doc/developer/northbound/yang-tools.rst
index fb5a2872..91a767dc 100644
--- a/doc/developer/northbound/yang-tools.rst
+++ b/doc/developer/northbound/yang-tools.rst
@@ -87,7 +87,7 @@ Generate skeleton instance data:
* XML:
- .. code:: sh
+.. code:: sh
$ pyang -p <yang-search-path> \
-f sample-xml-skeleton --sample-xml-skeleton-defaults \
@@ -95,7 +95,7 @@ Generate skeleton instance data:
* JSON:
- .. code:: sh
+.. code:: sh
$ pyang -p <yang-search-path> \
-f jsonxsl module.yang -o module.xsl
diff --git a/doc/developer/ospf-ls-retrans.rst b/doc/developer/ospf-ls-retrans.rst
new file mode 100644
index 00000000..230d7a1c
--- /dev/null
+++ b/doc/developer/ospf-ls-retrans.rst
@@ -0,0 +1,69 @@
+OSPF Neighbor Retransmission List
+=================================
+
+Overview
+--------
+
+OSPF neighbor link-state retransmission lists are implemented using
+both a sparse Link State Database (LSDB) and a doubly-linked list.
+Rather than the previous per-neighbor periodic timer, a per-neighbor
+timer is set to the expiration time of the next scheduled LSA
+retransmission.
+
+Sparse Link State Database (LSDB)
+---------------------------------
+
+When an explicit or implied acknowledgment is received from a
+neighbor in 2-way state or higher, the acknowledged LSA must be
+removed from the neighbor's link state retransmission list. In order
+to do this efficiently, a sparse LSDB is utilized. LSDB entries also
+include a pointer to the corresponding list entry so that it may be
+efficiently removed from the doubly-linked list.
+
+The sparse LSDB is implemented using the OSPF functions in
+ospf_lsdb.[c,h]. OSPF LSDBs are implemented as an array of route
+tables (lib/table.[c,h]). What is unique about the LS Retransmission
+list LSDB is that each entry also has a pointer into the doubly-linked
+list to facilitate fast deletions.
+
+Doubly-Linked List
+------------------
+
+In addition to the sparse LSDB, LSAs on a neighbor LS retransmission
+list are also maintained in a doubly-linked list, ordered chronologically,
+with the LSA scheduled for the next retransmission at the head of
+the list.
+
+The doubly-linked list is implemented using the dlist macros in
+lib/typesafe.h.
+
+LSA LS Retransmission List Addition
+------------------------------------
+
+When an LSA is added to a neighbor retransmission list, it is
+added to both the sparse LSDB and the doubly-linked list with a pointer
+in the LSDB route-table node to the list entry. The LSA is added to
+the tail of the list with the expiration time set to the current time
+plus the retransmission interval. If the neighbor retransmission
+timer is not set, it is set to expire at the expiration time of the
+newly added LSA.
+
+LSA LS Retransmission List Deletion
+-----------------------------------
+
+When an LSA is deleted from a neighbor retransmission list, it is
+deleted from both the sparse LSDB and the doubly-linked list, with the
+pointer in the LSDB route-table node used to efficiently delete the entry
+from the list. If the LSA at the head of the list was removed, then
+the neighbor retransmission timer is reset to the expiration of the
+new LSA at the head of the list, or canceled if the list is empty.
+
+Neighbor LS Retransmission List Expiration
+------------------------------------------
+
+When the neighbor retransmission timer expires, the LSA at the head of
+the list, and any others within a configured window (e.g., 50
+milliseconds), are retransmitted. The LSAs that have been retransmitted
+are removed from the list and re-added to the tail of the list with a
+new expiration time that is retransmit-interval seconds in the future.
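+
+Illustrative Sketch
+-------------------
+
+The structures below are an illustrative sketch only; the actual
+implementation uses the sparse LSDB code in ospf_lsdb.[c,h], the route
+tables in lib/table.[c,h] and the typesafe dlist macros in
+lib/typesafe.h, and its names differ (plain prev/next pointers stand in
+for the dlist macros here). The sketch only shows how each
+retransmission-list entry is reachable both through the sparse LSDB and
+through the chronologically ordered list.
+
+.. code:: c
+
+   /* Illustrative only -- simplified names, not the actual FRR code. */
+   #include <sys/time.h>
+
+   struct ospf_lsa;    /* the LSA being retransmitted (opaque here) */
+   struct route_table; /* sparse LSDB, as in lib/table.[c,h] */
+   struct event;       /* timer handle */
+
+   /* One entry per LSA on a neighbor's retransmission list. */
+   struct retrans_entry {
+           struct ospf_lsa *lsa;              /* LSA awaiting acknowledgment */
+           struct timeval expire_at;          /* scheduled retransmission time */
+           struct retrans_entry *prev, *next; /* chronological list links */
+   };
+
+   /* Per-neighbor retransmission state. */
+   struct neighbor_retrans {
+           struct route_table *lsdb;    /* sparse LSDB; each node also stores
+                                           a pointer to its retrans_entry so
+                                           an acked LSA is unlinked in O(1) */
+           struct retrans_entry *head;  /* next LSA to retransmit */
+           struct retrans_entry *tail;  /* most recently (re-)added LSA */
+           struct event *retrans_timer; /* armed for head->expire_at */
+   };
+
+   /*
+    * Addition: append at the tail with expire_at = now + the retransmit
+    * interval and arm retrans_timer if it is not already running.
+    *
+    * Acknowledgment: look the LSA up in the sparse LSDB, follow the
+    * stored pointer to unlink the entry from the list; if the head
+    * changed, re-arm the timer (or cancel it when the list is empty).
+    *
+    * Timer expiry: retransmit the head entry plus any entries within the
+    * configured window, then re-add them at the tail with new expire_at
+    * values.
+    */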
+
diff --git a/doc/developer/ospf.rst b/doc/developer/ospf.rst
index 837a0bd1..da480253 100644
--- a/doc/developer/ospf.rst
+++ b/doc/developer/ospf.rst
@@ -8,6 +8,7 @@ OSPFD
:maxdepth: 2
ospf-api
+ ospf-ls-retrans
ospf-sr
cspf
diff --git a/doc/developer/packaging-debian.rst b/doc/developer/packaging-debian.rst
index c2c3b7e7..4109057e 100644
--- a/doc/developer/packaging-debian.rst
+++ b/doc/developer/packaging-debian.rst
@@ -68,6 +68,8 @@ buster.)
+----------------+-------------------+-----------------------------------------+
| pkg.frr.pim6d | pkg.frr.nopim6d | builds pim6d (default enabled) |
+----------------+-------------------+-----------------------------------------+
+ | pkg.frr.grpc | pkg.frr.nogrpc | builds with grpc support (default: no) |
+ +----------------+-------------------+-----------------------------------------+
* the ``-uc -us`` options to disable signing the packages with your GPG key
diff --git a/doc/developer/packaging-redhat.rst b/doc/developer/packaging-redhat.rst
index d88f4499..80378734 100644
--- a/doc/developer/packaging-redhat.rst
+++ b/doc/developer/packaging-redhat.rst
@@ -67,24 +67,27 @@ Tested on CentOS 6, CentOS 7, CentOS 8 and Fedora 24.
############### FRRouting (FRR) configure options #################
# with-feature options
- %{!?with_pam: %global with_pam 0 }
- %{!?with_ospfclient: %global with_ospfclient 1 }
- %{!?with_ospfapi: %global with_ospfapi 1 }
- %{!?with_irdp: %global with_irdp 1 }
- %{!?with_rtadv: %global with_rtadv 1 }
+ %{!?with_babeld: %global with_babeld 1 }
+ %{!?with_bfdd: %global with_bfdd 1 }
+ %{!?with_bgp_vnc: %global with_bgp_vnc 0 }
+ %{!?with_cumulus: %global with_cumulus 0 }
+ %{!?with_eigrpd: %global with_eigrpd 1 }
+ %{!?with_fpm: %global with_fpm 1 }
+ %{!?with_mgmtd_test_be_client: %global with_mgmtd_test_be_client 0 }
%{!?with_ldpd: %global with_ldpd 1 }
- %{!?with_nhrpd: %global with_nhrpd 1 }
- %{!?with_eigrp: %global with_eigrpd 1 }
- %{!?with_shared: %global with_shared 1 }
%{!?with_multipath: %global with_multipath 256 }
- %{!?frr_user: %global frr_user frr }
- %{!?vty_group: %global vty_group frrvty }
- %{!?with_fpm: %global with_fpm 0 }
- %{!?with_watchfrr: %global with_watchfrr 1 }
- %{!?with_bgp_vnc: %global with_bgp_vnc 0 }
+ %{!?with_nhrpd: %global with_nhrpd 1 }
+ %{!?with_ospfapi: %global with_ospfapi 1 }
+ %{!?with_ospfclient: %global with_ospfclient 1 }
+ %{!?with_pam: %global with_pam 0 }
+ %{!?with_pbrd: %global with_pbrd 1 }
%{!?with_pimd: %global with_pimd 1 }
%{!?with_pim6d: %global with_pim6d 1 }
- %{!?with_rpki: %global with_rpki 0 }
+ %{!?with_vrrpd: %global with_vrrpd 1 }
+ %{!?with_rtadv: %global with_rtadv 1 }
+ %{!?with_watchfrr: %global with_watchfrr 1 }
+ %{!?with_pathd: %global with_pathd 1 }
+ %{!?with_grpc: %global with_grpc 0 }
8. Build the RPM::
diff --git a/doc/developer/scripting.rst b/doc/developer/scripting.rst
index 7a433144..f51130b1 100644
--- a/doc/developer/scripting.rst
+++ b/doc/developer/scripting.rst
@@ -523,6 +523,7 @@ object which contains methods corresponding to each of the ``zlog`` levels:
log.error("error")
log.notice("notice")
log.debug("debug")
+ log.trace("trace")
The log messages will show up in the daemon's log output.
@@ -579,14 +580,14 @@ accomplished with scripting.
RM_FAILURE, RM_NOMATCH, RM_MATCH, RM_MATCH_AND_CHANGE)
log.info("Evaluating route " .. prefix.network .. " from peer " .. peer.remote_id.string)
-
+
function on_match (prefix, attributes)
log.info("Match")
return {
attributes = RM_MATCH
}
end
-
+
function on_nomatch (prefix, attributes)
log.info("No match")
return {
diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst
index e1702c47..d2308bea 100644
--- a/doc/developer/topotests.rst
+++ b/doc/developer/topotests.rst
@@ -33,10 +33,11 @@ Installing Topotest Requirements
net-tools \
python3-pip \
iputils-ping \
+ iptables \
tshark \
valgrind
python3 -m pip install wheel
- python3 -m pip install 'pytest>=6.2.4' 'pytest-xdist>=2.3.0'
+ python3 -m pip install 'pytest>=8.3.2' 'pytest-asyncio>=0.24.0' 'pytest-xdist>=3.6.1'
python3 -m pip install 'scapy>=2.4.5'
python3 -m pip install xmltodict
python3 -m pip install git+https://github.com/Exa-Networks/exabgp@0659057837cd6c6351579e9f0fa47e9fb7de7311
@@ -411,6 +412,14 @@ for ``master`` branch:
and create ``frr`` user and ``frrvty`` group as shown above.
+Newer versions of AddressSanitizer require a sysctl to be changed
+for the tests to run successfully. The same is true for the
+undefined behavior and memory sanitizers.
+
+.. code:: shell
+
+ sysctl vm.mmap_rnd_bits=28
+
Debugging Topotest Failures
^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -722,8 +731,8 @@ packages.
Code coverage can automatically be gathered for any topotest run. To support
this FRR must first be compiled with the ``--enable-gcov`` configure option.
-This will cause *.gnco files to be created during the build. When topotests are
-run the statistics are generated and stored in *.gcda files. Topotest
+This will cause \*.gcno files to be created during the build. When topotests are
+run, the statistics are generated and stored in \*.gcda files. Topotest
infrastructure will gather these files, capture the information into a
``coverage.info`` ``lcov`` file and also report the coverage summary.
@@ -732,7 +741,7 @@ If you build your FRR in a directory outside of the FRR source directory you
will also need to pass the ``--cov-frr-build-dir`` argument specifying the build
directory location.
-During the topotest run the *.gcda files are generated into a ``gcda``
+During the topotest run the \*.gcda files are generated into a ``gcda``
sub-directory of the top-level run directory (i.e., normally
``/tmp/topotests/gcda``). These files will then be copied at the end of the
topotest run into the FRR build directory where the ``gcov`` and ``lcov``
@@ -747,9 +756,49 @@ The ``coverage.info`` file can then be used to generate coverage reports or file
markup (e.g., using the ``genhtml`` utility) or enable markup within your
IDE/editor if supported (e.g., the emacs ``cov-mode`` package)
-NOTE: the *.gcda files in ``/tmp/topotests/gcda`` are cumulative so if you do
+NOTE: the \*.gcda files in ``/tmp/topotests/gcda`` are cumulative so if you do
not remove them they will aggregate data across multiple topotest runs.
+How to Reproduce Failed Tests
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Tests sometimes fail only intermittently, perhaps once every 10 runs locally,
+and recreating the failure reliably is not always easy. Here are some generic
+strategies that can be employed to reproduce a failure more reliably:
+
+.. code:: console
+
+ cd <test directory>
+ ln -s test_the_test_name.py test_a.py
+ ln -s test_the_test_name.py test_b.py
+
+This allows you to run multiple copies of the same test within one full test run.
+Additionally, if you need to modify the test, you don't need to recopy everything
+to make it work. By adding multiple copies of the same occasionally failing test
+you raise the odds of it failing again, and you get easily accessible
+good and bad runs to compare.
+
+.. code:: console
+
+ sudo -E python3 -m pytest -n <some value> --dist=loadfile
+
+Choose an ``n`` value that is greater than the number of CPUs available on the system.
+This changes the timing and may or may not make it more likely that the test fails.
+Be aware, though, that this increases memory requirements and may make other
+tests fail more often as well. Choose values that do not push the system
+into swap.
+
+.. code:: console
+
+   stress -c <number of CPUs to put at 100%>
+
+By keeping CPUs busy with programs that do no useful work you also change the
+timing again and may cause the problem to happen more often.
+
+There is no magic bullet here. As a developer you might have to experiment with
+different values and different combinations of the above to make the problem happen
+more often. These are just the tools that we know of at this point in time.
+
.. _topotests_docker:
@@ -1292,6 +1341,15 @@ Example:
router.load_config(TopoRouter.RD_ZEBRA, "zebra.conf")
router.load_config(TopoRouter.RD_OSPF)
+or using unified config (specifying which daemons to run is optional):
+
+.. code:: py
+
+   for _, (rname, router) in enumerate(router_list.items(), 1):
+       router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname)), [
+           (TopoRouter.RD_ZEBRA, "-s 90000000"),
+           (TopoRouter.RD_MGMTD, None),
+           (TopoRouter.RD_BGP, None)])
- The topology definition or build function
diff --git a/doc/developer/workflow.rst b/doc/developer/workflow.rst
index f720f627..5e22c4cb 100644
--- a/doc/developer/workflow.rst
+++ b/doc/developer/workflow.rst
@@ -6,9 +6,10 @@ Process & Workflow
.. highlight:: none
-FRR is a large project developed by many different groups. This section
-documents standards for code style & quality, commit messages, pull requests
-and best practices that all contributors are asked to follow.
+FRR is a large project developed by many different groups. This
+section documents standards for code style & quality, commit messages,
+pull requests (PRs) and best practices that all contributors are asked
+to follow.
This chapter is "descriptive/post-factual" in that it documents pratices that
are in use; it is not "definitive/pre-factual" in prescribing practices. This
@@ -241,7 +242,7 @@ discontinued.
The LTS branch duties are the following ones:
- organise meetings on a (bi-)weekly or monthly basis, the handling of issues
- and pull requested relative to that branch. When time permits, this may be done
+ and pull requests relative to that branch. When time permits, this may be done
during the regularly scheduled FRR meeting.
- ensure the stability of the branch, by using and eventually adapting the
@@ -324,11 +325,17 @@ relevant to your work.
Submitting Patches and Enhancements
===================================
-FRR accepts patches using GitHub pull requests.
+FRR accepts patches using GitHub pull requests (PRs). The typical FRR
+developer will maintain a fork of the FRR project in GitHub; see the
+GitHub documentation for help setting up an account and creating a
+fork repository. Keep the ``master`` branch of your fork up to date
+with upstream FRR. Create a dev branch in your fork and commit your
+work there. When ready, create a pull-request between your dev branch
+in your fork and the main FRR repository in GitHub.
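+
+A typical sequence looks something like the following (illustrative only;
+adjust the fork URL and branch names to your own setup):
+
+.. code:: console
+
+   git clone https://github.com/<your-github-user>/frr.git
+   cd frr
+   git remote add upstream https://github.com/FRRouting/frr.git
+   git fetch upstream
+   git checkout -b my-feature upstream/master
+   # ... hack, commit ...
+   git push origin my-feature
+   # then open a pull request from my-feature against FRRouting/frr master
+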
-The base branch for new contributions and non-critical bug fixes should be
-``master``. Please ensure your pull request is based on this branch when you
-submit it.
+The base branch for new contributions and non-critical bug fixes
+should be ``master``. Please ensure your pull request targets this
+branch when you submit it.
Code submitted by pull request will be automatically tested by one or more CI
systems. Once the automated tests succeed, other developers will review your
@@ -531,6 +538,42 @@ After Submitting Your Changes
community members.
- Your submission is done once it is merged to the master branch.
+Reverting the changes
+=====================
+
+When you revert a regular commit in Git, the process is straightforward - it
+undoes the changes introduced by that commit. However, reverting a merge commit
+is more complex. While it undoes the data changes brought in by the merge, it
+does not alter the repository's history or the merge's effect on it.
+
+Reverting a Merge Commit
+------------------------
+
+When you revert a merge commit, the following occurs:
+
+* The changes made by the merge are undone;
+* The merge itself remains in the history: it continues to be recognized as the point where two branches were joined;
+* Future merges will still treat this as the last shared state, regardless of the revert.
+
+Thus, a "revert" in Git undoes data changes, but it does not serve as a true "undo"
+for the historical effects of a commit.
+
+Reverting a Merge and Bisectability
+-----------------------------------
+
+Consider the implications of reverting a merge and then reverting that revert.
+This scenario complicates the debugging process, especially when using tools like
+git bisect. A reverted merge effectively consolidates all changes from the original
+merge into a single commit, but in reverse. This creates a challenge for debugging,
+as you lose the granularity of individual commits, making it difficult to identify
+the specific change causing an issue.
+
+Considerations
+--------------
+
+When reverting changes, e.g. a full pull request, we SHOULD revert every commit
+individually rather than using git revert on the merge commit, for example:
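+
+.. code:: console
+
+   # Revert the PR's individual commits, newest first (the hashes here are
+   # placeholders); avoid "git revert -m 1 <merge-commit>".
+   git revert <commit-3> <commit-2> <commit-1>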
+
Programming Languages, Tools and Libraries
==========================================
@@ -1306,6 +1349,16 @@ MemorySanitizer
to ``configure``.
+UndefinedSanitizer
+ Similar to AddressSanitizer, this tool provides runtime instrumentation for
+ detecting use of undefined behavior in C. Testing your own code with this
+ tool before submission is encouraged. You can enable it by passing::
+
+ --enable-undefined-sanitizer
+
+   to ``configure``. If you run FRR with this sanitizer enabled, you will
+   probably also have to run ``sudo sysctl vm.mmap_rnd_bits=28``.
+
All of the above tools are available in the Clang/LLVM toolchain since 3.4.
AddressSanitizer and ThreadSanitizer are available in recent versions of GCC,
but are no longer actively maintained. MemorySanitizer is not available in GCC.
@@ -1315,6 +1368,14 @@ but are no longer actively maintained. MemorySanitizer is not available in GCC.
The different Sanitizers are mostly incompatible with each other. Please
refer to GCC/LLVM documentation for details.
+.. note::
+
+ The different sanitizers also require setting
+
+ sysctl vm.mmap_rnd_bits=28
+
+ in order to work properly.
+
frr-format plugin
This is a GCC plugin provided with FRR that does extended type checks for
``%pFX``-style printfrr extensions. To use this plugin,