summaryrefslogtreecommitdiffstats
path: root/tests/topotests/lib
diff options
context:
space:
mode:
Diffstat (limited to 'tests/topotests/lib')
-rw-r--r--tests/topotests/lib/bgp.py106
-rw-r--r--tests/topotests/lib/bgprib.py112
-rw-r--r--tests/topotests/lib/checkping.py2
-rw-r--r--tests/topotests/lib/common_check.py65
-rw-r--r--tests/topotests/lib/common_config.py27
-rwxr-xr-xtests/topotests/lib/fe_client.py115
-rwxr-xr-xtests/topotests/lib/grpc-query.py1
-rw-r--r--tests/topotests/lib/ospf.py4
-rw-r--r--tests/topotests/lib/pim.py264
-rw-r--r--tests/topotests/lib/topogen.py24
-rw-r--r--tests/topotests/lib/topotest.py54
11 files changed, 633 insertions, 141 deletions
diff --git a/tests/topotests/lib/bgp.py b/tests/topotests/lib/bgp.py
index 3a16ed5a..bcd1c748 100644
--- a/tests/topotests/lib/bgp.py
+++ b/tests/topotests/lib/bgp.py
@@ -333,7 +333,7 @@ def __create_bgp_global(tgen, input_dict, router, build=False):
else:
del_action = False
- for rs_timer, value in timer.items():
+ for rs_timer, _ in timer.items():
rs_timer_value = timer.setdefault(rs_timer, None)
if rs_timer_value and rs_timer != "delete":
@@ -1229,7 +1229,7 @@ def modify_bgp_config_when_bgpd_down(tgen, topo, input_dict):
# Copy bgp config file to /etc/frr
for dut in input_dict.keys():
router_list = tgen.routers()
- for router, rnode in router_list.items():
+ for router, _ in router_list.items():
if router != dut:
continue
@@ -1750,7 +1750,7 @@ def verify_as_numbers(tgen, topo, input_dict, expected=True):
for bgp_neighbor, peer_data in bgp_neighbors.items():
remote_as = input_dict[bgp_neighbor]["bgp"]["local_as"]
- for dest_link, peer_dict in peer_data["dest_link"].items():
+ for dest_link, _ in peer_data["dest_link"].items():
neighbor_ip = None
data = topo["routers"][bgp_neighbor]["links"]
@@ -1833,7 +1833,7 @@ def verify_bgp_convergence_from_running_config(tgen, dut=None, expected=True):
return errormsg
for vrf, addr_family_data in show_bgp_json.items():
- for address_family, neighborship_data in addr_family_data.items():
+ for _, neighborship_data in addr_family_data.items():
total_peer = 0
no_of_peer = 0
@@ -1980,7 +1980,7 @@ def clear_bgp_and_verify(tgen, topo, router, rid=None):
bgp_neighbors = bgp_addr_type[addr_type]["unicast"]["neighbor"]
for bgp_neighbor, peer_data in bgp_neighbors.items():
- for dest_link, peer_dict in peer_data["dest_link"].items():
+ for dest_link, _ in peer_data["dest_link"].items():
data = topo["routers"][bgp_neighbor]["links"]
if dest_link in data:
@@ -3231,7 +3231,7 @@ def verify_graceful_restart(
if bgp_neighbor != peer:
continue
- for dest_link, peer_dict in peer_data["dest_link"].items():
+ for dest_link, _ in peer_data["dest_link"].items():
data = topo["routers"][bgp_neighbor]["links"]
if dest_link in data:
@@ -3266,27 +3266,43 @@ def verify_graceful_restart(
lmode = None
rmode = None
+
# Local GR mode
- if "address_family" in input_dict[dut]["bgp"]:
- bgp_neighbors = input_dict[dut]["bgp"]["address_family"][addr_type][
- "unicast"
- ]["neighbor"][peer]["dest_link"]
+ if "bgp" not in input_dict[dut] and "graceful-restart" in input_dict[dut]:
+ if (
+ "graceful-restart" in input_dict[dut]["graceful-restart"]
+ and input_dict[dut]["graceful-restart"]["graceful-restart"]
+ ):
+ lmode = "Restart*"
+ elif (
+ "graceful-restart-disable" in input_dict[dut]["graceful-restart"]
+ and input_dict[dut]["graceful-restart"]["graceful-restart-disable"]
+ ):
+ lmode = "Disable*"
+ else:
+ lmode = "Helper*"
- for dest_link, data in bgp_neighbors.items():
- if (
- "graceful-restart-helper" in data
- and data["graceful-restart-helper"]
- ):
- lmode = "Helper"
- elif "graceful-restart" in data and data["graceful-restart"]:
- lmode = "Restart"
- elif (
- "graceful-restart-disable" in data
- and data["graceful-restart-disable"]
- ):
- lmode = "Disable"
- else:
- lmode = None
+ if lmode is None:
+ if "address_family" in input_dict[dut]["bgp"]:
+ bgp_neighbors = input_dict[dut]["bgp"]["address_family"][addr_type][
+ "unicast"
+ ]["neighbor"][peer]["dest_link"]
+
+ for dest_link, data in bgp_neighbors.items():
+ if (
+ "graceful-restart-helper" in data
+ and data["graceful-restart-helper"]
+ ):
+ lmode = "Helper"
+ elif "graceful-restart" in data and data["graceful-restart"]:
+ lmode = "Restart"
+ elif (
+ "graceful-restart-disable" in data
+ and data["graceful-restart-disable"]
+ ):
+ lmode = "Disable"
+ else:
+ lmode = None
if lmode is None:
if "graceful-restart" in input_dict[dut]["bgp"]:
@@ -3314,7 +3330,11 @@ def verify_graceful_restart(
return True
# Remote GR mode
- if "address_family" in input_dict[peer]["bgp"]:
+
+ if (
+ "bgp" in input_dict[peer]
+ and "address_family" in input_dict[peer]["bgp"]
+ ):
bgp_neighbors = input_dict[peer]["bgp"]["address_family"][addr_type][
"unicast"
]["neighbor"][dut]["dest_link"]
@@ -3336,7 +3356,10 @@ def verify_graceful_restart(
rmode = None
if rmode is None:
- if "graceful-restart" in input_dict[peer]["bgp"]:
+ if (
+ "bgp" in input_dict[peer]
+ and "graceful-restart" in input_dict[peer]["bgp"]
+ ):
if (
"graceful-restart"
in input_dict[peer]["bgp"]["graceful-restart"]
@@ -3355,6 +3378,27 @@ def verify_graceful_restart(
rmode = "Disable"
else:
rmode = "Helper"
+
+ if rmode is None:
+ if (
+ "bgp" not in input_dict[peer]
+ and "graceful-restart" in input_dict[peer]
+ ):
+ if (
+ "graceful-restart" in input_dict[peer]["graceful-restart"]
+ and input_dict[peer]["graceful-restart"]["graceful-restart"]
+ ):
+ rmode = "Restart"
+ elif (
+ "graceful-restart-disable"
+ in input_dict[peer]["graceful-restart"]
+ and input_dict[peer]["graceful-restart"][
+ "graceful-restart-disable"
+ ]
+ ):
+ rmode = "Disable"
+ else:
+ rmode = "Helper"
else:
rmode = "Helper"
@@ -3479,7 +3523,7 @@ def verify_r_bit(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
if bgp_neighbor != peer:
continue
- for dest_link, peer_dict in peer_data["dest_link"].items():
+ for dest_link, _ in peer_data["dest_link"].items():
data = topo["routers"][bgp_neighbor]["links"]
if dest_link in data:
@@ -3597,7 +3641,7 @@ def verify_eor(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
if bgp_neighbor != peer:
continue
- for dest_link, peer_dict in peer_data["dest_link"].items():
+ for dest_link, _ in peer_data["dest_link"].items():
data = topo["routers"][bgp_neighbor]["links"]
if dest_link in data:
@@ -3762,7 +3806,7 @@ def verify_f_bit(tgen, topo, addr_type, input_dict, dut, peer, expected=True):
if bgp_neighbor != peer:
continue
- for dest_link, peer_dict in peer_data["dest_link"].items():
+ for dest_link, _ in peer_data["dest_link"].items():
data = topo["routers"][bgp_neighbor]["links"]
if dest_link in data:
@@ -3890,7 +3934,7 @@ def verify_graceful_restart_timers(tgen, topo, addr_type, input_dict, dut, peer)
if bgp_neighbor != peer:
continue
- for dest_link, peer_dict in peer_data["dest_link"].items():
+ for dest_link, _ in peer_data["dest_link"].items():
data = topo["routers"][bgp_neighbor]["links"]
if dest_link in data:
diff --git a/tests/topotests/lib/bgprib.py b/tests/topotests/lib/bgprib.py
index 699c7a4d..f01a440b 100644
--- a/tests/topotests/lib/bgprib.py
+++ b/tests/topotests/lib/bgprib.py
@@ -64,10 +64,9 @@ class BgpRib:
self.log("missing route: pfx=" + want["p"] + ", nh=" + want["n"])
return 0
- def RequireVpnRoutes(self, target, title, wantroutes, debug=0):
+ def RequireVpnRoutesOne(self, target, title, wantroutes, debug=0):
import json
- logstr = "RequireVpnRoutes " + str(wantroutes)
# non json form for humans
luCommand(
target,
@@ -86,11 +85,18 @@ class BgpRib:
if re.search(r"^\s*$", ret):
# degenerate case: empty json means no routes
if len(wantroutes) > 0:
- luResult(target, False, title, logstr)
- return
- luResult(target, True, title, logstr)
+ return False
+ return True
rib = json.loads(ret)
- rds = rib["routes"]["routeDistinguishers"]
+ try:
+ rds = rib["routes"]["routeDistinguishers"]
+ except KeyError as err:
+ # KeyError: 'routes' probably means missing/bad VRF
+ # This error also happens if we are too quick and the routing
+ # table has not been fully populated yet.
+ if debug:
+ self.log("KeyError, no routes")
+ return False
for want in wantroutes:
found = 0
if debug:
@@ -105,11 +111,39 @@ class BgpRib:
found = 1
break
if not found:
- luResult(target, False, title, logstr)
- return
- luResult(target, True, title, logstr)
+ return False
+ return True
+
+ def RequireVpnRoutes(
+ self, target, title, wantroutes, debug=0, wait=10, wait_time=0.5
+ ):
+ import time
+ import math
+
+ logstr = "RequireVpnRoutes " + str(wantroutes)
+ found = False
+ n = 0
+ startt = time.time()
+
+    # Calculate the number of `sleep`s we are going to perform.
+ wait_count = int(math.ceil(wait / wait_time)) + 1
+
+ while wait_count > 0:
+ n += 1
+ found = self.RequireVpnRoutesOne(target, title, wantroutes, debug)
+ if found is not False:
+ break
- def RequireUnicastRoutes(self, target, afi, vrf, title, wantroutes, debug=0):
+ wait_count -= 1
+ if wait_count > 0:
+ time.sleep(wait_time)
+
+ delta = time.time() - startt
+ self.log("Done after %d loops, time=%s, Found=%s" % (n, delta, found))
+ luResult(target, found, title, logstr)
+ return found
+
+ def RequireUnicastRoutesOne(self, target, afi, vrf, title, wantroutes, debug=0):
logstr = "RequireUnicastRoutes %s" % str(wantroutes)
vrfstr = ""
if vrf != "":
@@ -129,9 +163,8 @@ class BgpRib:
if re.search(r"^\s*$", ret):
# degenerate case: empty json means no routes
if len(wantroutes) > 0:
- luResult(target, False, title, logstr)
- return
- luResult(target, True, title, logstr)
+ return False, ""
+ return True, ""
rib = json.loads(ret)
try:
table = rib["routes"]
@@ -141,25 +174,60 @@ class BgpRib:
errstr = "-script ERROR: check if wrong vrf (%s)" % (vrf)
else:
errstr = "-script ERROR: check if vrf missing"
- luResult(target, False, title + errstr, logstr)
- return
+ self.log(errstr)
+ return False, errstr
# if debug:
# self.log("table=%s" % table)
for want in wantroutes:
if debug:
self.log("want=%s" % want)
if not self.routes_include_wanted(table, want, debug):
- luResult(target, False, title, logstr)
- return
- luResult(target, True, title, logstr)
+ return False, ""
+ return True, ""
+
+ def RequireUnicastRoutes(
+ self, target, afi, vrf, title, wantroutes, debug=0, wait=10, wait_time=0.5
+ ):
+ import time
+ import math
+
+ logstr = "RequireUnicastRoutes %s" % str(wantroutes)
+ found = False
+ n = 0
+ startt = time.time()
+ errstr = ""
+
+    # Calculate the number of `sleep`s we are going to perform.
+ wait_count = int(math.ceil(wait / wait_time)) + 1
+
+ while wait_count > 0:
+ n += 1
+ found, errstr = self.RequireUnicastRoutesOne(
+ target, afi, vrf, title, wantroutes, debug
+ )
+ if found is not False:
+ break
+
+ wait_count -= 1
+ if wait_count > 0:
+ time.sleep(wait_time)
+
+ delta = time.time() - startt
+ self.log("Done after %d loops, time=%s, Found=%s" % (n, delta, found))
+ luResult(target, found, title + errstr, logstr)
+ return found
BgpRib = BgpRib()
-def bgpribRequireVpnRoutes(target, title, wantroutes, debug=0):
- BgpRib.RequireVpnRoutes(target, title, wantroutes, debug)
+def bgpribRequireVpnRoutes(target, title, wantroutes, debug=0, wait=10, wait_time=0.5):
+ BgpRib.RequireVpnRoutes(target, title, wantroutes, debug, wait, wait_time)
-def bgpribRequireUnicastRoutes(target, afi, vrf, title, wantroutes, debug=0):
- BgpRib.RequireUnicastRoutes(target, afi, vrf, title, wantroutes, debug)
+def bgpribRequireUnicastRoutes(
+ target, afi, vrf, title, wantroutes, debug=0, wait=10, wait_time=0.5
+):
+ BgpRib.RequireUnicastRoutes(
+ target, afi, vrf, title, wantroutes, debug, wait, wait_time
+ )
diff --git a/tests/topotests/lib/checkping.py b/tests/topotests/lib/checkping.py
index 5500807f..aa95f45b 100644
--- a/tests/topotests/lib/checkping.py
+++ b/tests/topotests/lib/checkping.py
@@ -33,5 +33,5 @@ def check_ping(name, dest_addr, expect_connected, count, wait, source_addr=None)
logger.info("[+] check {} {} {}".format(name, dest_addr, match))
tgen = get_topogen()
func = functools.partial(_check, name, dest_addr, source_addr, match)
- success, result = topotest.run_and_expect(func, None, count=count, wait=wait)
+ _, result = topotest.run_and_expect(func, None, count=count, wait=wait)
assert result is None, "Failed"
diff --git a/tests/topotests/lib/common_check.py b/tests/topotests/lib/common_check.py
new file mode 100644
index 00000000..b04b9de4
--- /dev/null
+++ b/tests/topotests/lib/common_check.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# SPDX-License-Identifier: ISC
+#
+# common_check.py
+#
+# Copyright 2024 6WIND S.A.
+
+#
+import json
+from lib import topotest
+
+
+def ip_check_path_selection(
+ router, ipaddr_str, expected, vrf_name=None, check_fib=False
+):
+ if vrf_name:
+ cmdstr = f"show ip route vrf {vrf_name} {ipaddr_str} json"
+ else:
+ cmdstr = f"show ip route {ipaddr_str} json"
+ try:
+ output = json.loads(router.vtysh_cmd(cmdstr))
+ except:
+ output = {}
+
+ ret = topotest.json_cmp(output, expected)
+ if ret is None:
+ num_nh_expected = len(expected[ipaddr_str][0]["nexthops"])
+ num_nh_observed = len(output[ipaddr_str][0]["nexthops"])
+ if num_nh_expected == num_nh_observed:
+ if check_fib:
+ # special case: when fib flag is unset,
+ # an extra test should be done to check that the flag is really unset
+ for nh_output, nh_expected in zip(
+ output[ipaddr_str][0]["nexthops"],
+ expected[ipaddr_str][0]["nexthops"],
+ ):
+ if (
+ "fib" in nh_output.keys()
+ and nh_output["fib"]
+ and ("fib" not in nh_expected.keys() or not nh_expected["fib"])
+ ):
+ return "{}, prefix {} nexthop {} has the fib flag set, whereas it is not expected".format(
+ router.name, ipaddr_str, nh_output["ip"]
+ )
+ return ret
+ return "{}, prefix {} does not have the correct number of nexthops : observed {}, expected {}".format(
+ router.name, ipaddr_str, num_nh_observed, num_nh_expected
+ )
+ return ret
+
+
+def iproute2_check_path_selection(router, ipaddr_str, expected, vrf_name=None):
+ if not topotest.iproute2_is_json_capable():
+ return None
+
+ if vrf_name:
+ cmdstr = f"ip -json route show vrf {vrf_name} {ipaddr_str}"
+ else:
+ cmdstr = f"ip -json route show {ipaddr_str}"
+ try:
+ output = json.loads(router.cmd(cmdstr))
+ except:
+ output = []
+
+ return topotest.json_cmp(output, expected)
diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py
index 7787b6f7..f34c48b8 100644
--- a/tests/topotests/lib/common_config.py
+++ b/tests/topotests/lib/common_config.py
@@ -7,7 +7,6 @@
import functools
import ipaddress
-import json
import os
import platform
import socket
@@ -442,7 +441,7 @@ def check_router_status(tgen):
try:
router_list = tgen.routers()
- for router, rnode in router_list.items():
+ for _, rnode in router_list.items():
result = rnode.check_router_running()
if result != "":
daemons = []
@@ -686,7 +685,7 @@ def prep_load_config_to_routers(tgen, *config_name_list):
"""
routers = tgen.routers()
- for rname, router in routers.items():
+ for rname, _ in routers.items():
destname = "{}/{}/{}".format(tgen.logdir, rname, FRRCFG_FILE)
wmode = "w"
for cfbase in config_name_list:
@@ -871,7 +870,7 @@ def get_frr_ipv6_linklocal(tgen, router, intf=None, vrf=None):
"""
router_list = tgen.routers()
- for rname, rnode in router_list.items():
+ for rname, _ in router_list.items():
if rname != router:
continue
@@ -887,7 +886,7 @@ def get_frr_ipv6_linklocal(tgen, router, intf=None, vrf=None):
cmd = "show interface vrf {}".format(vrf)
else:
cmd = "show interface"
- for chk_ll in range(0, 60):
+ for _ in range(0, 60):
sleep(1 / 4)
ifaces = router_list[router].run('vtysh -c "{}"'.format(cmd))
# Fix newlines (make them all the same)
@@ -937,7 +936,7 @@ def generate_support_bundle():
tgen = get_topogen()
if tgen is None:
- logger.warn(
+ logger.warning(
"Support bundle attempted to be generated, but topogen is not being used"
)
return True
@@ -1848,7 +1847,13 @@ def retry(retry_timeout, initial_wait=0, expected=True, diag_pct=0.75):
while True:
seconds_left = (retry_until - datetime.now()).total_seconds()
try:
- ret = func(*args, **kwargs)
+ try:
+ ret = func(*args, seconds_left=seconds_left, **kwargs)
+ except TypeError as error:
+ if "seconds_left" not in str(error):
+ raise
+ ret = func(*args, **kwargs)
+
logger.debug("Function returned %s", ret)
negative_result = ret is False or is_string(ret)
@@ -1869,7 +1874,7 @@ def retry(retry_timeout, initial_wait=0, expected=True, diag_pct=0.75):
return saved_failure
except Exception as error:
- logger.info("Function raised exception: %s", str(error))
+ logger.info('Function raised exception: "%s"', repr(error))
ret = error
if seconds_left < 0 and saved_failure:
@@ -3095,7 +3100,7 @@ def configure_brctl(tgen, topo, input_dict):
"{} dev {} master {}".format(ip_cmd, brctl_name, vrf)
)
- for intf_name, data in topo["routers"][dut]["links"].items():
+ for _, data in topo["routers"][dut]["links"].items():
if "vrf" not in data:
continue
@@ -3366,7 +3371,7 @@ def verify_rib(
found_hops = [
rib_r["ip"]
for rib_r in rib_routes_json[st_rt][0]["nexthops"]
- if "ip" in rib_r
+ if "ip" in rib_r and "active" in rib_r
]
# If somehow key "ip" is not found in nexthops JSON
@@ -4942,7 +4947,7 @@ def scapy_send_raw_packet(tgen, topo, senderRouter, intf, packet=None):
sender_interface = intf
rnode = tgen.routers()[senderRouter]
- for destLink, data in topo["routers"][senderRouter]["links"].items():
+ for _, data in topo["routers"][senderRouter]["links"].items():
if "type" in data and data["type"] == "loopback":
continue
diff --git a/tests/topotests/lib/fe_client.py b/tests/topotests/lib/fe_client.py
index a4754463..784f7d17 100755
--- a/tests/topotests/lib/fe_client.py
+++ b/tests/topotests/lib/fe_client.py
@@ -9,7 +9,6 @@
# noqa: E501
#
import argparse
-import json
import logging
import os
import socket
@@ -18,6 +17,8 @@ import sys
import time
from pathlib import Path
+from munet.base import Timeout
+
CWD = os.path.dirname(os.path.realpath(__file__))
# This is painful but works if you have installed protobuf would be better if we
@@ -80,6 +81,13 @@ GET_DATA_FLAG_EXACT = 0x4
MSG_NOTIFY_FMT = "=B7x"
NOTIFY_FIELD_RESULT_TYPE = 0
+MSG_NOTIFY_SELECT_FMT = "=B7x"
+
+MSG_SESSION_REQ_FMT = "=8x"
+
+MSG_SESSION_REPLY_FMT = "=B7x"
+SESSION_REPLY_FIELD_CREATED = 0
+
#
# Native message codes
#
@@ -88,6 +96,9 @@ MSG_CODE_ERROR = 0
MSG_CODE_TREE_DATA = 2
MSG_CODE_GET_DATA = 3
MSG_CODE_NOTIFY = 4
+MSG_CODE_NOTIFY_SELECT = 9
+MSG_CODE_SESSION_REQ = 10
+MSG_CODE_SESSION_REPLY = 11
msg_native_formats = {
MSG_CODE_ERROR: MSG_ERROR_FMT,
@@ -95,6 +106,9 @@ msg_native_formats = {
MSG_CODE_TREE_DATA: MSG_TREE_DATA_FMT,
MSG_CODE_GET_DATA: MSG_GET_DATA_FMT,
MSG_CODE_NOTIFY: MSG_NOTIFY_FMT,
+ MSG_CODE_NOTIFY_SELECT: MSG_NOTIFY_SELECT_FMT,
+ MSG_CODE_SESSION_REQ: MSG_SESSION_REQ_FMT,
+ MSG_CODE_SESSION_REPLY: MSG_SESSION_REPLY_FMT,
}
@@ -177,27 +191,44 @@ class Session:
client_id = 1
- def __init__(self, sock):
+ def __init__(self, sock, use_protobuf):
self.sock = sock
self.next_req_id = 1
- req = mgmt_pb2.FeMessage()
- req.register_req.client_name = "test-client"
- self.send_pb_msg(req)
- logging.debug("Sent FeRegisterReq: %s", req)
+ if use_protobuf:
+ req = mgmt_pb2.FeMessage()
+ req.register_req.client_name = "test-client"
+ self.send_pb_msg(req)
+ logging.debug("Sent FeRegisterReq: %s", req)
- req = mgmt_pb2.FeMessage()
- req.session_req.create = 1
- req.session_req.client_conn_id = Session.client_id
- Session.client_id += 1
- self.send_pb_msg(req)
- logging.debug("Sent FeSessionReq: %s", req)
+ req = mgmt_pb2.FeMessage()
+ req.session_req.create = 1
+ req.session_req.client_conn_id = Session.client_id
+ Session.client_id += 1
+ self.send_pb_msg(req)
+ logging.debug("Sent FeSessionReq: %s", req)
- reply = self.recv_pb_msg(mgmt_pb2.FeMessage())
- logging.debug("Received FeSessionReply: %s", repr(reply))
+ reply = self.recv_pb_msg(mgmt_pb2.FeMessage())
+ logging.debug("Received FeSessionReply: %s", repr(reply))
- assert reply.session_reply.success
- self.sess_id = reply.session_reply.session_id
+ assert reply.session_reply.success
+ self.sess_id = reply.session_reply.session_id
+ else:
+ self.sess_id = 0
+ mdata, _ = self.get_native_msg_header(MSG_CODE_SESSION_REQ)
+ mdata += struct.pack(MSG_SESSION_REQ_FMT)
+ mdata += "test-client".encode("utf-8") + b"\x00"
+
+ self.send_native_msg(mdata)
+ logging.debug("Sent native SESSION-REQ")
+
+ mhdr, mfixed, mdata = self.recv_native_msg()
+ if mhdr[HDR_FIELD_CODE] == MSG_CODE_SESSION_REPLY:
+ logging.debug("Recv native SESSION-REQ Message: %s: %s", mfixed, mdata)
+ else:
+ raise Exception(f"Recv NON-SESSION-REPLY Message: {mfixed}: {mdata}")
+ assert mfixed[0]
+ self.sess_id = mhdr[HDR_FIELD_SESS_ID]
def close(self, clean=True):
if clean:
@@ -292,7 +323,7 @@ class Session:
def get_data(self, query, data=True, config=False):
# Create the message
- mdata, req_id = self.get_native_msg_header(MSG_CODE_GET_DATA)
+ mdata, _ = self.get_native_msg_header(MSG_CODE_GET_DATA)
flags = GET_DATA_FLAG_STATE if data else 0
flags |= GET_DATA_FLAG_CONFIG if config else 0
mdata += struct.pack(MSG_GET_DATA_FMT, MSG_FORMAT_JSON, flags)
@@ -301,24 +332,29 @@ class Session:
self.send_native_msg(mdata)
logging.debug("Sent GET-TREE")
- mhdr, mfixed, mdata = self.recv_native_msg()
+ _, mfixed, mdata = self.recv_native_msg()
assert mdata[-1] == 0
result = mdata[:-1].decode("utf-8")
logging.debug("Received GET: %s: %s", mfixed, mdata)
return result
- # def subscribe(self, notif_xpath):
- # # Create the message
- # mdata, req_id = self.get_native_msg_header(MSG_CODE_SUBSCRIBE)
- # mdata += struct.pack(MSG_SUBSCRIBE_FMT, MSG_FORMAT_JSON)
- # mdata += notif_xpath.encode("utf-8") + b"\x00"
+ def add_notify_select(self, replace, notif_xpaths):
+ # Create the message
+ mdata, _ = self.get_native_msg_header(MSG_CODE_NOTIFY_SELECT)
+ mdata += struct.pack(MSG_NOTIFY_SELECT_FMT, replace)
+
+ for xpath in notif_xpaths:
+ mdata += xpath.encode("utf-8") + b"\x00"
- # self.send_native_msg(mdata)
- # logging.debug("Sent SUBSCRIBE")
+ self.send_native_msg(mdata)
+ logging.debug("Sent NOTIFY_SELECT")
def recv_notify(self, xpaths=None):
- while True:
+ if xpaths:
+ self.add_notify_select(True, xpaths)
+
+ for _ in Timeout(60):
logging.debug("Waiting for Notify Message")
mhdr, mfixed, mdata = self.recv_native_msg()
if mhdr[HDR_FIELD_CODE] == MSG_CODE_NOTIFY:
@@ -328,19 +364,11 @@ class Session:
vsplit = mhdr[HDR_FIELD_VSPLIT]
assert mdata[vsplit - 1] == 0
- xpath = mdata[: vsplit - 1].decode("utf-8")
-
assert mdata[-1] == 0
- result = mdata[vsplit:-1].decode("utf-8")
-
- if not xpaths:
- return result
- js = json.loads(result)
- key = [x for x in js.keys()][0]
- for xpath in xpaths:
- if key.startswith(xpath):
- return result
- logging.debug("'%s' didn't match xpath filters", key)
+ # xpath = mdata[: vsplit - 1].decode("utf-8")
+ return mdata[vsplit:-1].decode("utf-8")
+ else:
+ raise TimeoutError("Timeout waiting for notifications")
def __parse_args():
@@ -365,6 +393,9 @@ def __parse_args():
"-q", "--query", nargs="+", metavar="XPATH", help="xpath[s] to query"
)
parser.add_argument("-s", "--server", default=MPATH, help="path to server socket")
+ parser.add_argument(
+ "--use-protobuf", action="store_true", help="Use protobuf when there's a choice"
+ )
parser.add_argument("-v", "--verbose", action="store_true", help="Be verbose")
args = parser.parse_args()
@@ -381,13 +412,15 @@ def __server_connect(spath):
logging.warn("retry server connection in .5s (%s)", os.strerror(ec))
time.sleep(0.5)
logging.info("Connected to server on %s", spath)
+ # Set a timeout of 5 minutes for socket operations.
+ sock.settimeout(60 * 5)
return sock
def __main():
args = __parse_args()
sock = __server_connect(Path(args.server))
- sess = Session(sock)
+ sess = Session(sock, use_protobuf=args.use_protobuf)
if args.query:
# Performa an xpath query
@@ -412,8 +445,12 @@ def main():
__main()
except KeyboardInterrupt:
logging.info("Exiting")
+ except TimeoutError as error:
+ logging.error("Timeout: %s", error)
+ sys.exit(2)
except Exception as error:
logging.error("Unexpected error exiting: %s", error, exc_info=True)
+ sys.exit(1)
if __name__ == "__main__":
diff --git a/tests/topotests/lib/grpc-query.py b/tests/topotests/lib/grpc-query.py
index 13b63614..cc7b1ad2 100755
--- a/tests/topotests/lib/grpc-query.py
+++ b/tests/topotests/lib/grpc-query.py
@@ -40,7 +40,6 @@ try:
try:
sys.path[0:0] = [tmpdir]
- print(sys.path)
import frr_northbound_pb2
import frr_northbound_pb2_grpc
diff --git a/tests/topotests/lib/ospf.py b/tests/topotests/lib/ospf.py
index 5b18f8b6..2c876e19 100644
--- a/tests/topotests/lib/ospf.py
+++ b/tests/topotests/lib/ospf.py
@@ -1545,7 +1545,7 @@ def verify_ospf_database(
)
return errormsg
if ospf_external_lsa:
- for ospf_ext_lsa, ext_lsa_data in ospf_external_lsa.items():
+ for ospf_ext_lsa, _ in ospf_external_lsa.items():
if ospf_ext_lsa in show_ospf_json["AS External Link States"]:
logger.info(
"[DUT: %s] OSPF LSDB:External LSA %s", router, ospf_ext_lsa
@@ -2509,7 +2509,7 @@ def verify_ospf_gr_helper(tgen, topo, dut, input_dict=None):
raise ValueError(errormsg)
return errormsg
- for ospf_gr, gr_data in input_dict.items():
+ for ospf_gr, _ in input_dict.items():
try:
if input_dict[ospf_gr] == show_ospf_json[ospf_gr]:
logger.info(
diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py
index f7440efd..369a794e 100644
--- a/tests/topotests/lib/pim.py
+++ b/tests/topotests/lib/pim.py
@@ -149,7 +149,7 @@ def _add_pim_rp_config(tgen, topo, input_dict, router, build, config_data_dict):
# At least one interface must be enabled for PIM on the router
pim_if_enabled = False
pim6_if_enabled = False
- for destLink, data in topo[dut]["links"].items():
+ for _, data in topo[dut]["links"].items():
if "pim" in data:
pim_if_enabled = True
if "pim6" in data:
@@ -332,6 +332,13 @@ def create_igmp_config(tgen, topo, input_dict=None, build=False):
cmd = "no {}".format(cmd)
config_data.append(cmd)
+ if attribute == "static-group":
+ for group in data:
+ cmd = "ip {} {} {}".format(protocol, attribute, group)
+ if del_attr:
+ cmd = "no {}".format(cmd)
+ config_data.append(cmd)
+
if attribute == "query":
for query, value in data.items():
if query != "delete":
@@ -603,7 +610,7 @@ def find_rp_details(tgen, topo):
# ip address of RP
rp_addr = rp_dict["rp_addr"]
- for link, data in topo["routers"][router]["links"].items():
+ for _, data in topo["routers"][router]["links"].items():
if data["ipv4"].split("/")[0] == rp_addr:
rp_details[router] = rp_addr
@@ -1600,7 +1607,7 @@ def verify_pim_rp_info(
if type(group_addresses) is not list:
group_addresses = [group_addresses]
- if type(oif) is not list:
+ if oif is not None and type(oif) is not list:
oif = [oif]
for grp in group_addresses:
@@ -1739,6 +1746,49 @@ def verify_pim_rp_info(
@retry(retry_timeout=60, diag_pct=0)
+def verify_pim_rp_info_is_empty(tgen, dut, af="ipv4"):
+ """
+ Verify pim rp info by running "show ip pim rp-info" cli
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+
+ Usage
+ -----
+ dut = "r1"
+ result = verify_pim_rp_info_is_empty(tgen, dut)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ ip_cmd = "ip"
+ if af == "ipv6":
+ ip_cmd = "ipv6"
+
+ logger.info("[DUT: %s]: Verifying %s rp info", dut, ip_cmd)
+ cmd = "show {} pim rp-info json".format(ip_cmd)
+ show_ip_rp_info_json = run_frr_cmd(rnode, cmd, isjson=True)
+
+ if show_ip_rp_info_json:
+ errormsg = "[DUT %s]: Verifying empty rp-info [FAILED]!!" % (dut)
+ return errormsg
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=60, diag_pct=0)
def verify_pim_state(
tgen,
dut,
@@ -2089,7 +2139,7 @@ def verify_pim_interface(
)
return True
else:
- for destLink, data in topo["routers"][dut]["links"].items():
+ for _, data in topo["routers"][dut]["links"].items():
if "type" in data and data["type"] == "loopback":
continue
@@ -2292,7 +2342,7 @@ def clear_pim_interfaces(tgen, dut):
# Waiting for maximum 60 sec
fail_intf = []
- for retry in range(1, 13):
+ for _ in range(1, 13):
sleep(5)
logger.info("[DUT: %s]: Waiting for 5 sec for PIM neighbors" " to come up", dut)
run_json_after = run_frr_cmd(rnode, "show ip pim neighbor json", isjson=True)
@@ -2368,7 +2418,7 @@ def clear_igmp_interfaces(tgen, dut):
total_groups_before_clear = igmp_json["totalGroups"]
- for key, value in igmp_json.items():
+ for _, value in igmp_json.items():
if type(value) is not dict:
continue
@@ -2381,7 +2431,7 @@ def clear_igmp_interfaces(tgen, dut):
result = run_frr_cmd(rnode, "clear ip igmp interfaces")
# Waiting for maximum 60 sec
- for retry in range(1, 13):
+ for _ in range(1, 13):
logger.info(
"[DUT: %s]: Waiting for 5 sec for igmp interfaces" " to come up", dut
)
@@ -2404,10 +2454,11 @@ def clear_igmp_interfaces(tgen, dut):
# Verify uptime for groups
for group in group_before_clear.keys():
- d1 = datetime.datetime.strptime(group_before_clear[group], "%H:%M:%S")
- d2 = datetime.datetime.strptime(group_after_clear[group], "%H:%M:%S")
- if d2 >= d1:
- errormsg = ("[DUT: %s]: IGMP group is not cleared", " [FAILED!!]", dut)
+ if group in group_after_clear:
+ d1 = datetime.datetime.strptime(group_before_clear[group], "%H:%M:%S")
+ d2 = datetime.datetime.strptime(group_after_clear[group], "%H:%M:%S")
+ if d2 >= d1:
+ errormsg = ("[DUT: %s]: IGMP group is not cleared", " [FAILED!!]", dut)
logger.info("[DUT: %s]: IGMP group is cleared [PASSED!!]")
@@ -2460,7 +2511,7 @@ def clear_mroute_verify(tgen, dut, expected=True):
# RFC 3376: 8.2. Query Interval - Default: 125 seconds
# So waiting for maximum 130 sec to get the igmp report
- for retry in range(1, 26):
+ for _ in range(1, 26):
logger.info("[DUT: %s]: Waiting for 2 sec for mroutes" " to come up", dut)
sleep(5)
keys_json1 = mroute_json_1.keys()
@@ -2671,7 +2722,7 @@ def add_rp_interfaces_and_pim_config(tgen, topo, interface, rp, rp_mapping):
try:
config_data = []
- for group, rp_list in rp_mapping.items():
+ for _, rp_list in rp_mapping.items():
for _rp in rp_list:
config_data.append("interface {}".format(interface))
config_data.append("ip address {}".format(_rp))
@@ -2720,7 +2771,7 @@ def scapy_send_bsr_raw_packet(tgen, topo, senderRouter, receiverRouter, packet=N
script_path = os.path.join(CWD, "send_bsr_packet.py")
node = tgen.net[senderRouter]
- for destLink, data in topo["routers"][senderRouter]["links"].items():
+ for _, data in topo["routers"][senderRouter]["links"].items():
if "type" in data and data["type"] == "loopback":
continue
@@ -2744,6 +2795,48 @@ def scapy_send_bsr_raw_packet(tgen, topo, senderRouter, receiverRouter, packet=N
return True
+def scapy_send_autorp_raw_packet(tgen, senderRouter, senderInterface, packet=None):
+ """
+ Using scapy Raw() method to send AutoRP raw packet from one FRR
+ to other
+
+ Parameters:
+ -----------
+ * `tgen` : Topogen object
+ * `senderRouter` : Sender router
+ * `senderInterface` : SenderInterface
+ * `packet` : AutoRP packet in raw format
+
+ returns:
+ --------
+ errormsg or True
+ """
+
+ global CWD
+ result = ""
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ python3_path = tgen.net.get_exec_path(["python3", "python"])
+ # send_bsr_packet.py has no direct ties to bsr, just sends a raw packet out
+ # a given interface, so just reuse it
+ script_path = os.path.join(CWD, "send_bsr_packet.py")
+ node = tgen.net[senderRouter]
+
+ cmd = [
+ python3_path,
+ script_path,
+ packet,
+ senderInterface,
+ "--interval=1",
+ "--count=1",
+ ]
+ logger.info("Scapy cmd: \n %s", cmd)
+ node.cmd_raises(cmd)
+
+ logger.debug("Exiting lib API: scapy_send_autorp_raw_packet")
+ return True
+
+
def find_rp_from_bsrp_info(tgen, dut, bsr, grp=None):
"""
Find which RP is having lowest prioriy and returns rp IP
@@ -2795,12 +2888,12 @@ def find_rp_from_bsrp_info(tgen, dut, bsr, grp=None):
# RP with lowest priority
if len(priority_dict) != 1:
- rp_p, lowest_priority = sorted(rp_priority.items(), key=lambda x: x[1])[0]
+ rp_p, _ = sorted(rp_priority.items(), key=lambda x: x[1])[0]
rp_details[group] = rp_p
# RP with highest hash value
if len(priority_dict) == 1:
- rp_h, highest_hash = sorted(rp_hash.items(), key=lambda x: x[1])[-1]
+ rp_h, _ = sorted(rp_hash.items(), key=lambda x: x[1])[-1]
rp_details[group] = rp_h
# RP with highest IP address
@@ -3239,7 +3332,7 @@ def verify_pim_join(
interface_json = show_pim_join_json[interface]
grp_addr = grp_addr.split("/")[0]
- for source, data in interface_json[grp_addr].items():
+ for _, data in interface_json[grp_addr].items():
# Verify pim join
if pim_join:
if data["group"] == grp_addr and data["channelJoinName"] == "JOIN":
@@ -4254,6 +4347,143 @@ def verify_local_igmp_groups(tgen, dut, interface, group_addresses):
return True
+@retry(retry_timeout=62)
+def verify_static_groups(tgen, dut, interface, group_addresses):
+ """
+ Verify static groups are received from an intended interface
+ by running "show ip igmp static-group json" command
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+    * `interface`: interface, on which IGMP groups are configured
+ * `group_addresses`: IGMP group address
+
+ Usage
+ -----
+ dut = "r1"
+ interface = "r1-r0-eth0"
+ group_address = "225.1.1.1"
+ result = verify_static_groups(tgen, dut, interface, group_address)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ logger.debug("Entering lib API: {}".format(sys._getframe().f_code.co_name))
+
+ if dut not in tgen.routers():
+ return False
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying static groups received:", dut)
+ show_static_group_json = run_frr_cmd(
+ rnode, "show ip igmp static-group json", isjson=True
+ )
+
+ if type(group_addresses) is not list:
+ group_addresses = [group_addresses]
+
+ if interface not in show_static_group_json:
+ errormsg = (
+ "[DUT %s]: Verifying static group received"
+ " from interface %s [FAILED]!! " % (dut, interface)
+ )
+ return errormsg
+
+ for grp_addr in group_addresses:
+ found = False
+ for index in show_static_group_json[interface]["groups"]:
+ if index["group"] == grp_addr:
+ found = True
+ break
+ if not found:
+ errormsg = (
+ "[DUT %s]: Verifying static group received"
+ " from interface %s [FAILED]!! "
+ " Expected: %s " % (dut, interface, grp_addr)
+ )
+ return errormsg
+
+ logger.info(
+ "[DUT %s]: Verifying static group %s received "
+ "from interface %s [PASSED]!! ",
+ dut,
+ grp_addr,
+ interface,
+ )
+
+ logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name))
+ return True
+
+
+@retry(retry_timeout=62)
+def verify_local_igmp_proxy_groups(
+ tgen, dut, group_addresses_present, group_addresses_not_present
+):
+ """
+    Verify igmp proxy groups are as expected by running
+    "show ip igmp proxy json" command
+
+ Parameters
+ ----------
+ * `tgen`: topogen object
+ * `dut`: device under test
+ * `group_addresses_present`: IGMP group addresses which should
+ currently be proxied
+ * `group_addresses_not_present`: IGMP group addresses which should
+ not currently be proxied
+
+ Usage
+ -----
+ dut = "r1"
+ group_addresses_present = "225.1.1.1"
+ group_addresses_not_present = "225.2.2.2"
+    result = verify_local_igmp_proxy_groups(tgen, dut, group_p, group_np)
+
+ Returns
+ -------
+ errormsg(str) or True
+ """
+
+ if dut not in tgen.routers():
+ errormsg = "[DUT %s]: Device not found!"
+ return errormsg
+
+ rnode = tgen.routers()[dut]
+
+ logger.info("[DUT: %s]: Verifying local IGMP proxy groups:", dut)
+
+ out = rnode.vtysh_cmd("show ip igmp proxy json", isjson=True)
+ groups = [g["group"] if "group" in g else None for g in out["r1-eth1"]["groups"]]
+
+ if type(group_addresses_present) is not list:
+ group_addresses_present = [group_addresses_present]
+ if type(group_addresses_not_present) is not list:
+ group_addresses_not_present = [group_addresses_not_present]
+
+ for test_addr in group_addresses_present:
+ if not test_addr in groups:
+ errormsg = (
+ "[DUT %s]: Verifying local IGMP proxy joins FAILED!! "
+ " Expected but not found: %s " % (dut, test_addr)
+ )
+ return errormsg
+
+ for test_addr in group_addresses_not_present:
+ if test_addr in groups:
+ errormsg = (
+ "[DUT %s]: Verifying local IGMP proxy join removed FAILED!! "
+ " Unexpected but found: %s " % (dut, test_addr)
+ )
+ return errormsg
+
+ return True
+
+
def verify_pim_interface_traffic(tgen, input_dict, return_stats=True, addr_type="ipv4"):
"""
Verify ip pim interface traffic by running
diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py
index f49e30ea..b1da2963 100644
--- a/tests/topotests/lib/topogen.py
+++ b/tests/topotests/lib/topogen.py
@@ -492,7 +492,16 @@ class Topogen(object):
"Errors found post shutdown - details follow: {}".format(errors)
)
- self.net.stop()
+ try:
+ self.net.stop()
+
+ except OSError as error:
+ # OSError exception is raised when mininet tries to stop switch
+ # though switch is stopped once but mininet tries to stop same
+ # switch again, where it ended up with exception
+
+ logger.info(error)
+ logger.info("Exception ignored: switch is already stopped")
def get_exabgp_cmd(self):
if not self.exabgp_cmd:
@@ -824,6 +833,8 @@ class TopoRouter(TopoGear):
Loads the unified configuration file source
Start the daemons in the list
If daemons is None, try to infer daemons from the config file
+ `daemons` is a tuple (daemon, param) of daemons to start, e.g.:
+ (TopoRouter.RD_ZEBRA, "-s 90000000").
"""
source_path = self.load_config(self.RD_FRR, source)
if not daemons:
@@ -832,16 +843,17 @@ class TopoRouter(TopoGear):
for daemon in self.RD:
# This will not work for all daemons
daemonstr = self.RD.get(daemon).rstrip("d")
- if daemonstr == "pim":
- grep_cmd = "grep 'ip {}' {}".format(daemonstr, source_path)
+ if daemonstr == "path":
+ grep_cmd = "grep 'candidate-path' {}".format(source_path)
else:
- grep_cmd = "grep 'router {}' {}".format(daemonstr, source_path)
+ grep_cmd = "grep -w '{}' {}".format(daemonstr, source_path)
result = self.run(grep_cmd, warn=False).strip()
if result:
self.load_config(daemon, "")
else:
- for daemon in daemons:
- self.load_config(daemon, "")
+ for item in daemons:
+ daemon, param = item
+ self.load_config(daemon, "", param)
def load_config(self, daemon, source=None, param=None):
"""Loads daemon configuration from the specified source
diff --git a/tests/topotests/lib/topotest.py b/tests/topotests/lib/topotest.py
index 087d8454..bd989583 100644
--- a/tests/topotests/lib/topotest.py
+++ b/tests/topotests/lib/topotest.py
@@ -396,6 +396,9 @@ def run_and_expect(func, what, count=20, wait=3):
waiting `wait` seconds between tries. By default it tries 20 times with
3 seconds delay between tries.
+ Changing default count/wait values, please change them below also for
+ `minimum_wait`, and `minimum_count`.
+
Returns (True, func-return) on success or
(False, func-return) on failure.
@@ -414,13 +417,18 @@ def run_and_expect(func, what, count=20, wait=3):
# Just a safety-check to avoid running topotests with very
# small wait/count arguments.
+ # If too low count/wait values are defined, override them
+ # with the minimum values.
+ minimum_count = 20
+ minimum_wait = 3
+ minimum_wait_time = 15 # The overall minimum seconds for the test to wait
wait_time = wait * count
- if wait_time < 5:
- assert (
- wait_time >= 5
- ), "Waiting time is too small (count={}, wait={}), adjust timer values".format(
- count, wait
+ if wait_time < minimum_wait_time:
+ logger.warning(
+ f"Waiting time is too small (count={count}, wait={wait}), using default values (count={minimum_count}, wait={minimum_wait})"
)
+ count = minimum_count
+ wait = minimum_wait
logger.debug(
"'{}' polling started (interval {} secs, maximum {} tries)".format(
@@ -602,6 +610,30 @@ def is_linux():
return False
+def iproute2_is_json_capable():
+ """
+ Checks if the iproute2 version installed on the system is capable of
+    handling JSON outputs
+
+ Returns True if capability can be detected, returns False otherwise.
+ """
+ if is_linux():
+ try:
+ subp = subprocess.Popen(
+ ["ip", "-json", "route", "show"],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ )
+ iproute2_err = subp.communicate()[1].splitlines()[0].split()[0]
+
+ if iproute2_err != "Error:":
+ return True
+ except Exception:
+ pass
+ return False
+
+
def iproute2_is_vrf_capable():
"""
Checks if the iproute2 version installed on the system is capable of
@@ -1212,8 +1244,8 @@ def _sysctl_assure(commander, variable, value):
def sysctl_atleast(commander, variable, min_value, raises=False):
try:
if commander is None:
- logger = logging.getLogger("topotest")
- commander = micronet.Commander("sysctl", logger=logger)
+ topotest_logger = logging.getLogger("topotest")
+ commander = micronet.Commander("sysctl", logger=topotest_logger)
return _sysctl_atleast(commander, variable, min_value)
except subprocess.CalledProcessError as error:
@@ -1230,8 +1262,8 @@ def sysctl_atleast(commander, variable, min_value, raises=False):
def sysctl_assure(commander, variable, value, raises=False):
try:
if commander is None:
- logger = logging.getLogger("topotest")
- commander = micronet.Commander("sysctl", logger=logger)
+ topotest_logger = logging.getLogger("topotest")
+ commander = micronet.Commander("sysctl", logger=topotest_logger)
return _sysctl_assure(commander, variable, value)
except subprocess.CalledProcessError as error:
logger.warning(
@@ -1406,7 +1438,7 @@ class Router(Node):
self.daemondir = None
self.hasmpls = False
self.routertype = "frr"
- self.unified_config = None
+ self.unified_config = False
self.daemons = {
"zebra": 0,
"ripd": 0,
@@ -1629,7 +1661,7 @@ class Router(Node):
# print "Daemons before:", self.daemons
if daemon in self.daemons.keys() or daemon == "frr":
if daemon == "frr":
- self.unified_config = 1
+ self.unified_config = True
else:
self.daemons[daemon] = 1
if param is not None: