author     Daniel Baumann <daniel@debian.org>   2024-11-09 14:26:35 +0100
committer  Daniel Baumann <daniel@debian.org>   2024-11-09 14:26:35 +0100
commit     47e4d7c791a050deb06e6c0fdfcac94a782a7cb9 (patch)
tree       19edcac0f5dbda32bc329fa68773254fb2c488c3 /mgmtd
parent     Initial commit. (diff)
Adding upstream version 10.1.1. (upstream/10.1.1)
Signed-off-by: Daniel Baumann <daniel@debian.org>
Diffstat (limited to 'mgmtd')
-rw-r--r--   mgmtd/.gitignore            1
-rw-r--r--   mgmtd/Makefile             10
-rw-r--r--   mgmtd/mgmt.c               86
-rw-r--r--   mgmtd/mgmt.h              104
-rw-r--r--   mgmtd/mgmt_be_adapter.c  1125
-rw-r--r--   mgmtd/mgmt_be_adapter.h   268
-rw-r--r--   mgmtd/mgmt_be_nb.c          6
-rw-r--r--   mgmtd/mgmt_ds.c           529
-rw-r--r--   mgmtd/mgmt_ds.h           336
-rw-r--r--   mgmtd/mgmt_fe_adapter.c  2036
-rw-r--r--   mgmtd/mgmt_fe_adapter.h   241
-rw-r--r--   mgmtd/mgmt_history.c      386
-rw-r--r--   mgmtd/mgmt_history.h       97
-rw-r--r--   mgmtd/mgmt_main.c         286
-rw-r--r--   mgmtd/mgmt_memory.c        36
-rw-r--r--   mgmtd/mgmt_memory.h        32
-rw-r--r--   mgmtd/mgmt_testc.c        266
-rw-r--r--   mgmtd/mgmt_txn.c         2946
-rw-r--r--   mgmtd/mgmt_txn.h          363
-rw-r--r--   mgmtd/mgmt_vty.c          738
-rw-r--r--   mgmtd/subdir.am           107
21 files changed, 9999 insertions, 0 deletions
diff --git a/mgmtd/.gitignore b/mgmtd/.gitignore
new file mode 100644
index 00000000..7ce107e9
--- /dev/null
+++ b/mgmtd/.gitignore
@@ -0,0 +1 @@
+mgmtd
diff --git a/mgmtd/Makefile b/mgmtd/Makefile
new file mode 100644
index 00000000..d69ec5f6
--- /dev/null
+++ b/mgmtd/Makefile
@@ -0,0 +1,10 @@
+all: ALWAYS
+ @$(MAKE) -s -C .. mgmtd/mgmtd
+%: ALWAYS
+ @$(MAKE) -s -C .. mgmtd/$@
+
+Makefile:
+ #nothing
+ALWAYS:
+.PHONY: ALWAYS makefiles
+.SUFFIXES:
diff --git a/mgmtd/mgmt.c b/mgmtd/mgmt.c
new file mode 100644
index 00000000..8d416430
--- /dev/null
+++ b/mgmtd/mgmt.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * FRR Management Daemon (MGMTD) program
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar
+ */
+
+#include <zebra.h>
+#include "debug.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+#include "mgmtd/mgmt_history.h"
+#include "mgmtd/mgmt_memory.h"
+
+struct debug mgmt_debug_be = {.desc = "Management backend adapter"};
+struct debug mgmt_debug_ds = {.desc = "Management datastore"};
+struct debug mgmt_debug_fe = {.desc = "Management frontend adapter"};
+struct debug mgmt_debug_txn = {.desc = "Management transaction"};
+
+/* MGMTD process wide configuration. */
+static struct mgmt_master mgmt_master;
+
+/* MGMTD process wide configuration pointer to export. */
+struct mgmt_master *mm;
+
+void mgmt_master_init(struct event_loop *master, const int buffer_size)
+{
+ memset(&mgmt_master, 0, sizeof(struct mgmt_master));
+
+ mm = &mgmt_master;
+ mm->master = master;
+ mm->terminating = false;
+ mm->socket_buffer = buffer_size;
+ mm->perf_stats_en = true;
+}
+
+void mgmt_init(void)
+{
+
+ /* Initialize datastores */
+ mgmt_ds_init(mm);
+
+ /* Initialize history */
+ mgmt_history_init();
+
+ /* Initialize MGMTD Transaction module */
+ mgmt_txn_init(mm, mm->master);
+
+ /* Initialize the MGMTD Frontend Adapter Module */
+ mgmt_fe_adapter_init(mm->master);
+
+ /*
+ * Initialize the CLI frontend client -- this queues an event for the
+ * client to short-circuit connect to the server (ourselves).
+ */
+ vty_init_mgmt_fe();
+
+ /*
+ * MGMTD VTY commands installation -- the frr lib code will queue an
+ * event to read the config files which needs to happen after the
+ * connect from above is made.
+ */
+ mgmt_vty_init();
+
+ /*
+ * Initialize the MGMTD Backend Adapter Module
+ *
+ * We do this after the FE stuff so that we have read our config file
+ * prior to any BE connection. Setting up the server will queue a
+ * "socket read" event to accept BE connections. So the code is counting
+ * on the above 2 events to run prior to any `accept` event from here.
+ */
+ mgmt_be_adapter_init(mm->master);
+}
+
+void mgmt_terminate(void)
+{
+ mgmt_fe_adapter_destroy();
+ mgmt_be_adapter_destroy();
+ mgmt_txn_destroy();
+ mgmt_history_destroy();
+ mgmt_ds_destroy();
+}
diff --git a/mgmtd/mgmt.h b/mgmtd/mgmt.h
new file mode 100644
index 00000000..665e8d8f
--- /dev/null
+++ b/mgmtd/mgmt.h
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD message definition header.
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_H
+#define _FRR_MGMTD_H
+
+#include "debug.h"
+#include "vrf.h"
+#include "defaults.h"
+#include "stream.h"
+#include "mgmt_defines.h"
+
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_history.h"
+#include "mgmtd/mgmt_txn.h"
+#include "mgmtd/mgmt_ds.h"
+
+#define MGMTD_SOCKET_BUF_SIZE 65535
+#define MGMTD_MAX_COMMIT_LIST 10
+
+extern struct debug mgmt_debug_be;
+extern struct debug mgmt_debug_ds;
+extern struct debug mgmt_debug_fe;
+extern struct debug mgmt_debug_txn;
+
+#define MGMT_DEBUG_BE_CHECK() DEBUG_MODE_CHECK(&mgmt_debug_be, DEBUG_MODE_ALL)
+#define MGMT_DEBUG_DS_CHECK() DEBUG_MODE_CHECK(&mgmt_debug_ds, DEBUG_MODE_ALL)
+#define MGMT_DEBUG_FE_CHECK() DEBUG_MODE_CHECK(&mgmt_debug_fe, DEBUG_MODE_ALL)
+#define MGMT_DEBUG_TXN_CHECK() DEBUG_MODE_CHECK(&mgmt_debug_txn, DEBUG_MODE_ALL)
+
+struct mgmt_txn_ctx;
+
+/*
+ * MGMTD master for system wide configurations and variables.
+ */
+struct mgmt_master {
+ struct event_loop *master;
+
+ /* How big should we set the socket buffer size */
+ uint32_t socket_buffer;
+
+ /* List of all currently allocated transactions */
+ struct mgmt_txns_head txn_list;
+
+ /* Map of transactions, keyed by transaction ID */
+ struct hash *txn_hash;
+ uint64_t next_txn_id;
+
+ /* The single instance of config transaction allowed at any time */
+ struct mgmt_txn_ctx *cfg_txn;
+
+ /* Datastores */
+ struct mgmt_ds_ctx *running_ds;
+ struct mgmt_ds_ctx *candidate_ds;
+ struct mgmt_ds_ctx *oper_ds;
+
+ bool terminating; /* global flag that sigint terminate seen */
+ bool perf_stats_en; /* to enable performance stats measurement */
+
+ /* List of commit infos */
+ struct mgmt_cmt_infos_head cmts; /* List of last 10 commits executed. */
+};
+
+extern struct mgmt_master *mm;
+
+/* Inline functions */
+
+/*
+ * Remove trailing separator from a string.
+ *
+ * str
+ * A null terminated string.
+ *
+ * sep
+ * Trailing character that needs to be removed.
+ */
+static inline void mgmt_remove_trailing_separator(char *str, char sep)
+{
+ size_t len;
+
+ len = strlen(str);
+ if (len && str[len - 1] == sep)
+ str[len - 1] = '\0';
+}
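
As a quick illustration of how this helper is used (a hypothetical caller sketch, not part of this patch; the datastore code further below calls it the same way with '/'):

char xpath[MGMTD_MAX_XPATH_LEN];

strlcpy(xpath, "/frr-interface:lib/interface/", sizeof(xpath));
mgmt_remove_trailing_separator(xpath, '/');
/* xpath is now "/frr-interface:lib/interface" */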
+
+/* Prototypes. */
+extern void mgmt_terminate(void);
+extern void mgmt_reset(void);
+extern time_t mgmt_clock(void);
+
+extern int mgmt_config_write(struct vty *vty);
+extern struct vty *mgmt_vty_read_config(const char *config_file,
+ char *config_default_dir);
+extern void mgmt_master_init(struct event_loop *master, const int buffer_size);
+
+extern void mgmt_init(void);
+extern void mgmt_vty_init(void);
+
+#endif /* _FRR_MGMTD_H */
diff --git a/mgmtd/mgmt_be_adapter.c b/mgmtd/mgmt_be_adapter.c
new file mode 100644
index 00000000..c7f4fb9d
--- /dev/null
+++ b/mgmtd/mgmt_be_adapter.c
@@ -0,0 +1,1125 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Client Connection Adapter
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+
+#include <zebra.h>
+#include "darr.h"
+#include "frrevent.h"
+#include "frrstr.h"
+#include "sockopt.h"
+#include "network.h"
+#include "libfrr.h"
+#include "mgmt_msg.h"
+#include "mgmt_msg_native.h"
+#include "mgmt_pb.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmt_be_client.h"
+#include "mgmtd/mgmt_be_adapter.h"
+
+#define __dbg(fmt, ...) \
+ DEBUGD(&mgmt_debug_be, "BE-ADAPTER: %s: " fmt, __func__, ##__VA_ARGS__)
+#define __log_err(fmt, ...) \
+ zlog_err("BE-ADAPTER: %s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+
+#define FOREACH_ADAPTER_IN_LIST(adapter) \
+ frr_each_safe (mgmt_be_adapters, &mgmt_be_adapters, (adapter))
+
+/* ---------- */
+/* Client IDs */
+/* ---------- */
+
+const char *mgmt_be_client_names[MGMTD_BE_CLIENT_ID_MAX + 1] = {
+ [MGMTD_BE_CLIENT_ID_TESTC] = "mgmtd-testc", /* always first */
+ [MGMTD_BE_CLIENT_ID_ZEBRA] = "zebra",
+#ifdef HAVE_RIPD
+ [MGMTD_BE_CLIENT_ID_RIPD] = "ripd",
+#endif
+#ifdef HAVE_RIPNGD
+ [MGMTD_BE_CLIENT_ID_RIPNGD] = "ripngd",
+#endif
+#ifdef HAVE_STATICD
+ [MGMTD_BE_CLIENT_ID_STATICD] = "staticd",
+#endif
+ [MGMTD_BE_CLIENT_ID_MAX] = "Unknown/Invalid",
+};
+
+/* ------------- */
+/* XPATH MAPPING */
+/* ------------- */
+
+/*
+ * Mapping of YANG XPath prefixes to their corresponding backend clients.
+ */
+struct mgmt_be_xpath_map {
+ char *xpath_prefix;
+ uint64_t clients;
+};
+
+/*
+ * Each client gets their own map, but also union all the strings into the
+ * above map as well.
+ */
+
+static const char *const zebra_config_xpaths[] = {
+ "/frr-affinity-map:lib",
+ "/frr-filter:lib",
+ "/frr-route-map:lib",
+ "/frr-zebra:zebra",
+ "/frr-interface:lib",
+ "/frr-vrf:lib",
+ NULL,
+};
+
+static const char *const zebra_oper_xpaths[] = {
+ "/frr-interface:lib/interface",
+ "/frr-vrf:lib/vrf/frr-zebra:zebra",
+ "/frr-zebra:zebra",
+ NULL,
+};
+
+#if HAVE_RIPD
+static const char *const ripd_config_xpaths[] = {
+ "/frr-filter:lib",
+ "/frr-interface:lib/interface",
+ "/frr-ripd:ripd",
+ "/frr-route-map:lib",
+ "/frr-vrf:lib",
+ "/ietf-key-chain:key-chains",
+ NULL,
+};
+static const char *const ripd_oper_xpaths[] = {
+ "/frr-ripd:ripd",
+ "/ietf-key-chain:key-chains",
+ NULL,
+};
+static const char *const ripd_rpc_xpaths[] = {
+ "/frr-ripd",
+ NULL,
+};
+#endif
+
+#if HAVE_RIPNGD
+static const char *const ripngd_config_xpaths[] = {
+ "/frr-filter:lib",
+ "/frr-interface:lib/interface",
+ "/frr-ripngd:ripngd",
+ "/frr-route-map:lib",
+ "/frr-vrf:lib",
+ NULL,
+};
+static const char *const ripngd_oper_xpaths[] = {
+ "/frr-ripngd:ripngd",
+ NULL,
+};
+static const char *const ripngd_rpc_xpaths[] = {
+ "/frr-ripngd",
+ NULL,
+};
+#endif
+
+#if HAVE_STATICD
+static const char *const staticd_config_xpaths[] = {
+ "/frr-vrf:lib",
+ "/frr-interface:lib",
+ "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd",
+ NULL,
+};
+#endif
+
+static const char *const *be_client_config_xpaths[MGMTD_BE_CLIENT_ID_MAX] = {
+ [MGMTD_BE_CLIENT_ID_ZEBRA] = zebra_config_xpaths,
+#ifdef HAVE_RIPD
+ [MGMTD_BE_CLIENT_ID_RIPD] = ripd_config_xpaths,
+#endif
+#ifdef HAVE_RIPNGD
+ [MGMTD_BE_CLIENT_ID_RIPNGD] = ripngd_config_xpaths,
+#endif
+#ifdef HAVE_STATICD
+ [MGMTD_BE_CLIENT_ID_STATICD] = staticd_config_xpaths,
+#endif
+};
+
+static const char *const *be_client_oper_xpaths[MGMTD_BE_CLIENT_ID_MAX] = {
+#ifdef HAVE_RIPD
+ [MGMTD_BE_CLIENT_ID_RIPD] = ripd_oper_xpaths,
+#endif
+#ifdef HAVE_RIPNGD
+ [MGMTD_BE_CLIENT_ID_RIPNGD] = ripngd_oper_xpaths,
+#endif
+ [MGMTD_BE_CLIENT_ID_ZEBRA] = zebra_oper_xpaths,
+};
+
+static const char *const *be_client_notif_xpaths[MGMTD_BE_CLIENT_ID_MAX] = {
+};
+
+static const char *const *be_client_rpc_xpaths[MGMTD_BE_CLIENT_ID_MAX] = {
+#ifdef HAVE_RIPD
+ [MGMTD_BE_CLIENT_ID_RIPD] = ripd_rpc_xpaths,
+#endif
+#ifdef HAVE_RIPNGD
+ [MGMTD_BE_CLIENT_ID_RIPNGD] = ripngd_rpc_xpaths,
+#endif
+};
+
+/*
+ * We would like to have a better ADT than one with O(n) comparisons
+ *
+ * Perhaps it's possible to sort this array in a way that allows binary search
+ * to find the start, then walk until no possible match can follow? Intuition
+ * says this probably involves exact match/no-match on a stem in the map array
+ * or something like that.
+ */
+
+static struct mgmt_be_xpath_map *be_cfg_xpath_map;
+static struct mgmt_be_xpath_map *be_oper_xpath_map;
+static struct mgmt_be_xpath_map *be_notif_xpath_map;
+static struct mgmt_be_xpath_map *be_rpc_xpath_map;
+
+static struct event_loop *mgmt_loop;
+static struct msg_server mgmt_be_server = {.fd = -1};
+
+static struct mgmt_be_adapters_head mgmt_be_adapters;
+
+static struct mgmt_be_client_adapter
+ *mgmt_be_adapters_by_id[MGMTD_BE_CLIENT_ID_MAX];
+
+
+/* Forward declarations */
+static void
+mgmt_be_adapter_sched_init_event(struct mgmt_be_client_adapter *adapter);
+
+static bool be_is_client_interested(const char *xpath, enum mgmt_be_client_id id,
+ enum mgmt_be_xpath_subscr_type type);
+
+const char *mgmt_be_client_id2name(enum mgmt_be_client_id id)
+{
+ if (id > MGMTD_BE_CLIENT_ID_MAX)
+ return "invalid client id";
+ return mgmt_be_client_names[id];
+}
+
+static enum mgmt_be_client_id mgmt_be_client_name2id(const char *name)
+{
+ enum mgmt_be_client_id id;
+
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ if (!strncmp(mgmt_be_client_names[id], name,
+ MGMTD_CLIENT_NAME_MAX_LEN))
+ return id;
+ }
+
+ return MGMTD_BE_CLIENT_ID_MAX;
+}
+
+static struct mgmt_be_client_adapter *
+mgmt_be_find_adapter_by_fd(int conn_fd)
+{
+ struct mgmt_be_client_adapter *adapter;
+
+ FOREACH_ADAPTER_IN_LIST (adapter) {
+ if (adapter->conn->fd == conn_fd)
+ return adapter;
+ }
+
+ return NULL;
+}
+
+static struct mgmt_be_client_adapter *
+mgmt_be_find_adapter_by_name(const char *name)
+{
+ struct mgmt_be_client_adapter *adapter;
+
+ FOREACH_ADAPTER_IN_LIST (adapter) {
+ if (!strncmp(adapter->name, name, sizeof(adapter->name)))
+ return adapter;
+ }
+
+ return NULL;
+}
+
+static void mgmt_register_client_xpath(enum mgmt_be_client_id id,
+ const char *xpath,
+ enum mgmt_be_xpath_subscr_type type)
+{
+ struct mgmt_be_xpath_map **maps, *map;
+
+ switch (type) {
+ case MGMT_BE_XPATH_SUBSCR_TYPE_CFG:
+ maps = &be_cfg_xpath_map;
+ break;
+ case MGMT_BE_XPATH_SUBSCR_TYPE_OPER:
+ maps = &be_oper_xpath_map;
+ break;
+ case MGMT_BE_XPATH_SUBSCR_TYPE_NOTIF:
+ maps = &be_notif_xpath_map;
+ break;
+ case MGMT_BE_XPATH_SUBSCR_TYPE_RPC:
+ maps = &be_rpc_xpath_map;
+ break;
+ }
+
+ darr_foreach_p (*maps, map) {
+ if (!strcmp(xpath, map->xpath_prefix)) {
+ map->clients |= (1u << id);
+ return;
+ }
+ }
+ /* we didn't find a matching entry */
+ map = darr_append(*maps);
+ map->xpath_prefix = XSTRDUP(MTYPE_MGMTD_XPATH, xpath);
+ map->clients = (1ul << id);
+}
+
+/*
+ * Initialize the combined maps from the per-client maps
+ */
+static void mgmt_be_xpath_map_init(void)
+{
+ enum mgmt_be_client_id id;
+ const char *const *init;
+
+ __dbg("Init XPath Maps");
+
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ /* Initialize the common config init map */
+ for (init = be_client_config_xpaths[id]; init && *init; init++) {
+ __dbg(" - CFG XPATH: '%s'", *init);
+ mgmt_register_client_xpath(id, *init,
+ MGMT_BE_XPATH_SUBSCR_TYPE_CFG);
+ }
+
+ /* Initialize the common oper init map */
+ for (init = be_client_oper_xpaths[id]; init && *init; init++) {
+ __dbg(" - OPER XPATH: '%s'", *init);
+ mgmt_register_client_xpath(id, *init,
+ MGMT_BE_XPATH_SUBSCR_TYPE_OPER);
+ }
+
+ /* Initialize the common NOTIF init map */
+ for (init = be_client_notif_xpaths[id]; init && *init; init++) {
+ __dbg(" - NOTIF XPATH: '%s'", *init);
+ mgmt_register_client_xpath(id, *init,
+ MGMT_BE_XPATH_SUBSCR_TYPE_NOTIF);
+ }
+
+ /* Initialize the common RPC init map */
+ for (init = be_client_rpc_xpaths[id]; init && *init; init++) {
+ __dbg(" - RPC XPATH: '%s'", *init);
+ mgmt_register_client_xpath(id, *init,
+ MGMT_BE_XPATH_SUBSCR_TYPE_RPC);
+ }
+ }
+
+ __dbg("Total Cfg XPath Maps: %u", darr_len(be_cfg_xpath_map));
+ __dbg("Total Oper XPath Maps: %u", darr_len(be_oper_xpath_map));
+ __dbg("Total Noitf XPath Maps: %u", darr_len(be_notif_xpath_map));
+ __dbg("Total RPC XPath Maps: %u", darr_len(be_rpc_xpath_map));
+}
+
+static void mgmt_be_xpath_map_cleanup(void)
+{
+ struct mgmt_be_xpath_map *map;
+
+ darr_foreach_p (be_cfg_xpath_map, map)
+ XFREE(MTYPE_MGMTD_XPATH, map->xpath_prefix);
+ darr_free(be_cfg_xpath_map);
+
+ darr_foreach_p (be_oper_xpath_map, map)
+ XFREE(MTYPE_MGMTD_XPATH, map->xpath_prefix);
+ darr_free(be_oper_xpath_map);
+
+ darr_foreach_p (be_notif_xpath_map, map)
+ XFREE(MTYPE_MGMTD_XPATH, map->xpath_prefix);
+ darr_free(be_notif_xpath_map);
+
+ darr_foreach_p (be_rpc_xpath_map, map)
+ XFREE(MTYPE_MGMTD_XPATH, map->xpath_prefix);
+ darr_free(be_rpc_xpath_map);
+}
+
+
+/*
+ * Check if either path or xpath is a prefix of the other. Before checking, the
+ * xpath is converted to a regular path string (e.g., removing key-value
+ * specifiers).
+ */
+static bool mgmt_be_xpath_prefix(const char *path, const char *xpath)
+{
+ int xc, pc;
+
+ while ((xc = *xpath++)) {
+ if (xc == '[') {
+ xpath = frrstr_skip_over_char(xpath, ']');
+ if (!xpath)
+ return false;
+ continue;
+ }
+ pc = *path++;
+ if (!pc)
+ return true;
+ if (pc != xc)
+ return false;
+ }
+ return true;
+}
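
A rough sketch of the matching semantics (hypothetical inputs, not from this patch): key predicates in the xpath are skipped, and a prefix match in either direction counts as a match:

mgmt_be_xpath_prefix("/frr-vrf:lib", "/frr-vrf:lib/vrf[name='blue']");    /* true  */
mgmt_be_xpath_prefix("/frr-vrf:lib/vrf/frr-zebra:zebra", "/frr-vrf:lib"); /* true  */
mgmt_be_xpath_prefix("/frr-ripd:ripd", "/frr-vrf:lib");                   /* false */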
+
+static void mgmt_be_adapter_delete(struct mgmt_be_client_adapter *adapter)
+{
+ __dbg("deleting client adapter '%s'", adapter->name);
+
+ /*
+ * Notify about disconnect for appropriate cleanup
+ */
+ mgmt_txn_notify_be_adapter_conn(adapter, false);
+ if (adapter->id < MGMTD_BE_CLIENT_ID_MAX) {
+ mgmt_be_adapters_by_id[adapter->id] = NULL;
+ adapter->id = MGMTD_BE_CLIENT_ID_MAX;
+ }
+
+ assert(adapter->refcount == 1);
+ mgmt_be_adapter_unlock(&adapter);
+}
+
+static int mgmt_be_adapter_notify_disconnect(struct msg_conn *conn)
+{
+ struct mgmt_be_client_adapter *adapter = conn->user;
+
+ __dbg("notify disconnect for client adapter '%s'", adapter->name);
+
+ mgmt_be_adapter_delete(adapter);
+
+ return 0;
+}
+
+static void
+mgmt_be_adapter_cleanup_old_conn(struct mgmt_be_client_adapter *adapter)
+{
+ struct mgmt_be_client_adapter *old;
+
+ FOREACH_ADAPTER_IN_LIST (old) {
+ if (old != adapter &&
+ !strncmp(adapter->name, old->name, sizeof(adapter->name))) {
+ /*
+ * We have a Zombie lingering around
+ */
+ __dbg("Client '%s' (FD:%d) seems to have reconnected. Removing old connection (FD:%d)!",
+ adapter->name, adapter->conn->fd, old->conn->fd);
+ /* this will/should delete old */
+ msg_conn_disconnect(old->conn, false);
+ }
+ }
+}
+
+static int mgmt_be_adapter_send_msg(struct mgmt_be_client_adapter *adapter,
+ Mgmtd__BeMessage *be_msg)
+{
+ return msg_conn_send_msg(
+ adapter->conn, MGMT_MSG_VERSION_PROTOBUF, be_msg,
+ mgmtd__be_message__get_packed_size(be_msg),
+ (size_t(*)(void *, void *))mgmtd__be_message__pack, false);
+}
+
+static int mgmt_be_send_subscr_reply(struct mgmt_be_client_adapter *adapter,
+ bool success)
+{
+ Mgmtd__BeMessage be_msg;
+ Mgmtd__BeSubscribeReply reply;
+
+ mgmtd__be_subscribe_reply__init(&reply);
+ reply.success = success;
+
+ mgmtd__be_message__init(&be_msg);
+ be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REPLY;
+ be_msg.subscr_reply = &reply;
+
+ __dbg("Sending SUBSCR_REPLY client: %s sucess: %u", adapter->name,
+ success);
+
+ return mgmt_be_adapter_send_msg(adapter, &be_msg);
+}
+
+static int
+mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter,
+ Mgmtd__BeMessage *be_msg)
+{
+ const char *xpath;
+ uint i, num;
+
+ /*
+ * protobuf-c adds a max size enum with an internal, and changing by
+ * version, name; cast to an int to avoid unhandled enum warnings
+ */
+ switch ((int)be_msg->message_case) {
+ case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ:
+ __dbg("Got SUBSCR_REQ from '%s' to register xpaths config: %zu oper: %zu notif: %zu rpc: %zu",
+ be_msg->subscr_req->client_name,
+ be_msg->subscr_req->n_config_xpaths,
+ be_msg->subscr_req->n_oper_xpaths,
+ be_msg->subscr_req->n_notif_xpaths,
+ be_msg->subscr_req->n_rpc_xpaths);
+
+ if (strlen(be_msg->subscr_req->client_name)) {
+ strlcpy(adapter->name, be_msg->subscr_req->client_name,
+ sizeof(adapter->name));
+ adapter->id = mgmt_be_client_name2id(adapter->name);
+ if (adapter->id >= MGMTD_BE_CLIENT_ID_MAX) {
+ __log_err("Unable to resolve adapter '%s' to a valid ID. Disconnecting!",
+ adapter->name);
+ /* this will/should delete old */
+ msg_conn_disconnect(adapter->conn, false);
+ break;
+ }
+ mgmt_be_adapters_by_id[adapter->id] = adapter;
+ mgmt_be_adapter_cleanup_old_conn(adapter);
+
+ /* schedule INIT sequence now that it is registered */
+ mgmt_be_adapter_sched_init_event(adapter);
+ }
+
+ num = be_msg->subscr_req->n_config_xpaths;
+ for (i = 0; i < num; i++) {
+ xpath = be_msg->subscr_req->config_xpaths[i];
+ mgmt_register_client_xpath(adapter->id, xpath,
+ MGMT_BE_XPATH_SUBSCR_TYPE_CFG);
+ }
+
+ num = be_msg->subscr_req->n_oper_xpaths;
+ for (i = 0; i < num; i++) {
+ xpath = be_msg->subscr_req->oper_xpaths[i];
+ mgmt_register_client_xpath(adapter->id, xpath,
+ MGMT_BE_XPATH_SUBSCR_TYPE_OPER);
+ }
+
+ num = be_msg->subscr_req->n_notif_xpaths;
+ for (i = 0; i < num; i++) {
+ xpath = be_msg->subscr_req->notif_xpaths[i];
+ mgmt_register_client_xpath(adapter->id, xpath,
+ MGMT_BE_XPATH_SUBSCR_TYPE_NOTIF);
+ }
+
+ num = be_msg->subscr_req->n_rpc_xpaths;
+ for (i = 0; i < num; i++) {
+ xpath = be_msg->subscr_req->rpc_xpaths[i];
+ mgmt_register_client_xpath(adapter->id, xpath,
+ MGMT_BE_XPATH_SUBSCR_TYPE_RPC);
+ }
+
+ mgmt_be_send_subscr_reply(adapter, true);
+ break;
+ case MGMTD__BE_MESSAGE__MESSAGE_TXN_REPLY:
+ __dbg("Got %s TXN_REPLY from '%s' txn-id %" PRIx64 " with '%s'",
+ be_msg->txn_reply->create ? "Create" : "Delete",
+ adapter->name, be_msg->txn_reply->txn_id,
+ be_msg->txn_reply->success ? "success" : "failure");
+ /*
+ * Forward the TXN_REPLY to txn module.
+ */
+ mgmt_txn_notify_be_txn_reply(
+ be_msg->txn_reply->txn_id,
+ be_msg->txn_reply->create,
+ be_msg->txn_reply->success, adapter);
+ break;
+ case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REPLY:
+ __dbg("Got CFGDATA_REPLY from '%s' txn-id %" PRIx64 " err:'%s'",
+ adapter->name, be_msg->cfg_data_reply->txn_id,
+ be_msg->cfg_data_reply->error_if_any
+ ? be_msg->cfg_data_reply->error_if_any
+ : "None");
+ /*
+ * Forward the CfgData-create reply to the txn module.
+ */
+ mgmt_txn_notify_be_cfgdata_reply(
+ be_msg->cfg_data_reply->txn_id,
+ be_msg->cfg_data_reply->success,
+ be_msg->cfg_data_reply->error_if_any, adapter);
+ break;
+ case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REPLY:
+ __dbg("Got %s CFG_APPLY_REPLY from '%s' txn-id %" PRIx64
+ " err:'%s'",
+ be_msg->cfg_apply_reply->success ? "successful" : "failed",
+ adapter->name, be_msg->cfg_apply_reply->txn_id,
+ be_msg->cfg_apply_reply->error_if_any
+ ? be_msg->cfg_apply_reply->error_if_any
+ : "None");
+ /*
+ * Forward the CfgData-apply reply to the txn module.
+ */
+ mgmt_txn_notify_be_cfg_apply_reply(
+ be_msg->cfg_apply_reply->txn_id,
+ be_msg->cfg_apply_reply->success,
+ be_msg->cfg_apply_reply->error_if_any, adapter);
+ break;
+ /*
+ * NOTE: The following messages are always sent from MGMTD to
+ * Backend clients only and/or need not be handled on MGMTd.
+ */
+ case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REPLY:
+ case MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ:
+ case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REQ:
+ case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ:
+ case MGMTD__BE_MESSAGE__MESSAGE__NOT_SET:
+ default:
+ /*
+ * A 'default' case is being added contrary to the
+ * FRR code guidelines to take care of build
+ * failures on certain build systems (courtesy of
+ * the proto-c package).
+ */
+ break;
+ }
+
+ return 0;
+}
+
+int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, bool create)
+{
+ Mgmtd__BeMessage be_msg;
+ Mgmtd__BeTxnReq txn_req;
+
+ mgmtd__be_txn_req__init(&txn_req);
+ txn_req.create = create;
+ txn_req.txn_id = txn_id;
+
+ mgmtd__be_message__init(&be_msg);
+ be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ;
+ be_msg.txn_req = &txn_req;
+
+ __dbg("Sending TXN_REQ to '%s' txn-id: %" PRIu64, adapter->name, txn_id);
+
+ return mgmt_be_adapter_send_msg(adapter, &be_msg);
+}
+
+int mgmt_be_send_cfgdata_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id,
+ Mgmtd__YangCfgDataReq **cfgdata_reqs,
+ size_t num_reqs, bool end_of_data)
+{
+ Mgmtd__BeMessage be_msg;
+ Mgmtd__BeCfgDataCreateReq cfgdata_req;
+
+ mgmtd__be_cfg_data_create_req__init(&cfgdata_req);
+ cfgdata_req.txn_id = txn_id;
+ cfgdata_req.data_req = cfgdata_reqs;
+ cfgdata_req.n_data_req = num_reqs;
+ cfgdata_req.end_of_data = end_of_data;
+
+ mgmtd__be_message__init(&be_msg);
+ be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REQ;
+ be_msg.cfg_data_req = &cfgdata_req;
+
+ __dbg("Sending CFGDATA_CREATE_REQ to '%s' txn-id: %" PRIu64 " last: %s",
+ adapter->name, txn_id, end_of_data ? "yes" : "no");
+
+ return mgmt_be_adapter_send_msg(adapter, &be_msg);
+}
+
+int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id)
+{
+ Mgmtd__BeMessage be_msg;
+ Mgmtd__BeCfgDataApplyReq apply_req;
+
+ mgmtd__be_cfg_data_apply_req__init(&apply_req);
+ apply_req.txn_id = txn_id;
+
+ mgmtd__be_message__init(&be_msg);
+ be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ;
+ be_msg.cfg_apply_req = &apply_req;
+
+ __dbg("Sending CFG_APPLY_REQ to '%s' txn-id: %" PRIu64, adapter->name,
+ txn_id);
+
+ return mgmt_be_adapter_send_msg(adapter, &be_msg);
+}
+
+int mgmt_be_send_native(enum mgmt_be_client_id id, void *msg)
+{
+ struct mgmt_be_client_adapter *adapter = mgmt_be_get_adapter_by_id(id);
+
+ if (!adapter)
+ return -1;
+
+ return mgmt_msg_native_send_msg(adapter->conn, msg, false);
+}
+
+static void mgmt_be_adapter_send_notify(struct mgmt_msg_notify_data *msg,
+ size_t msglen)
+{
+ struct mgmt_be_client_adapter *adapter;
+ struct mgmt_be_xpath_map *map;
+ struct nb_node *nb_node;
+ const char *notif;
+ uint id, len;
+
+ if (!darr_len(be_notif_xpath_map))
+ return;
+
+ notif = mgmt_msg_native_xpath_decode(msg, msglen);
+ if (!notif) {
+ __log_err("Corrupt notify msg");
+ return;
+ }
+
+ nb_node = nb_node_find(notif);
+ if (!nb_node) {
+ __log_err("No schema found for notification: %s", notif);
+ return;
+ }
+
+ darr_foreach_p (be_notif_xpath_map, map) {
+ len = strlen(map->xpath_prefix);
+ if (strncmp(map->xpath_prefix, nb_node->xpath, len) &&
+ strncmp(map->xpath_prefix, notif, len))
+ continue;
+
+ FOREACH_BE_CLIENT_BITS (id, map->clients) {
+ adapter = mgmt_be_get_adapter_by_id(id);
+ if (!adapter)
+ continue;
+ msg_conn_send_msg(adapter->conn, MGMT_MSG_VERSION_NATIVE,
+ msg, msglen, NULL, false);
+ }
+ }
+}
+
+/*
+ * Handle a native encoded message
+ */
+static void be_adapter_handle_native_msg(struct mgmt_be_client_adapter *adapter,
+ struct mgmt_msg_header *msg,
+ size_t msg_len)
+{
+ struct mgmt_msg_notify_data *notify_msg;
+ struct mgmt_msg_tree_data *tree_msg;
+ struct mgmt_msg_rpc_reply *rpc_msg;
+ struct mgmt_msg_error *error_msg;
+
+ /* get the transaction */
+
+ switch (msg->code) {
+ case MGMT_MSG_CODE_ERROR:
+ error_msg = (typeof(error_msg))msg;
+ __dbg("Got ERROR from '%s' txn-id %" PRIx64, adapter->name,
+ msg->refer_id);
+
+ /* Forward the reply to the txn module */
+ mgmt_txn_notify_error(adapter, msg->refer_id, msg->req_id,
+ error_msg->error, error_msg->errstr);
+
+ break;
+ case MGMT_MSG_CODE_TREE_DATA:
+ /* tree data from a backend client */
+ tree_msg = (typeof(tree_msg))msg;
+ __dbg("Got TREE_DATA from '%s' txn-id %" PRIx64, adapter->name,
+ msg->refer_id);
+
+ /* Forward the reply to the txn module */
+ mgmt_txn_notify_tree_data_reply(adapter, tree_msg, msg_len);
+ break;
+ case MGMT_MSG_CODE_RPC_REPLY:
+ /* RPC reply from a backend client */
+ rpc_msg = (typeof(rpc_msg))msg;
+ __dbg("Got RPC_REPLY from '%s' txn-id %" PRIx64, adapter->name,
+ msg->refer_id);
+
+ /* Forward the reply to the txn module */
+ mgmt_txn_notify_rpc_reply(adapter, rpc_msg, msg_len);
+ break;
+ case MGMT_MSG_CODE_NOTIFY:
+ notify_msg = (typeof(notify_msg))msg;
+ __dbg("Got NOTIFY from '%s'", adapter->name);
+ mgmt_be_adapter_send_notify(notify_msg, msg_len);
+ mgmt_fe_adapter_send_notify(notify_msg, msg_len);
+ break;
+ default:
+ __log_err("unknown native message txn-id %" PRIu64
+ " req-id %" PRIu64
+ " code %u from BE client for adapter %s",
+ msg->refer_id, msg->req_id, msg->code, adapter->name);
+ break;
+ }
+}
+
+
+static void mgmt_be_adapter_process_msg(uint8_t version, uint8_t *data,
+ size_t len, struct msg_conn *conn)
+{
+ struct mgmt_be_client_adapter *adapter = conn->user;
+ Mgmtd__BeMessage *be_msg;
+
+ if (version == MGMT_MSG_VERSION_NATIVE) {
+ struct mgmt_msg_header *msg = (typeof(msg))data;
+
+ if (len >= sizeof(*msg))
+ be_adapter_handle_native_msg(adapter, msg, len);
+ else
+ __log_err("native message to adapter %s too short %zu",
+ adapter->name, len);
+ return;
+ }
+
+ be_msg = mgmtd__be_message__unpack(NULL, len, data);
+ if (!be_msg) {
+ __dbg("Failed to decode %zu bytes for adapter: %s", len,
+ adapter->name);
+ return;
+ }
+ __dbg("Decoded %zu bytes of message: %u for adapter: %s", len,
+ be_msg->message_case, adapter->name);
+ (void)mgmt_be_adapter_handle_msg(adapter, be_msg);
+ mgmtd__be_message__free_unpacked(be_msg, NULL);
+}
+
+/*
+ * Args for callback
+ */
+struct mgmt_be_get_adapter_config_params {
+ struct mgmt_be_client_adapter *adapter;
+ struct nb_config_cbs *cfg_chgs;
+ uint32_t seq;
+};
+
+/*
+ * Initialize a BE client over a new connection
+ */
+static void mgmt_be_adapter_conn_init(struct event *thread)
+{
+ struct mgmt_be_client_adapter *adapter;
+
+ adapter = (struct mgmt_be_client_adapter *)EVENT_ARG(thread);
+ assert(adapter && adapter->conn->fd >= 0);
+
+ /*
+ * Notify TXN module to create a CONFIG transaction and
+ * download the CONFIGs identified for this new client.
+ * If the TXN module fails to initiate the CONFIG transaction
+ * retry a bit later. It only fails if there's an existing config
+ * transaction in progress.
+ */
+ if (mgmt_txn_notify_be_adapter_conn(adapter, true) != 0) {
+ zlog_err("XXX txn in progress, retry init");
+ mgmt_be_adapter_sched_init_event(adapter);
+ return;
+ }
+}
+
+/*
+ * Schedule the initialization of the BE client connection.
+ */
+static void
+mgmt_be_adapter_sched_init_event(struct mgmt_be_client_adapter *adapter)
+{
+ event_add_timer_msec(mgmt_loop, mgmt_be_adapter_conn_init, adapter,
+ MGMTD_BE_CONN_INIT_DELAY_MSEC,
+ &adapter->conn_init_ev);
+}
+
+void mgmt_be_adapter_lock(struct mgmt_be_client_adapter *adapter)
+{
+ adapter->refcount++;
+}
+
+extern void mgmt_be_adapter_unlock(struct mgmt_be_client_adapter **adapter)
+{
+ struct mgmt_be_client_adapter *a = *adapter;
+ assert(a && a->refcount);
+
+ if (!--a->refcount) {
+ mgmt_be_adapters_del(&mgmt_be_adapters, a);
+ EVENT_OFF(a->conn_init_ev);
+ msg_server_conn_delete(a->conn);
+ XFREE(MTYPE_MGMTD_BE_ADPATER, a);
+ }
+
+ *adapter = NULL;
+}
+
+/*
+ * Initialize the BE adapter module
+ */
+void mgmt_be_adapter_init(struct event_loop *tm)
+{
+ char server_path[MAXPATHLEN];
+
+ assert(!mgmt_loop);
+ mgmt_loop = tm;
+
+ mgmt_be_adapters_init(&mgmt_be_adapters);
+ mgmt_be_xpath_map_init();
+
+ snprintf(server_path, sizeof(server_path), MGMTD_BE_SOCK_NAME);
+
+ if (msg_server_init(&mgmt_be_server, server_path, tm,
+ mgmt_be_create_adapter, "backend", &mgmt_debug_be)) {
+ zlog_err("cannot initialize backend server");
+ exit(1);
+ }
+}
+
+/*
+ * Destroy the BE adapter module
+ */
+void mgmt_be_adapter_destroy(void)
+{
+ struct mgmt_be_client_adapter *adapter;
+
+ msg_server_cleanup(&mgmt_be_server);
+ FOREACH_ADAPTER_IN_LIST (adapter) {
+ mgmt_be_adapter_delete(adapter);
+ }
+ mgmt_be_xpath_map_cleanup();
+}
+
+/*
+ * The server accepted a new connection
+ */
+struct msg_conn *mgmt_be_create_adapter(int conn_fd, union sockunion *from)
+{
+ struct mgmt_be_client_adapter *adapter = NULL;
+
+ assert(!mgmt_be_find_adapter_by_fd(conn_fd));
+
+ adapter = XCALLOC(MTYPE_MGMTD_BE_ADPATER,
+ sizeof(struct mgmt_be_client_adapter));
+ adapter->id = MGMTD_BE_CLIENT_ID_MAX;
+ snprintf(adapter->name, sizeof(adapter->name), "Unknown-FD-%d",
+ conn_fd);
+
+ mgmt_be_adapter_lock(adapter);
+ mgmt_be_adapters_add_tail(&mgmt_be_adapters, adapter);
+ RB_INIT(nb_config_cbs, &adapter->cfg_chgs);
+
+ adapter->conn = msg_server_conn_create(mgmt_loop, conn_fd,
+ mgmt_be_adapter_notify_disconnect,
+ mgmt_be_adapter_process_msg,
+ MGMTD_BE_MAX_NUM_MSG_PROC,
+ MGMTD_BE_MAX_NUM_MSG_WRITE,
+ MGMTD_BE_MAX_MSG_LEN, adapter,
+ "BE-adapter");
+
+ adapter->conn->debug = DEBUG_MODE_CHECK(&mgmt_debug_be, DEBUG_MODE_ALL);
+
+ __dbg("Added new MGMTD Backend adapter '%s'", adapter->name);
+
+ return adapter->conn;
+}
+
+struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_id(enum mgmt_be_client_id id)
+{
+ return (id < MGMTD_BE_CLIENT_ID_MAX ? mgmt_be_adapters_by_id[id] : NULL);
+}
+
+struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_name(const char *name)
+{
+ return mgmt_be_find_adapter_by_name(name);
+}
+
+void mgmt_be_adapter_toggle_client_debug(bool set)
+{
+ struct mgmt_be_client_adapter *adapter;
+
+ FOREACH_ADAPTER_IN_LIST (adapter)
+ adapter->conn->debug = set;
+}
+
+/*
+ * Get a full set of changes for all the config that an adapter is subscribed to
+ * receive.
+ */
+void mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter,
+ struct nb_config_cbs **changes)
+{
+ const struct lyd_node *root, *dnode;
+ uint32_t seq = 0;
+ char *xpath;
+
+ /* We can't be in the middle of sending other chgs when here. */
+ assert(RB_EMPTY(nb_config_cbs, &adapter->cfg_chgs));
+
+ *changes = &adapter->cfg_chgs;
+ LY_LIST_FOR (running_config->dnode, root) {
+ LYD_TREE_DFS_BEGIN (root, dnode) {
+ if (lysc_is_key(dnode->schema))
+ goto walk_cont;
+
+ xpath = lyd_path(dnode, LYD_PATH_STD, NULL, 0);
+ if (be_is_client_interested(xpath, adapter->id,
+ MGMT_BE_XPATH_SUBSCR_TYPE_CFG))
+ nb_config_diff_add_change(*changes, NB_CB_CREATE, &seq, dnode);
+ else
+ LYD_TREE_DFS_continue = 1; /* skip any subtree */
+ free(xpath);
+ walk_cont:
+ LYD_TREE_DFS_END(root, dnode);
+ }
+ }
+}
+
+uint64_t mgmt_be_interested_clients(const char *xpath,
+ enum mgmt_be_xpath_subscr_type type)
+{
+ struct mgmt_be_xpath_map *maps = NULL, *map;
+ enum mgmt_be_client_id id;
+ uint64_t clients;
+
+ switch (type) {
+ case MGMT_BE_XPATH_SUBSCR_TYPE_CFG:
+ maps = be_cfg_xpath_map;
+ break;
+ case MGMT_BE_XPATH_SUBSCR_TYPE_OPER:
+ maps = be_oper_xpath_map;
+ break;
+ case MGMT_BE_XPATH_SUBSCR_TYPE_NOTIF:
+ maps = be_notif_xpath_map;
+ break;
+ case MGMT_BE_XPATH_SUBSCR_TYPE_RPC:
+ maps = be_rpc_xpath_map;
+ break;
+ }
+
+ clients = 0;
+
+ __dbg("XPATH: '%s'", xpath);
+ darr_foreach_p (maps, map)
+ if (mgmt_be_xpath_prefix(map->xpath_prefix, xpath))
+ clients |= map->clients;
+
+ if (DEBUG_MODE_CHECK(&mgmt_debug_be, DEBUG_MODE_ALL)) {
+ FOREACH_BE_CLIENT_BITS (id, clients)
+ __dbg("Cient: %s: subscribed",
+ mgmt_be_client_id2name(id));
+ }
+ return clients;
+}
+
+/**
+ * Return true if client `id` is interested in `xpath` for the given
+ * subscription `type` (config, oper, notif or rpc).
+ *
+ * Args:
+ * xpath - the xpath to check for interest.
+ * id - the BE client being checked for.
+ * type - the subscription type to check for.
+ *
+ * Returns:
+ * Interested or not.
+ */
+static bool be_is_client_interested(const char *xpath, enum mgmt_be_client_id id,
+ enum mgmt_be_xpath_subscr_type type)
+{
+ uint64_t clients;
+
+ assert(id < MGMTD_BE_CLIENT_ID_MAX);
+
+ __dbg("Checking client: %s for xpath: '%s'", mgmt_be_client_id2name(id),
+ xpath);
+
+ clients = mgmt_be_interested_clients(xpath, type);
+ if (IS_IDBIT_SET(clients, id)) {
+ __dbg("client: %s: interested", mgmt_be_client_id2name(id));
+ return true;
+ }
+
+ __dbg("client: %s: not interested", mgmt_be_client_id2name(id));
+ return false;
+}
+
+void mgmt_be_adapter_status_write(struct vty *vty)
+{
+ struct mgmt_be_client_adapter *adapter;
+
+ vty_out(vty, "MGMTD Backend Adapters\n");
+
+ FOREACH_ADAPTER_IN_LIST (adapter) {
+ vty_out(vty, " Client: \t\t\t%s\n", adapter->name);
+ vty_out(vty, " Conn-FD: \t\t\t%d\n", adapter->conn->fd);
+ vty_out(vty, " Client-Id: \t\t\t%d\n", adapter->id);
+ vty_out(vty, " Ref-Count: \t\t\t%u\n", adapter->refcount);
+ vty_out(vty, " Msg-Recvd: \t\t\t%" PRIu64 "\n",
+ adapter->conn->mstate.nrxm);
+ vty_out(vty, " Bytes-Recvd: \t\t%" PRIu64 "\n",
+ adapter->conn->mstate.nrxb);
+ vty_out(vty, " Msg-Sent: \t\t\t%" PRIu64 "\n",
+ adapter->conn->mstate.ntxm);
+ vty_out(vty, " Bytes-Sent: \t\t%" PRIu64 "\n",
+ adapter->conn->mstate.ntxb);
+ }
+ vty_out(vty, " Total: %d\n",
+ (int)mgmt_be_adapters_count(&mgmt_be_adapters));
+}
+
+static void be_show_xpath_register(struct vty *vty,
+ struct mgmt_be_xpath_map *map)
+{
+ enum mgmt_be_client_id id;
+ const char *astr;
+
+ vty_out(vty, " - xpath: '%s'\n", map->xpath_prefix);
+ FOREACH_BE_CLIENT_BITS (id, map->clients) {
+ astr = mgmt_be_get_adapter_by_id(id) ? "active" : "inactive";
+ vty_out(vty, " -- %s-client: '%s'\n", astr,
+ mgmt_be_client_id2name(id));
+ }
+}
+void mgmt_be_xpath_register_write(struct vty *vty)
+{
+ struct mgmt_be_xpath_map *map;
+
+ vty_out(vty, "MGMTD Backend CFG XPath Registry: Count: %u\n",
+ darr_len(be_cfg_xpath_map));
+ darr_foreach_p (be_cfg_xpath_map, map)
+ be_show_xpath_register(vty, map);
+
+ vty_out(vty, "\nMGMTD Backend OPER XPath Registry: Count: %u\n",
+ darr_len(be_oper_xpath_map));
+ darr_foreach_p (be_oper_xpath_map, map)
+ be_show_xpath_register(vty, map);
+
+ vty_out(vty, "\nMGMTD Backend NOTIFY XPath Registry: Count: %u\n",
+ darr_len(be_notif_xpath_map));
+ darr_foreach_p (be_notif_xpath_map, map)
+ be_show_xpath_register(vty, map);
+
+ vty_out(vty, "\nMGMTD Backend RPC XPath Registry: Count: %u\n",
+ darr_len(be_rpc_xpath_map));
+ darr_foreach_p (be_rpc_xpath_map, map)
+ be_show_xpath_register(vty, map);
+}
+
+void mgmt_be_show_xpath_registries(struct vty *vty, const char *xpath)
+{
+ enum mgmt_be_client_id id;
+ struct mgmt_be_client_adapter *adapter;
+ uint64_t cclients, nclients, oclients, rclients, combined;
+
+ cclients = mgmt_be_interested_clients(xpath,
+ MGMT_BE_XPATH_SUBSCR_TYPE_CFG);
+ oclients = mgmt_be_interested_clients(xpath,
+ MGMT_BE_XPATH_SUBSCR_TYPE_OPER);
+ nclients = mgmt_be_interested_clients(xpath,
+ MGMT_BE_XPATH_SUBSCR_TYPE_NOTIF);
+ rclients = mgmt_be_interested_clients(xpath,
+ MGMT_BE_XPATH_SUBSCR_TYPE_RPC);
+ combined = cclients | nclients | oclients | rclients;
+
+ vty_out(vty, "XPath: '%s'\n", xpath);
+ FOREACH_BE_CLIENT_BITS (id, combined) {
+ vty_out(vty,
+ " -- Client: '%s'\tconfig:%d notify:%d oper:%d rpc:%d\n",
+ mgmt_be_client_id2name(id), IS_IDBIT_SET(cclients, id),
+ IS_IDBIT_SET(nclients, id), IS_IDBIT_SET(oclients, id),
+ IS_IDBIT_SET(rclients, id));
+ adapter = mgmt_be_get_adapter_by_id(id);
+ if (adapter)
+ vty_out(vty, " -- Adapter: %p\n", adapter);
+ }
+}
diff --git a/mgmtd/mgmt_be_adapter.h b/mgmtd/mgmt_be_adapter.h
new file mode 100644
index 00000000..c9f2ab1b
--- /dev/null
+++ b/mgmtd/mgmt_be_adapter.h
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Client Connection Adapter
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+
+#ifndef _FRR_MGMTD_BE_ADAPTER_H_
+#define _FRR_MGMTD_BE_ADAPTER_H_
+
+#include "mgmt_be_client.h"
+#include "mgmt_msg.h"
+#include "mgmt_defines.h"
+#include "mgmtd/mgmt_ds.h"
+
+#define MGMTD_BE_CONN_INIT_DELAY_MSEC 50
+
+#define MGMTD_FIND_ADAPTER_BY_INDEX(adapter_index) \
+ mgmt_adaptr_ref[adapter_index]
+
+/**
+ * CLIENT-ID
+ *
+ * Add enum value for each supported component, wrap with
+ * #ifdef HAVE_COMPONENT
+ */
+enum mgmt_be_client_id {
+ MGMTD_BE_CLIENT_ID_TESTC, /* always first */
+ MGMTD_BE_CLIENT_ID_ZEBRA,
+#ifdef HAVE_RIPD
+ MGMTD_BE_CLIENT_ID_RIPD,
+#endif
+#ifdef HAVE_RIPNGD
+ MGMTD_BE_CLIENT_ID_RIPNGD,
+#endif
+#ifdef HAVE_STATICD
+ MGMTD_BE_CLIENT_ID_STATICD,
+#endif
+ MGMTD_BE_CLIENT_ID_MAX
+};
+#define MGMTD_BE_CLIENT_ID_MIN 0
+
+
+enum mgmt_be_req_type {
+ MGMTD_BE_REQ_NONE = 0,
+ MGMTD_BE_REQ_CFG_VALIDATE,
+ MGMTD_BE_REQ_CFG_APPLY,
+ MGMTD_BE_REQ_DATA_GET_ELEM,
+ MGMTD_BE_REQ_DATA_GET_NEXT
+};
+
+struct mgmt_be_cfgreq {
+ Mgmtd__YangCfgDataReq **cfgdata_reqs;
+ size_t num_reqs;
+};
+
+struct mgmt_be_datareq {
+ Mgmtd__YangGetDataReq **getdata_reqs;
+ size_t num_reqs;
+};
+
+PREDECL_LIST(mgmt_be_adapters);
+PREDECL_LIST(mgmt_txn_badapters);
+
+struct mgmt_be_client_adapter {
+ struct msg_conn *conn;
+
+ struct event *conn_init_ev;
+
+ enum mgmt_be_client_id id;
+ uint32_t flags;
+ char name[MGMTD_CLIENT_NAME_MAX_LEN];
+
+ int refcount;
+
+ /*
+ * List of config items that should be sent to the
+ * backend during re/connect. This is temporarily
+ * created and then freed up as soon as the initial
+ * config items have been applied to the backend.
+ */
+ struct nb_config_cbs cfg_chgs;
+
+ struct mgmt_be_adapters_item list_linkage;
+};
+
+#define MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED (1U << 0)
+
+DECLARE_LIST(mgmt_be_adapters, struct mgmt_be_client_adapter, list_linkage);
+
+/*
+ * MGMT_SUBSCR_xxx - flags for subscription types for xpaths registrations
+ *
+ * MGMT_SUBSCR_VALIDATE_CFG :: the client should be asked to validate config
+ * MGMT_SUBSCR_NOTIFY_CFG :: the client should be notified of config changes
+ * MGMT_SUBSCR_OPER_OWN :: the client owns the given operational state
+ */
+#define MGMT_SUBSCR_VALIDATE_CFG 0x1
+#define MGMT_SUBSCR_NOTIFY_CFG 0x2
+#define MGMT_SUBSCR_OPER_OWN 0x4
+#define MGMT_SUBSCR_ALL 0x7
+
+/* --------- */
+/* CLIENT-ID */
+/* --------- */
+
+#define FOREACH_MGMTD_BE_CLIENT_ID(id) \
+ for ((id) = MGMTD_BE_CLIENT_ID_MIN; (id) < MGMTD_BE_CLIENT_ID_MAX; \
+ (id)++)
+
+#define IS_IDBIT_SET(v, id) (!IS_IDBIT_UNSET(v, id))
+#define IS_IDBIT_UNSET(v, id) (!((v) & (1ull << (id))))
+
+#define __GET_NEXT_SET(id, bits) \
+ ({ \
+ enum mgmt_be_client_id __id = (id); \
+ \
+ for (; __id < MGMTD_BE_CLIENT_ID_MAX && \
+ IS_IDBIT_UNSET(bits, __id); \
+ __id++) \
+ ; \
+ __id; \
+ })
+
+#define FOREACH_BE_CLIENT_BITS(id, bits) \
+ for ((id) = __GET_NEXT_SET(MGMTD_BE_CLIENT_ID_MIN, bits); \
+ (id) < MGMTD_BE_CLIENT_ID_MAX; \
+ (id) = __GET_NEXT_SET((id) + 1, bits))
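
For reference, a minimal sketch of iterating a client bitmask with these macros (hypothetical mask, mirroring how mgmt_be_adapter.c uses them):

uint64_t clients = (1ull << MGMTD_BE_CLIENT_ID_ZEBRA) |
		   (1ull << MGMTD_BE_CLIENT_ID_TESTC);
enum mgmt_be_client_id id;

/* Visits only the IDs whose bit is set, in increasing order. */
FOREACH_BE_CLIENT_BITS (id, clients)
	zlog_debug("client %s is subscribed", mgmt_be_client_id2name(id));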
+
+/* ---------- */
+/* Prototypes */
+/* ---------- */
+
+/* Initialise backend adapter module. */
+extern void mgmt_be_adapter_init(struct event_loop *tm);
+
+/* Destroy the backend adapter module. */
+extern void mgmt_be_adapter_destroy(void);
+
+/* Acquire lock for backend adapter. */
+extern void mgmt_be_adapter_lock(struct mgmt_be_client_adapter *adapter);
+
+/* Remove lock from backend adapter. */
+extern void mgmt_be_adapter_unlock(struct mgmt_be_client_adapter **adapter);
+
+/* Create backend adapter. */
+extern struct msg_conn *mgmt_be_create_adapter(int conn_fd,
+ union sockunion *su);
+
+/* Fetch backend adapter given an adapter name. */
+extern struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_name(const char *name);
+
+/* Fetch backend adapter given a client ID. */
+extern struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_id(enum mgmt_be_client_id id);
+
+/* Get the client name given a client ID */
+extern const char *mgmt_be_client_id2name(enum mgmt_be_client_id id);
+
+/* Toggle debug on or off for connected clients. */
+extern void mgmt_be_adapter_toggle_client_debug(bool set);
+
+/* Fetch backend adapter config. */
+extern void mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter,
+ struct nb_config_cbs **changes);
+
+/* Create/destroy a transaction. */
+extern int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, bool create);
+
+/*
+ * Send config data create request to backend client.
+ *
+ * adapter
+ * Backend adapter information.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * cfgdata_reqs
+ * An array of pointer to Mgmtd__YangCfgDataReq.
+ *
+ * num_reqs
+ * Length of the cfgdata_reqs array.
+ *
+ * end_of_data
+ * TRUE if this is the last batch of data, FALSE otherwise.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_be_send_cfgdata_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id,
+ Mgmtd__YangCfgDataReq **cfgdata_reqs,
+ size_t num_reqs, bool end_of_data);
+
+/*
+ * Send config apply request to backend client.
+ *
+ * adapter
+ * Backend adapter information.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id);
+
+/*
+ * Dump backend adapter status to vty.
+ */
+extern void mgmt_be_adapter_status_write(struct vty *vty);
+
+/*
+ * Dump xpath registry for each backend client to vty.
+ */
+extern void mgmt_be_xpath_register_write(struct vty *vty);
+
+
+/**
+ * Send a native message to a backend client
+ *
+ * Args:
+ * id: the ID of the backend client to send the message to.
+ * msg: a native message from mgmt_msg_native_alloc_msg()
+ *
+ * Return:
+ * Any return value from msg_conn_send_msg().
+ */
+extern int mgmt_be_send_native(enum mgmt_be_client_id id, void *msg);
+
+enum mgmt_be_xpath_subscr_type {
+ MGMT_BE_XPATH_SUBSCR_TYPE_CFG,
+ MGMT_BE_XPATH_SUBSCR_TYPE_OPER,
+ MGMT_BE_XPATH_SUBSCR_TYPE_NOTIF,
+ MGMT_BE_XPATH_SUBSCR_TYPE_RPC,
+};
+
+/**
+ * Lookup the clients which are subscribed to a given `xpath`
+ * and the way they are subscribed.
+ *
+ * Args:
+ * xpath - the xpath to check for subscription information.
+ * type - type of subscription to check for.
+ */
+extern uint64_t mgmt_be_interested_clients(const char *xpath,
+ enum mgmt_be_xpath_subscr_type type);
+
+/**
+ * mgmt_fe_adapter_send_notify() - notify FE clients of a notification.
+ * @msg: the notify message from the backend client.
+ * @msglen: the length of the notify message.
+ */
+extern void mgmt_fe_adapter_send_notify(struct mgmt_msg_notify_data *msg,
+ size_t msglen);
+/*
+ * Dump backend client information for a given xpath to vty.
+ */
+extern void mgmt_be_show_xpath_registries(struct vty *vty, const char *xpath);
+
+#endif /* _FRR_MGMTD_BE_ADAPTER_H_ */
diff --git a/mgmtd/mgmt_be_nb.c b/mgmtd/mgmt_be_nb.c
new file mode 100644
index 00000000..613272d4
--- /dev/null
+++ b/mgmtd/mgmt_be_nb.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "config.h"
+#include "xref.h"
+
+XREF_SETUP();
diff --git a/mgmtd/mgmt_ds.c b/mgmtd/mgmt_ds.c
new file mode 100644
index 00000000..dabae4af
--- /dev/null
+++ b/mgmtd/mgmt_ds.c
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Datastores
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "md5.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+#include "mgmtd/mgmt_txn.h"
+#include "libyang/libyang.h"
+
+#define __dbg(fmt, ...) \
+ DEBUGD(&mgmt_debug_ds, "DS: %s: " fmt, __func__, ##__VA_ARGS__)
+#define __log_err(fmt, ...) zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+
+struct mgmt_ds_ctx {
+ Mgmtd__DatastoreId ds_id;
+
+ bool locked;
+ uint64_t vty_session_id; /* Owner of the lock or 0 */
+
+ bool config_ds;
+
+ union {
+ struct nb_config *cfg_root;
+ struct lyd_node *dnode_root;
+ } root;
+};
+
+const char *mgmt_ds_names[MGMTD_DS_MAX_ID + 1] = {
+ MGMTD_DS_NAME_NONE, /* MGMTD_DS_NONE */
+ MGMTD_DS_NAME_RUNNING, /* MGMTD_DS_RUNNING */
+ MGMTD_DS_NAME_CANDIDATE, /* MGMTD_DS_CANDIDATE */
+ MGMTD_DS_NAME_OPERATIONAL, /* MGMTD_DS_OPERATIONAL */
+ "Unknown/Invalid", /* MGMTD_DS_ID_MAX */
+};
+
+static struct mgmt_master *mgmt_ds_mm;
+static struct mgmt_ds_ctx running, candidate, oper;
+
+/* Dump the data tree of the specified format in the file pointed by the path */
+static int mgmt_ds_dump_in_memory(struct mgmt_ds_ctx *ds_ctx,
+ const char *base_xpath, LYD_FORMAT format,
+ struct ly_out *out)
+{
+ struct lyd_node *root;
+ uint32_t options = 0;
+
+ if (base_xpath[0] == '\0')
+ root = ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
+ : ds_ctx->root.dnode_root;
+ else
+ root = yang_dnode_get(ds_ctx->config_ds
+ ? ds_ctx->root.cfg_root->dnode
+ : ds_ctx->root.dnode_root,
+ base_xpath);
+ if (!root)
+ return -1;
+
+ options = ds_ctx->config_ds ? LYD_PRINT_WD_TRIM :
+ LYD_PRINT_WD_EXPLICIT;
+
+ if (base_xpath[0] == '\0')
+ lyd_print_all(out, root, format, options);
+ else
+ lyd_print_tree(out, root, format, options);
+
+ return 0;
+}
+
+static int mgmt_ds_replace_dst_with_src_ds(struct mgmt_ds_ctx *src,
+ struct mgmt_ds_ctx *dst)
+{
+ if (!src || !dst)
+ return -1;
+
+ __dbg("Replacing %s with %s", mgmt_ds_id2name(dst->ds_id),
+ mgmt_ds_id2name(src->ds_id));
+
+ if (src->config_ds && dst->config_ds)
+ nb_config_replace(dst->root.cfg_root, src->root.cfg_root, true);
+ else {
+ assert(!src->config_ds && !dst->config_ds);
+ if (dst->root.dnode_root)
+ yang_dnode_free(dst->root.dnode_root);
+ dst->root.dnode_root = yang_dnode_dup(src->root.dnode_root);
+ }
+
+ return 0;
+}
+
+static int mgmt_ds_merge_src_with_dst_ds(struct mgmt_ds_ctx *src,
+ struct mgmt_ds_ctx *dst)
+{
+ int ret;
+
+ if (!src || !dst)
+ return -1;
+
+ __dbg("Merging DS %d with %d", dst->ds_id, src->ds_id);
+ if (src->config_ds && dst->config_ds)
+ ret = nb_config_merge(dst->root.cfg_root, src->root.cfg_root,
+ true);
+ else {
+ assert(!src->config_ds && !dst->config_ds);
+ ret = lyd_merge_siblings(&dst->root.dnode_root,
+ src->root.dnode_root, 0);
+ }
+ if (ret != 0) {
+ __log_err("merge failed with err: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mgmt_ds_load_cfg_from_file(const char *filepath,
+ struct lyd_node **dnode)
+{
+ LY_ERR ret;
+
+ *dnode = NULL;
+ ret = lyd_parse_data_path(ly_native_ctx, filepath, LYD_JSON,
+ LYD_PARSE_NO_STATE | LYD_PARSE_STRICT,
+ LYD_VALIDATE_NO_STATE, dnode);
+
+ if (ret != LY_SUCCESS) {
+ if (*dnode)
+ yang_dnode_free(*dnode);
+ return -1;
+ }
+
+ return 0;
+}
+
+void mgmt_ds_reset_candidate(void)
+{
+ struct lyd_node *dnode = mm->candidate_ds->root.cfg_root->dnode;
+
+ if (dnode)
+ yang_dnode_free(dnode);
+
+ dnode = yang_dnode_new(ly_native_ctx, true);
+ mm->candidate_ds->root.cfg_root->dnode = dnode;
+}
+
+
+int mgmt_ds_init(struct mgmt_master *mm)
+{
+ if (mgmt_ds_mm || mm->running_ds || mm->candidate_ds || mm->oper_ds)
+ assert(!"MGMTD: Call ds_init only once!");
+
+ /* Use Running DS from NB module??? */
+ if (!running_config)
+ assert(!"MGMTD: Call ds_init after frr_init only!");
+
+ running.root.cfg_root = running_config;
+ running.config_ds = true;
+ running.ds_id = MGMTD_DS_RUNNING;
+
+ candidate.root.cfg_root = nb_config_dup(running.root.cfg_root);
+ candidate.config_ds = true;
+ candidate.ds_id = MGMTD_DS_CANDIDATE;
+
+ /*
+ * Redirect lib/vty candidate-config datastore to the global candidate
+ * config Ds on the MGMTD process.
+ */
+ vty_mgmt_candidate_config = candidate.root.cfg_root;
+
+ oper.root.dnode_root = yang_dnode_new(ly_native_ctx, true);
+ oper.config_ds = false;
+ oper.ds_id = MGMTD_DS_OPERATIONAL;
+
+ mm->running_ds = &running;
+ mm->candidate_ds = &candidate;
+ mm->oper_ds = &oper;
+ mgmt_ds_mm = mm;
+
+ return 0;
+}
+
+void mgmt_ds_destroy(void)
+{
+ nb_config_free(candidate.root.cfg_root);
+ candidate.root.cfg_root = NULL;
+
+ yang_dnode_free(oper.root.dnode_root);
+ oper.root.dnode_root = NULL;
+}
+
+struct mgmt_ds_ctx *mgmt_ds_get_ctx_by_id(struct mgmt_master *mm,
+ Mgmtd__DatastoreId ds_id)
+{
+ switch (ds_id) {
+ case MGMTD_DS_CANDIDATE:
+ return (mm->candidate_ds);
+ case MGMTD_DS_RUNNING:
+ return (mm->running_ds);
+ case MGMTD_DS_OPERATIONAL:
+ return (mm->oper_ds);
+ case MGMTD_DS_NONE:
+ case MGMTD__DATASTORE_ID__STARTUP_DS:
+ case _MGMTD__DATASTORE_ID_IS_INT_SIZE:
+ return 0;
+ }
+
+ return 0;
+}
+
+bool mgmt_ds_is_config(struct mgmt_ds_ctx *ds_ctx)
+{
+ if (!ds_ctx)
+ return false;
+
+ return ds_ctx->config_ds;
+}
+
+bool mgmt_ds_is_locked(struct mgmt_ds_ctx *ds_ctx, uint64_t session_id)
+{
+ assert(ds_ctx);
+ return (ds_ctx->locked && ds_ctx->vty_session_id == session_id);
+}
+
+int mgmt_ds_lock(struct mgmt_ds_ctx *ds_ctx, uint64_t session_id)
+{
+ assert(ds_ctx);
+
+ if (ds_ctx->locked)
+ return EBUSY;
+
+ ds_ctx->locked = true;
+ ds_ctx->vty_session_id = session_id;
+ return 0;
+}
+
+void mgmt_ds_unlock(struct mgmt_ds_ctx *ds_ctx)
+{
+ assert(ds_ctx);
+ if (!ds_ctx->locked)
+ zlog_warn(
+ "%s: WARNING: unlock on unlocked in DS:%s last session-id %" PRIu64,
+ __func__, mgmt_ds_id2name(ds_ctx->ds_id),
+ ds_ctx->vty_session_id);
+ ds_ctx->locked = 0;
+}
+
+int mgmt_ds_copy_dss(struct mgmt_ds_ctx *src_ds_ctx,
+ struct mgmt_ds_ctx *dst_ds_ctx, bool updt_cmt_rec)
+{
+ if (mgmt_ds_replace_dst_with_src_ds(src_ds_ctx, dst_ds_ctx) != 0)
+ return -1;
+
+ if (updt_cmt_rec && dst_ds_ctx->ds_id == MGMTD_DS_RUNNING)
+ mgmt_history_new_record(dst_ds_ctx);
+
+ return 0;
+}
+
+int mgmt_ds_dump_ds_to_file(char *file_name, struct mgmt_ds_ctx *ds_ctx)
+{
+ struct ly_out *out;
+ int ret = 0;
+
+ if (ly_out_new_filepath(file_name, &out) == LY_SUCCESS) {
+ ret = mgmt_ds_dump_in_memory(ds_ctx, "", LYD_JSON, out);
+ ly_out_free(out, NULL, 0);
+ }
+
+ return ret;
+}
+
+struct nb_config *mgmt_ds_get_nb_config(struct mgmt_ds_ctx *ds_ctx)
+{
+ if (!ds_ctx)
+ return NULL;
+
+ return ds_ctx->config_ds ? ds_ctx->root.cfg_root : NULL;
+}
+
+static int mgmt_walk_ds_nodes(
+ struct nb_config *root, const char *base_xpath,
+ struct lyd_node *base_dnode,
+ void (*mgmt_ds_node_iter_fn)(const char *xpath, struct lyd_node *node,
+ struct nb_node *nb_node, void *ctx),
+ void *ctx)
+{
+ /* this is 1k per recursion... */
+ char xpath[MGMTD_MAX_XPATH_LEN];
+ struct lyd_node *dnode;
+ struct nb_node *nbnode;
+ int ret = 0;
+
+ assert(mgmt_ds_node_iter_fn);
+
+ __dbg(" -- START: base xpath: '%s'", base_xpath);
+
+ if (!base_dnode)
+ /*
+ * This function only returns the first node of a possible set
+ * of matches, issuing a warning if there is more than one match.
+ */
+ base_dnode = yang_dnode_get(root->dnode, base_xpath);
+ if (!base_dnode)
+ return -1;
+
+ __dbg(" search base schema: '%s'",
+ lysc_path(base_dnode->schema, LYSC_PATH_LOG, xpath,
+ sizeof(xpath)));
+
+ nbnode = (struct nb_node *)base_dnode->schema->priv;
+ (*mgmt_ds_node_iter_fn)(base_xpath, base_dnode, nbnode, ctx);
+
+ /*
+ * If the base_xpath points to a leaf node we can skip the tree walk.
+ */
+ if (base_dnode->schema->nodetype & LYD_NODE_TERM)
+ return 0;
+
+ /*
+ * At this point the xpath matched this container node (or some parent,
+ * and we're wildcard-descending now), so by walking its children we
+ * effectively treat the xpath as a prefix-matching path rather than an
+ * exact match.
+ */
+
+ LY_LIST_FOR (lyd_child(base_dnode), dnode) {
+ assert(dnode->schema && dnode->schema->priv);
+
+ (void)lyd_path(dnode, LYD_PATH_STD, xpath, sizeof(xpath));
+
+ __dbg(" -- Child xpath: %s", xpath);
+
+ ret = mgmt_walk_ds_nodes(root, xpath, dnode,
+ mgmt_ds_node_iter_fn, ctx);
+ if (ret != 0)
+ break;
+ }
+
+ __dbg(" -- END: base xpath: '%s'", base_xpath);
+
+ return ret;
+}
+
+struct lyd_node *mgmt_ds_find_data_node_by_xpath(struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath)
+{
+ if (!ds_ctx)
+ return NULL;
+
+ return yang_dnode_get(ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
+ : ds_ctx->root.dnode_root,
+ xpath);
+}
+
+int mgmt_ds_delete_data_nodes(struct mgmt_ds_ctx *ds_ctx, const char *xpath)
+{
+ struct nb_node *nb_node;
+ struct lyd_node *dnode, *dep_dnode;
+ char dep_xpath[XPATH_MAXLEN];
+
+ if (!ds_ctx)
+ return -1;
+
+ nb_node = nb_node_find(xpath);
+
+ dnode = yang_dnode_get(ds_ctx->config_ds
+ ? ds_ctx->root.cfg_root->dnode
+ : ds_ctx->root.dnode_root,
+ xpath);
+
+ if (!dnode)
+ /*
+ * Return a special error code so the caller can choose
+ * whether to ignore it or not.
+ */
+ return NB_ERR_NOT_FOUND;
+ /* destroy dependant */
+ if (nb_node && nb_node->dep_cbs.get_dependant_xpath) {
+ nb_node->dep_cbs.get_dependant_xpath(dnode, dep_xpath);
+
+ dep_dnode = yang_dnode_get(
+ ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
+ : ds_ctx->root.dnode_root,
+ dep_xpath);
+ if (dep_dnode)
+ lyd_free_tree(dep_dnode);
+ }
+ lyd_free_tree(dnode);
+
+ return 0;
+}
+
+int mgmt_ds_load_config_from_file(struct mgmt_ds_ctx *dst,
+ const char *file_path, bool merge)
+{
+ struct lyd_node *iter;
+ struct mgmt_ds_ctx parsed;
+
+ if (!dst)
+ return -1;
+
+ if (mgmt_ds_load_cfg_from_file(file_path, &iter) != 0) {
+ __log_err("Failed to load config from the file %s", file_path);
+ return -1;
+ }
+
+ parsed.root.cfg_root = nb_config_new(iter);
+ parsed.config_ds = true;
+ parsed.ds_id = dst->ds_id;
+
+ if (merge)
+ mgmt_ds_merge_src_with_dst_ds(&parsed, dst);
+ else
+ mgmt_ds_replace_dst_with_src_ds(&parsed, dst);
+
+ nb_config_free(parsed.root.cfg_root);
+
+ return 0;
+}
+
+int mgmt_ds_iter_data(Mgmtd__DatastoreId ds_id, struct nb_config *root,
+ const char *base_xpath,
+ void (*mgmt_ds_node_iter_fn)(const char *xpath,
+ struct lyd_node *node,
+ struct nb_node *nb_node,
+ void *ctx),
+ void *ctx)
+{
+ int ret = 0;
+ char xpath[MGMTD_MAX_XPATH_LEN];
+ struct lyd_node *base_dnode = NULL;
+ struct lyd_node *node;
+
+ if (!root)
+ return -1;
+
+ strlcpy(xpath, base_xpath, sizeof(xpath));
+ mgmt_remove_trailing_separator(xpath, '/');
+
+ /*
+ * mgmt_ds_iter_data is the only user of mgmt_walk_ds_nodes other than
+ * mgmt_walk_ds_nodes itself, so we can modify the API if we would like.
+ * Oper-state should be kept in mind though for the prefix walk
+ */
+
+ __dbg(" -- START DS walk for DSid: %d", ds_id);
+
+ /* If the base_xpath is empty then crawl the siblings */
+ if (xpath[0] == 0) {
+ base_dnode = root->dnode;
+
+ /* get first top-level sibling */
+ while (base_dnode->parent)
+ base_dnode = lyd_parent(base_dnode);
+
+ while (base_dnode->prev->next)
+ base_dnode = base_dnode->prev;
+
+ LY_LIST_FOR (base_dnode, node) {
+ ret = mgmt_walk_ds_nodes(root, xpath, node,
+ mgmt_ds_node_iter_fn, ctx);
+ }
+ } else
+ ret = mgmt_walk_ds_nodes(root, xpath, base_dnode,
+ mgmt_ds_node_iter_fn, ctx);
+
+ return ret;
+}
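The iterator turns the base xpath into a prefix walk over the tree, invoking the callback once per visited node. A hypothetical sketch of a caller that simply counts nodes (count_node_cb and count_nodes are illustrative names):

static void count_node_cb(const char *xpath, struct lyd_node *node,
			  struct nb_node *nb_node, void *ctx)
{
	(*(unsigned int *)ctx)++;
}

static unsigned int count_nodes(struct nb_config *cfg_root,
				const char *base_xpath)
{
	unsigned int count = 0;

	(void)mgmt_ds_iter_data(MGMTD_DS_RUNNING, cfg_root, base_xpath,
				count_node_cb, &count);
	return count;
}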
+
+void mgmt_ds_dump_tree(struct vty *vty, struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath, FILE *f, LYD_FORMAT format)
+{
+ struct ly_out *out;
+ char *str;
+ char base_xpath[MGMTD_MAX_XPATH_LEN] = {0};
+
+ if (!ds_ctx) {
+ vty_out(vty, " >>>>> Datastore Not Initialized!\n");
+ return;
+ }
+
+ if (xpath) {
+ strlcpy(base_xpath, xpath, MGMTD_MAX_XPATH_LEN);
+ mgmt_remove_trailing_separator(base_xpath, '/');
+ }
+
+ if (f)
+ ly_out_new_file(f, &out);
+ else
+ ly_out_new_memory(&str, 0, &out);
+
+ mgmt_ds_dump_in_memory(ds_ctx, base_xpath, format, out);
+
+ if (!f)
+ vty_out(vty, "%s\n", str);
+
+ ly_out_free(out, NULL, 0);
+}
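When a FILE pointer is passed, output goes to that file instead of the vty. A hypothetical sketch (dump_running_json is an illustrative name) dumping the full running DS as JSON:

static void dump_running_json(struct vty *vty, FILE *fp)
{
	struct mgmt_ds_ctx *ds_ctx =
		mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_RUNNING);

	if (ds_ctx)
		mgmt_ds_dump_tree(vty, ds_ctx, NULL, fp, LYD_JSON);
}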
+
+void mgmt_ds_status_write_one(struct vty *vty, struct mgmt_ds_ctx *ds_ctx)
+{
+ if (!ds_ctx) {
+ vty_out(vty, " >>>>> Datastore Not Initialized!\n");
+ return;
+ }
+
+ vty_out(vty, " DS: %s\n", mgmt_ds_id2name(ds_ctx->ds_id));
+ vty_out(vty, " DS-Hndl: \t\t\t%p\n", ds_ctx);
+ vty_out(vty, " Config: \t\t\t%s\n",
+ ds_ctx->config_ds ? "True" : "False");
+}
+
+void mgmt_ds_status_write(struct vty *vty)
+{
+ vty_out(vty, "MGMTD Datastores\n");
+
+ mgmt_ds_status_write_one(vty, mgmt_ds_mm->running_ds);
+
+ mgmt_ds_status_write_one(vty, mgmt_ds_mm->candidate_ds);
+
+ mgmt_ds_status_write_one(vty, mgmt_ds_mm->oper_ds);
+}
diff --git a/mgmtd/mgmt_ds.h b/mgmtd/mgmt_ds.h
new file mode 100644
index 00000000..b8e77e33
--- /dev/null
+++ b/mgmtd/mgmt_ds.h
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Datastores
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_DS_H_
+#define _FRR_MGMTD_DS_H_
+
+#include "mgmt_fe_client.h"
+#include "northbound.h"
+#include "mgmt_defines.h"
+
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+
+#define MGMTD_MAX_NUM_DSNODES_PER_BATCH 128
+
+#define MGMTD_DS_NAME_MAX_LEN 32
+#define MGMTD_DS_NAME_NONE "none"
+#define MGMTD_DS_NAME_RUNNING "running"
+#define MGMTD_DS_NAME_CANDIDATE "candidate"
+#define MGMTD_DS_NAME_OPERATIONAL "operational"
+
+#define FOREACH_MGMTD_DS_ID(id) \
+ for ((id) = MGMTD_DS_NONE; (id) < MGMTD_DS_MAX_ID; (id)++)
+
+#define MGMTD_MAX_COMMIT_LIST 10
+
+#define MGMTD_COMMIT_FILE_PATH(id) "%s/commit-%s.json", frr_libstatedir, id
+#define MGMTD_COMMIT_INDEX_FILE_PATH "%s/commit-index.dat", frr_libstatedir
+
+extern struct nb_config *running_config;
+
+struct mgmt_ds_ctx;
+
+/***************************************************************
+ * Global data exported
+ ***************************************************************/
+
+extern const char *mgmt_ds_names[MGMTD_DS_MAX_ID + 1];
+
+/*
+ * Convert datastore ID to datastore name.
+ *
+ * id
+ * Datastore ID.
+ *
+ * Returns:
+ * Datastore name.
+ */
+static inline const char *mgmt_ds_id2name(Mgmtd__DatastoreId id)
+{
+ if (id > MGMTD_DS_MAX_ID)
+ id = MGMTD_DS_MAX_ID;
+ return mgmt_ds_names[id];
+}
+
+/*
+ * Convert datastore name to datastore ID.
+ *
+ * name
+ *    Datastore name.
+ *
+ * Returns:
+ * Datastore ID.
+ */
+static inline Mgmtd__DatastoreId mgmt_ds_name2id(const char *name)
+{
+ Mgmtd__DatastoreId id;
+
+ FOREACH_MGMTD_DS_ID (id) {
+ if (!strncmp(mgmt_ds_names[id], name, MGMTD_DS_NAME_MAX_LEN))
+ return id;
+ }
+
+ return MGMTD_DS_NONE;
+}
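The two inline helpers above are inverses for the known datastore names. A hypothetical sanity-check sketch (ds_name_roundtrip_ok is an illustrative name):

static inline bool ds_name_roundtrip_ok(const char *name)
{
	Mgmtd__DatastoreId id = mgmt_ds_name2id(name);

	/* Maps a name to a real datastore ID and back again. */
	return id != MGMTD_DS_NONE && strcmp(mgmt_ds_id2name(id), name) == 0;
}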
+
+/*
+ * Convert datastore name to datastore ID.
+ *
+ * Similar to the function above.
+ */
+static inline Mgmtd__DatastoreId mgmt_get_ds_id_by_name(const char *ds_name)
+{
+ if (!strncmp(ds_name, "candidate", sizeof("candidate")))
+ return MGMTD_DS_CANDIDATE;
+ else if (!strncmp(ds_name, "running", sizeof("running")))
+ return MGMTD_DS_RUNNING;
+ else if (!strncmp(ds_name, "operational", sizeof("operational")))
+ return MGMTD_DS_OPERATIONAL;
+ return MGMTD_DS_NONE;
+}
+
+/*
+ * Appends a trailing wildcard '/' '*' to a given xpath.
+ *
+ * xpath
+ * YANG xpath.
+ *
+ * xpath_len
+ * xpath length.
+ */
+static inline void mgmt_xpath_append_trail_wildcard(char *xpath,
+ size_t *xpath_len)
+{
+ if (!xpath || !xpath_len)
+ return;
+
+ if (!*xpath_len)
+ *xpath_len = strlen(xpath);
+
+ if (*xpath_len > 2 && *xpath_len < MGMTD_MAX_XPATH_LEN - 2) {
+ if (xpath[*xpath_len - 1] == '/') {
+ xpath[*xpath_len] = '*';
+ xpath[*xpath_len + 1] = 0;
+ (*xpath_len)++;
+ } else if (xpath[*xpath_len - 1] != '*') {
+ xpath[*xpath_len] = '/';
+ xpath[*xpath_len + 1] = '*';
+ xpath[*xpath_len + 2] = 0;
+ (*xpath_len) += 2;
+ }
+ }
+}
+
+/*
+ * Removes a trailing wildcard '/' '*' from a given xpath.
+ *
+ * xpath
+ * YANG xpath.
+ *
+ * xpath_len
+ * xpath length.
+ */
+static inline void mgmt_xpath_remove_trail_wildcard(char *xpath,
+ size_t *xpath_len)
+{
+ if (!xpath || !xpath_len)
+ return;
+
+ if (!*xpath_len)
+ *xpath_len = strlen(xpath);
+
+ if (*xpath_len > 2 && xpath[*xpath_len - 2] == '/'
+ && xpath[*xpath_len - 1] == '*') {
+ xpath[*xpath_len - 2] = 0;
+ (*xpath_len) -= 2;
+ }
+}
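A hypothetical round-trip of the two helpers above (wildcard_roundtrip is an illustrative name); passing a length of 0 lets each helper compute strlen() itself:

static inline void wildcard_roundtrip(void)
{
	char xpath[MGMTD_MAX_XPATH_LEN] = "/frr-interface:lib";
	size_t len = 0;

	mgmt_xpath_append_trail_wildcard(xpath, &len);
	/* xpath now ends with the '/' '*' wildcard; len was updated */
	mgmt_xpath_remove_trail_wildcard(xpath, &len);
	/* xpath is back to "/frr-interface:lib" */
}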
+
+/* Initialise datastore */
+extern int mgmt_ds_init(struct mgmt_master *cm);
+
+/* Destroy datastore */
+extern void mgmt_ds_destroy(void);
+
+/*
+ * Get datastore handle (context) by ID
+ *
+ * mm
+ * Management master structure.
+ *
+ * ds_id
+ * Datastore ID.
+ *
+ * Returns:
+ * Datastore context (Holds info about ID, lock, root node etc).
+ */
+extern struct mgmt_ds_ctx *mgmt_ds_get_ctx_by_id(struct mgmt_master *mm,
+ Mgmtd__DatastoreId ds_id);
+
+/*
+ * Check if a given datastore is config ds
+ */
+extern bool mgmt_ds_is_config(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Check if a given datastore is locked by a given session
+ */
+extern bool mgmt_ds_is_locked(struct mgmt_ds_ctx *ds_ctx, uint64_t session_id);
+
+/*
+ * Acquire write lock to a ds given a ds_handle
+ */
+extern int mgmt_ds_lock(struct mgmt_ds_ctx *ds_ctx, uint64_t session_id);
+
+/*
+ * Remove a lock from ds given a ds_handle
+ */
+extern void mgmt_ds_unlock(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Copy from source to destination datastore.
+ *
+ * src_ds
+ * Source datastore handle (ds to be copied from).
+ *
+ * dst_ds
+ * Destination datastore handle (ds to be copied to).
+ *
+ * update_cmt_rec
+ *    TRUE to update the commit record, FALSE otherwise.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_ds_copy_dss(struct mgmt_ds_ctx *src_ds_ctx,
+ struct mgmt_ds_ctx *dst_ds_ctx,
+ bool update_cmt_rec);
+
+/*
+ * Fetch northbound configuration for a given datastore context.
+ */
+extern struct nb_config *mgmt_ds_get_nb_config(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Find a YANG data node given a datastore handle and a YANG xpath.
+ */
+extern struct lyd_node *
+mgmt_ds_find_data_node_by_xpath(struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath);
+
+/*
+ * Delete YANG data node given a datastore handle and YANG xpath.
+ */
+extern int mgmt_ds_delete_data_nodes(struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath);
+
+/*
+ * Iterate over datastore data.
+ *
+ * ds_id
+ *    Datastore ID.
+ *
+ * root
+ * The root of the tree to iterate over.
+ *
+ * base_xpath
+ * Base YANG xpath from where needs to be iterated.
+ *
+ * mgmt_ds_node_iter_fn
+ *    Function that will be called for each node visited.
+ *
+ * ctx
+ *    User-defined opaque value passed through unchanged to the
+ *    iterator function above.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_ds_iter_data(
+ Mgmtd__DatastoreId ds_id, struct nb_config *root,
+ const char *base_xpath,
+ void (*mgmt_ds_node_iter_fn)(const char *xpath, struct lyd_node *node,
+ struct nb_node *nb_node, void *ctx),
+ void *ctx);
+
+/*
+ * Load config to datastore from a file.
+ *
+ * ds_ctx
+ * Datastore context.
+ *
+ * file_path
+ * File path of the configuration file.
+ *
+ * merge
+ *    TRUE to merge with the existing config,
+ *    FALSE to replace the existing config.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_ds_load_config_from_file(struct mgmt_ds_ctx *ds_ctx,
+ const char *file_path, bool merge);
+
+/*
+ * Dump the data tree to a file with JSON/XML format.
+ *
+ * vty
+ * VTY context.
+ *
+ * ds_ctx
+ * Datastore context.
+ *
+ * xpath
+ * Base YANG xpath from where data needs to be dumped.
+ *
+ * f
+ *    File pointer to which the data should be dumped.
+ *
+ * format
+ * JSON/XML
+ */
+extern void mgmt_ds_dump_tree(struct vty *vty, struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath, FILE *f, LYD_FORMAT format);
+
+/*
+ * Dump the complete data tree to a file with JSON format.
+ *
+ * file_name
+ *    File path to which the data should be dumped.
+ *
+ * ds
+ * Datastore context.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_ds_dump_ds_to_file(char *file_name,
+ struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Dump information about specific datastore.
+ */
+extern void mgmt_ds_status_write_one(struct vty *vty,
+ struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Dump information about all the datastores.
+ */
+extern void mgmt_ds_status_write(struct vty *vty);
+
+
+/*
+ * Reset the candidate DS to empty state
+ */
+void mgmt_ds_reset_candidate(void);
+
+#endif /* _FRR_MGMTD_DS_H_ */
diff --git a/mgmtd/mgmt_fe_adapter.c b/mgmtd/mgmt_fe_adapter.c
new file mode 100644
index 00000000..fc1bde0b
--- /dev/null
+++ b/mgmtd/mgmt_fe_adapter.c
@@ -0,0 +1,2036 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Client Connection Adapter
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+
+#include <zebra.h>
+#include "darr.h"
+#include "sockopt.h"
+#include "network.h"
+#include "libfrr.h"
+#include "mgmt_fe_client.h"
+#include "mgmt_msg.h"
+#include "mgmt_msg_native.h"
+#include "mgmt_pb.h"
+#include "hash.h"
+#include "jhash.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+
+#define __dbg(fmt, ...) \
+ DEBUGD(&mgmt_debug_fe, "FE-ADAPTER: %s: " fmt, __func__, ##__VA_ARGS__)
+#define __log_err(fmt, ...) \
+ zlog_err("FE-ADAPTER: %s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+
+#define FOREACH_ADAPTER_IN_LIST(adapter) \
+ frr_each_safe (mgmt_fe_adapters, &mgmt_fe_adapters, (adapter))
+
+enum mgmt_session_event {
+ MGMTD_FE_SESSION_CFG_TXN_CLNUP = 1,
+ MGMTD_FE_SESSION_SHOW_TXN_CLNUP,
+};
+
+struct mgmt_fe_session_ctx {
+ struct mgmt_fe_client_adapter *adapter;
+ uint64_t session_id;
+ uint64_t client_id;
+ uint64_t txn_id;
+ uint64_t cfg_txn_id;
+ uint8_t ds_locked[MGMTD_DS_MAX_ID];
+ struct event *proc_cfg_txn_clnp;
+ struct event *proc_show_txn_clnp;
+
+ struct mgmt_fe_sessions_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_fe_sessions, struct mgmt_fe_session_ctx, list_linkage);
+
+#define FOREACH_SESSION_IN_LIST(adapter, session) \
+ frr_each_safe (mgmt_fe_sessions, &(adapter)->fe_sessions, (session))
+
+static struct event_loop *mgmt_loop;
+static struct msg_server mgmt_fe_server = {.fd = -1};
+
+static struct mgmt_fe_adapters_head mgmt_fe_adapters;
+
+static struct hash *mgmt_fe_sessions;
+static uint64_t mgmt_fe_next_session_id;
+
+/* Forward declarations */
+static void
+mgmt_fe_session_register_event(struct mgmt_fe_session_ctx *session,
+ enum mgmt_session_event event);
+
+static int
+mgmt_fe_session_write_lock_ds(Mgmtd__DatastoreId ds_id,
+ struct mgmt_ds_ctx *ds_ctx,
+ struct mgmt_fe_session_ctx *session)
+{
+ if (session->ds_locked[ds_id])
+ zlog_warn("multiple lock taken by session-id: %" PRIu64
+ " on DS:%s",
+ session->session_id, mgmt_ds_id2name(ds_id));
+ else {
+ if (mgmt_ds_lock(ds_ctx, session->session_id)) {
+ __dbg("Failed to lock the DS:%s for session-id: %" PRIu64
+ " from %s!",
+ mgmt_ds_id2name(ds_id), session->session_id,
+ session->adapter->name);
+ return -1;
+ }
+
+ session->ds_locked[ds_id] = true;
+ __dbg("Write-Locked the DS:%s for session-id: %" PRIu64
+ " from %s",
+ mgmt_ds_id2name(ds_id), session->session_id,
+ session->adapter->name);
+ }
+
+ return 0;
+}
+
+static void mgmt_fe_session_unlock_ds(Mgmtd__DatastoreId ds_id,
+ struct mgmt_ds_ctx *ds_ctx,
+ struct mgmt_fe_session_ctx *session)
+{
+ if (!session->ds_locked[ds_id])
+		zlog_warn("unlock requested for an already-unlocked DS by session-id: %" PRIu64 " on DS:%s",
+ session->session_id, mgmt_ds_id2name(ds_id));
+
+ session->ds_locked[ds_id] = false;
+ mgmt_ds_unlock(ds_ctx);
+ __dbg("Unlocked DS:%s write-locked earlier by session-id: %" PRIu64
+ " from %s",
+ mgmt_ds_id2name(ds_id), session->session_id,
+ session->adapter->name);
+}
+
+static void
+mgmt_fe_session_cfg_txn_cleanup(struct mgmt_fe_session_ctx *session)
+{
+ /*
+ * Ensure any uncommitted changes in Candidate DS
+ * is discarded.
+ */
+ mgmt_ds_copy_dss(mm->running_ds, mm->candidate_ds, false);
+
+ /*
+ * Destroy the actual transaction created earlier.
+ */
+ if (session->cfg_txn_id != MGMTD_TXN_ID_NONE)
+ mgmt_destroy_txn(&session->cfg_txn_id);
+}
+
+static void
+mgmt_fe_session_show_txn_cleanup(struct mgmt_fe_session_ctx *session)
+{
+ /*
+ * Destroy the transaction created recently.
+ */
+ if (session->txn_id != MGMTD_TXN_ID_NONE)
+ mgmt_destroy_txn(&session->txn_id);
+}
+
+static void
+mgmt_fe_adapter_compute_set_cfg_timers(struct mgmt_setcfg_stats *setcfg_stats)
+{
+ setcfg_stats->last_exec_tm = timeval_elapsed(setcfg_stats->last_end,
+ setcfg_stats->last_start);
+ if (setcfg_stats->last_exec_tm > setcfg_stats->max_tm)
+ setcfg_stats->max_tm = setcfg_stats->last_exec_tm;
+
+ if (setcfg_stats->last_exec_tm < setcfg_stats->min_tm)
+ setcfg_stats->min_tm = setcfg_stats->last_exec_tm;
+
+ setcfg_stats->avg_tm =
+ (((setcfg_stats->avg_tm * (setcfg_stats->set_cfg_count - 1))
+ + setcfg_stats->last_exec_tm)
+ / setcfg_stats->set_cfg_count);
+}
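The average update above is the standard incremental mean: with n samples, avg_n = (avg_(n-1) * (n - 1) + x_n) / n, so no per-sample history has to be kept. A small illustrative (non-mgmtd) helper showing the same identity:

static unsigned long running_mean(unsigned long prev_avg, unsigned long n,
				  unsigned long sample)
{
	/* avg_n = (avg_(n-1) * (n - 1) + x_n) / n */
	return (prev_avg * (n - 1) + sample) / n;
}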
+
+static void
+mgmt_fe_session_compute_commit_timers(struct mgmt_commit_stats *cmt_stats)
+{
+ cmt_stats->last_exec_tm =
+ timeval_elapsed(cmt_stats->last_end, cmt_stats->last_start);
+ if (cmt_stats->last_exec_tm > cmt_stats->max_tm) {
+ cmt_stats->max_tm = cmt_stats->last_exec_tm;
+ cmt_stats->max_batch_cnt = cmt_stats->last_batch_cnt;
+ }
+
+ if (cmt_stats->last_exec_tm < cmt_stats->min_tm) {
+ cmt_stats->min_tm = cmt_stats->last_exec_tm;
+ cmt_stats->min_batch_cnt = cmt_stats->last_batch_cnt;
+ }
+}
+
+static void mgmt_fe_cleanup_session(struct mgmt_fe_session_ctx **sessionp)
+{
+ Mgmtd__DatastoreId ds_id;
+ struct mgmt_ds_ctx *ds_ctx;
+ struct mgmt_fe_session_ctx *session = *sessionp;
+
+ if (session->adapter) {
+ mgmt_fe_session_cfg_txn_cleanup(session);
+ mgmt_fe_session_show_txn_cleanup(session);
+ for (ds_id = 0; ds_id < MGMTD_DS_MAX_ID; ds_id++) {
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, ds_id);
+ if (ds_ctx && session->ds_locked[ds_id])
+ mgmt_fe_session_unlock_ds(ds_id, ds_ctx,
+ session);
+ }
+ mgmt_fe_sessions_del(&session->adapter->fe_sessions, session);
+ assert(session->adapter->refcount > 1);
+ mgmt_fe_adapter_unlock(&session->adapter);
+ }
+
+ hash_release(mgmt_fe_sessions, session);
+ XFREE(MTYPE_MGMTD_FE_SESSION, session);
+ *sessionp = NULL;
+}
+
+static struct mgmt_fe_session_ctx *
+mgmt_fe_find_session_by_client_id(struct mgmt_fe_client_adapter *adapter,
+ uint64_t client_id)
+{
+ struct mgmt_fe_session_ctx *session;
+
+ FOREACH_SESSION_IN_LIST (adapter, session) {
+ if (session->client_id == client_id) {
+ __dbg("Found session-id %" PRIu64
+ " using client-id %" PRIu64,
+ session->session_id, client_id);
+ return session;
+ }
+ }
+ __dbg("Session not found using client-id %" PRIu64, client_id);
+ return NULL;
+}
+
+static unsigned int mgmt_fe_session_hash_key(const void *data)
+{
+ const struct mgmt_fe_session_ctx *session = data;
+
+ return jhash2((uint32_t *) &session->session_id,
+ sizeof(session->session_id) / sizeof(uint32_t), 0);
+}
+
+static bool mgmt_fe_session_hash_cmp(const void *d1, const void *d2)
+{
+ const struct mgmt_fe_session_ctx *session1 = d1;
+ const struct mgmt_fe_session_ctx *session2 = d2;
+
+ return (session1->session_id == session2->session_id);
+}
+
+static inline struct mgmt_fe_session_ctx *
+mgmt_session_id2ctx(uint64_t session_id)
+{
+ struct mgmt_fe_session_ctx key = {0};
+ struct mgmt_fe_session_ctx *session;
+
+ if (!mgmt_fe_sessions)
+ return NULL;
+
+ key.session_id = session_id;
+ session = hash_lookup(mgmt_fe_sessions, &key);
+
+ return session;
+}
+
+void mgmt_fe_adapter_toggle_client_debug(bool set)
+{
+ struct mgmt_fe_client_adapter *adapter;
+
+ FOREACH_ADAPTER_IN_LIST (adapter)
+ adapter->conn->debug = set;
+}
+
+static struct mgmt_fe_session_ctx *fe_adapter_session_by_txn_id(uint64_t txn_id)
+{
+ uint64_t session_id = mgmt_txn_get_session_id(txn_id);
+
+ if (session_id == MGMTD_SESSION_ID_NONE)
+ return NULL;
+ return mgmt_session_id2ctx(session_id);
+}
+
+static struct mgmt_fe_session_ctx *
+mgmt_fe_create_session(struct mgmt_fe_client_adapter *adapter,
+ uint64_t client_id)
+{
+ struct mgmt_fe_session_ctx *session;
+
+ session = mgmt_fe_find_session_by_client_id(adapter, client_id);
+ if (session)
+ mgmt_fe_cleanup_session(&session);
+
+ session = XCALLOC(MTYPE_MGMTD_FE_SESSION,
+ sizeof(struct mgmt_fe_session_ctx));
+ assert(session);
+ session->client_id = client_id;
+ session->adapter = adapter;
+ session->txn_id = MGMTD_TXN_ID_NONE;
+ session->cfg_txn_id = MGMTD_TXN_ID_NONE;
+ mgmt_fe_adapter_lock(adapter);
+ mgmt_fe_sessions_add_tail(&adapter->fe_sessions, session);
+ if (!mgmt_fe_next_session_id)
+ mgmt_fe_next_session_id++;
+ session->session_id = mgmt_fe_next_session_id++;
+ hash_get(mgmt_fe_sessions, session, hash_alloc_intern);
+
+ return session;
+}
+
+static int fe_adapter_send_native_msg(struct mgmt_fe_client_adapter *adapter,
+ void *msg, size_t len,
+ bool short_circuit_ok)
+{
+ return msg_conn_send_msg(adapter->conn, MGMT_MSG_VERSION_NATIVE, msg,
+ len, NULL, short_circuit_ok);
+}
+
+static int fe_adapter_send_msg(struct mgmt_fe_client_adapter *adapter,
+ Mgmtd__FeMessage *fe_msg, bool short_circuit_ok)
+{
+ return msg_conn_send_msg(
+ adapter->conn, MGMT_MSG_VERSION_PROTOBUF, fe_msg,
+ mgmtd__fe_message__get_packed_size(fe_msg),
+ (size_t(*)(void *, void *))mgmtd__fe_message__pack,
+ short_circuit_ok);
+}
+
+static int fe_adapter_send_session_reply(struct mgmt_fe_client_adapter *adapter,
+ struct mgmt_fe_session_ctx *session,
+ bool create, bool success)
+{
+ Mgmtd__FeMessage fe_msg;
+ Mgmtd__FeSessionReply session_reply;
+
+ mgmtd__fe_session_reply__init(&session_reply);
+ session_reply.create = create;
+ if (create) {
+ session_reply.has_client_conn_id = 1;
+ session_reply.client_conn_id = session->client_id;
+ }
+ session_reply.session_id = session->session_id;
+ session_reply.success = success;
+
+ mgmtd__fe_message__init(&fe_msg);
+ fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SESSION_REPLY;
+ fe_msg.session_reply = &session_reply;
+
+ __dbg("Sending SESSION_REPLY message to MGMTD Frontend client '%s'",
+ adapter->name);
+
+ return fe_adapter_send_msg(adapter, &fe_msg, true);
+}
+
+static int fe_adapter_send_lockds_reply(struct mgmt_fe_session_ctx *session,
+ Mgmtd__DatastoreId ds_id,
+ uint64_t req_id, bool lock_ds,
+ bool success, const char *error_if_any)
+{
+ Mgmtd__FeMessage fe_msg;
+ Mgmtd__FeLockDsReply lockds_reply;
+ bool scok = session->adapter->conn->is_short_circuit;
+
+ assert(session->adapter);
+
+ mgmtd__fe_lock_ds_reply__init(&lockds_reply);
+ lockds_reply.session_id = session->session_id;
+ lockds_reply.ds_id = ds_id;
+ lockds_reply.req_id = req_id;
+ lockds_reply.lock = lock_ds;
+ lockds_reply.success = success;
+ if (error_if_any)
+ lockds_reply.error_if_any = (char *)error_if_any;
+
+ mgmtd__fe_message__init(&fe_msg);
+ fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REPLY;
+ fe_msg.lockds_reply = &lockds_reply;
+
+ __dbg("Sending LOCK_DS_REPLY message to MGMTD Frontend client '%s' scok: %d",
+ session->adapter->name, scok);
+
+ return fe_adapter_send_msg(session->adapter, &fe_msg, scok);
+}
+
+static int fe_adapter_send_set_cfg_reply(struct mgmt_fe_session_ctx *session,
+ Mgmtd__DatastoreId ds_id,
+ uint64_t req_id, bool success,
+ const char *error_if_any,
+ bool implicit_commit)
+{
+ Mgmtd__FeMessage fe_msg;
+ Mgmtd__FeSetConfigReply setcfg_reply;
+
+ assert(session->adapter);
+
+ if (implicit_commit && session->cfg_txn_id)
+ mgmt_fe_session_register_event(
+ session, MGMTD_FE_SESSION_CFG_TXN_CLNUP);
+
+ mgmtd__fe_set_config_reply__init(&setcfg_reply);
+ setcfg_reply.session_id = session->session_id;
+ setcfg_reply.ds_id = ds_id;
+ setcfg_reply.req_id = req_id;
+ setcfg_reply.success = success;
+ setcfg_reply.implicit_commit = implicit_commit;
+ if (error_if_any)
+ setcfg_reply.error_if_any = (char *)error_if_any;
+
+ mgmtd__fe_message__init(&fe_msg);
+ fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REPLY;
+ fe_msg.setcfg_reply = &setcfg_reply;
+
+ __dbg("Sending SETCFG_REPLY message to MGMTD Frontend client '%s'",
+ session->adapter->name);
+
+ if (implicit_commit) {
+ if (mm->perf_stats_en)
+ gettimeofday(&session->adapter->cmt_stats.last_end,
+ NULL);
+ mgmt_fe_session_compute_commit_timers(
+ &session->adapter->cmt_stats);
+ }
+
+ if (mm->perf_stats_en)
+ gettimeofday(&session->adapter->setcfg_stats.last_end, NULL);
+ mgmt_fe_adapter_compute_set_cfg_timers(&session->adapter->setcfg_stats);
+
+ return fe_adapter_send_msg(session->adapter, &fe_msg, false);
+}
+
+static int fe_adapter_send_commit_cfg_reply(
+ struct mgmt_fe_session_ctx *session, Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dst_ds_id, uint64_t req_id, enum mgmt_result result,
+ bool validate_only, const char *error_if_any)
+{
+ Mgmtd__FeMessage fe_msg;
+ Mgmtd__FeCommitConfigReply commcfg_reply;
+
+ assert(session->adapter);
+
+ mgmtd__fe_commit_config_reply__init(&commcfg_reply);
+ commcfg_reply.session_id = session->session_id;
+ commcfg_reply.src_ds_id = src_ds_id;
+ commcfg_reply.dst_ds_id = dst_ds_id;
+ commcfg_reply.req_id = req_id;
+ commcfg_reply.success =
+ (result == MGMTD_SUCCESS || result == MGMTD_NO_CFG_CHANGES)
+ ? true
+ : false;
+ commcfg_reply.validate_only = validate_only;
+ if (error_if_any)
+ commcfg_reply.error_if_any = (char *)error_if_any;
+
+ mgmtd__fe_message__init(&fe_msg);
+ fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REPLY;
+ fe_msg.commcfg_reply = &commcfg_reply;
+
+ __dbg("Sending COMMIT_CONFIG_REPLY message to MGMTD Frontend client '%s'",
+ session->adapter->name);
+
+ /*
+ * Cleanup the CONFIG transaction associated with this session.
+ */
+ if (session->cfg_txn_id
+ && ((result == MGMTD_SUCCESS && !validate_only)
+ || (result == MGMTD_NO_CFG_CHANGES)))
+ mgmt_fe_session_register_event(
+ session, MGMTD_FE_SESSION_CFG_TXN_CLNUP);
+
+ if (mm->perf_stats_en)
+ gettimeofday(&session->adapter->cmt_stats.last_end, NULL);
+ mgmt_fe_session_compute_commit_timers(&session->adapter->cmt_stats);
+ return fe_adapter_send_msg(session->adapter, &fe_msg, false);
+}
+
+static int fe_adapter_send_get_reply(struct mgmt_fe_session_ctx *session,
+ Mgmtd__DatastoreId ds_id, uint64_t req_id,
+ bool success, Mgmtd__YangDataReply *data,
+ const char *error_if_any)
+{
+ Mgmtd__FeMessage fe_msg;
+ Mgmtd__FeGetReply get_reply;
+
+ assert(session->adapter);
+
+ mgmtd__fe_get_reply__init(&get_reply);
+ get_reply.session_id = session->session_id;
+ get_reply.ds_id = ds_id;
+ get_reply.req_id = req_id;
+ get_reply.success = success;
+ get_reply.data = data;
+ if (error_if_any)
+ get_reply.error_if_any = (char *)error_if_any;
+
+ mgmtd__fe_message__init(&fe_msg);
+ fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GET_REPLY;
+ fe_msg.get_reply = &get_reply;
+
+ __dbg("Sending GET_REPLY message to MGMTD Frontend client '%s'",
+ session->adapter->name);
+
+ /*
+ * Cleanup the SHOW transaction associated with this session.
+ */
+ if (session->txn_id && (!success || (data && data->next_indx < 0)))
+ mgmt_fe_session_register_event(session,
+ MGMTD_FE_SESSION_SHOW_TXN_CLNUP);
+
+ return fe_adapter_send_msg(session->adapter, &fe_msg, false);
+}
+
+static int fe_adapter_send_error(struct mgmt_fe_session_ctx *session,
+ uint64_t req_id, bool short_circuit_ok,
+ int16_t error, const char *errfmt, ...)
+ PRINTFRR(5, 6);
+
+static int fe_adapter_send_error(struct mgmt_fe_session_ctx *session,
+ uint64_t req_id, bool short_circuit_ok,
+ int16_t error, const char *errfmt, ...)
+{
+ va_list ap;
+ int ret;
+
+ va_start(ap, errfmt);
+ ret = vmgmt_msg_native_send_error(session->adapter->conn,
+ session->session_id, req_id,
+ short_circuit_ok, error, errfmt, ap);
+ va_end(ap);
+
+ return ret;
+}
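fe_adapter_send_error() is the printf-style helper used throughout the native-message handlers below. A hypothetical fragment (reject_bad_req is an illustrative name) rejecting a request with a formatted error reply:

static void reject_bad_req(struct mgmt_fe_session_ctx *session, uint64_t req_id)
{
	(void)fe_adapter_send_error(session, req_id, false, -EINVAL,
				    "Unsupported request on session-id: %" PRIu64,
				    session->session_id);
}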
+
+
+static void mgmt_fe_session_cfg_txn_clnup(struct event *thread)
+{
+ struct mgmt_fe_session_ctx *session;
+
+ session = (struct mgmt_fe_session_ctx *)EVENT_ARG(thread);
+
+ mgmt_fe_session_cfg_txn_cleanup(session);
+}
+
+static void mgmt_fe_session_show_txn_clnup(struct event *thread)
+{
+ struct mgmt_fe_session_ctx *session;
+
+ session = (struct mgmt_fe_session_ctx *)EVENT_ARG(thread);
+
+ mgmt_fe_session_show_txn_cleanup(session);
+}
+
+static void
+mgmt_fe_session_register_event(struct mgmt_fe_session_ctx *session,
+ enum mgmt_session_event event)
+{
+ struct timeval tv = {.tv_sec = 0,
+ .tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC};
+
+ switch (event) {
+ case MGMTD_FE_SESSION_CFG_TXN_CLNUP:
+ event_add_timer_tv(mgmt_loop, mgmt_fe_session_cfg_txn_clnup,
+ session, &tv, &session->proc_cfg_txn_clnp);
+ break;
+ case MGMTD_FE_SESSION_SHOW_TXN_CLNUP:
+ event_add_timer_tv(mgmt_loop, mgmt_fe_session_show_txn_clnup,
+ session, &tv, &session->proc_show_txn_clnp);
+ break;
+ }
+}
+
+static struct mgmt_fe_client_adapter *
+mgmt_fe_find_adapter_by_fd(int conn_fd)
+{
+ struct mgmt_fe_client_adapter *adapter;
+
+ FOREACH_ADAPTER_IN_LIST (adapter) {
+ if (adapter->conn->fd == conn_fd)
+ return adapter;
+ }
+
+ return NULL;
+}
+
+static void mgmt_fe_adapter_delete(struct mgmt_fe_client_adapter *adapter)
+{
+ struct mgmt_fe_session_ctx *session;
+ __dbg("deleting client adapter '%s'", adapter->name);
+
+ /* TODO: notify about client disconnect for appropriate cleanup */
+ FOREACH_SESSION_IN_LIST (adapter, session)
+ mgmt_fe_cleanup_session(&session);
+ mgmt_fe_sessions_fini(&adapter->fe_sessions);
+
+ assert(adapter->refcount == 1);
+ mgmt_fe_adapter_unlock(&adapter);
+}
+
+static int mgmt_fe_adapter_notify_disconnect(struct msg_conn *conn)
+{
+ struct mgmt_fe_client_adapter *adapter = conn->user;
+
+ __dbg("notify disconnect for client adapter '%s'", adapter->name);
+
+ mgmt_fe_adapter_delete(adapter);
+
+ return 0;
+}
+
+/*
+ * Purge any old connections that share the same client name with `adapter`
+ */
+static void
+mgmt_fe_adapter_cleanup_old_conn(struct mgmt_fe_client_adapter *adapter)
+{
+ struct mgmt_fe_client_adapter *old;
+
+ FOREACH_ADAPTER_IN_LIST (old) {
+ if (old == adapter)
+ continue;
+ if (strncmp(adapter->name, old->name, sizeof(adapter->name)))
+ continue;
+
+ __dbg("Client '%s' (FD:%d) seems to have reconnected. Removing old connection (FD:%d)",
+ adapter->name, adapter->conn->fd, old->conn->fd);
+ msg_conn_disconnect(old->conn, false);
+ }
+}
+
+static int
+mgmt_fe_session_handle_lockds_req_msg(struct mgmt_fe_session_ctx *session,
+ Mgmtd__FeLockDsReq *lockds_req)
+{
+ struct mgmt_ds_ctx *ds_ctx;
+
+ if (lockds_req->ds_id != MGMTD_DS_CANDIDATE &&
+ lockds_req->ds_id != MGMTD_DS_RUNNING) {
+ fe_adapter_send_lockds_reply(
+ session, lockds_req->ds_id, lockds_req->req_id,
+ lockds_req->lock, false,
+ "Lock/Unlock on DS other than candidate or running DS not supported");
+ return -1;
+ }
+
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, lockds_req->ds_id);
+ if (!ds_ctx) {
+ fe_adapter_send_lockds_reply(session, lockds_req->ds_id,
+ lockds_req->req_id,
+ lockds_req->lock, false,
+ "Failed to retrieve handle for DS!");
+ return -1;
+ }
+
+ if (lockds_req->lock) {
+ if (mgmt_fe_session_write_lock_ds(lockds_req->ds_id, ds_ctx,
+ session)) {
+ fe_adapter_send_lockds_reply(
+ session, lockds_req->ds_id, lockds_req->req_id,
+ lockds_req->lock, false,
+ "Lock already taken on DS by another session!");
+ return -1;
+ }
+ } else {
+ if (!session->ds_locked[lockds_req->ds_id]) {
+ fe_adapter_send_lockds_reply(
+ session, lockds_req->ds_id, lockds_req->req_id,
+ lockds_req->lock, false,
+ "Lock on DS was not taken by this session!");
+ return 0;
+ }
+
+ mgmt_fe_session_unlock_ds(lockds_req->ds_id, ds_ctx, session);
+ }
+
+ if (fe_adapter_send_lockds_reply(session, lockds_req->ds_id,
+ lockds_req->req_id, lockds_req->lock,
+ true, NULL) != 0) {
+ __dbg("Failed to send LOCK_DS_REPLY for DS %u session-id: %" PRIu64
+ " from %s",
+ lockds_req->ds_id, session->session_id,
+ session->adapter->name);
+ }
+
+ return 0;
+}
+
+/*
+ * TODO: this function has too many conditionals relating to complex error
+ * conditions. It needs to be simplified and these complex error conditions
+ * probably need to just disconnect the client with a suitably loud log message.
+ */
+static int
+mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
+ Mgmtd__FeSetConfigReq *setcfg_req)
+{
+ struct mgmt_ds_ctx *ds_ctx, *dst_ds_ctx = NULL;
+ bool txn_created = false;
+
+ if (mm->perf_stats_en)
+ gettimeofday(&session->adapter->setcfg_stats.last_start, NULL);
+
+ /* MGMTD currently only supports editing the candidate DS. */
+ if (setcfg_req->ds_id != MGMTD_DS_CANDIDATE) {
+ fe_adapter_send_set_cfg_reply(
+ session, setcfg_req->ds_id, setcfg_req->req_id, false,
+ "Set-Config on datastores other than Candidate DS not supported",
+ setcfg_req->implicit_commit);
+ return 0;
+ }
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, setcfg_req->ds_id);
+ assert(ds_ctx);
+
+	/* MGMTD currently only supports targeting the running DS. */
+ if (setcfg_req->implicit_commit &&
+ setcfg_req->commit_ds_id != MGMTD_DS_RUNNING) {
+ fe_adapter_send_set_cfg_reply(
+ session, setcfg_req->ds_id, setcfg_req->req_id, false,
+ "Implicit commit on datastores other than running DS not supported",
+ setcfg_req->implicit_commit);
+ return 0;
+ }
+ dst_ds_ctx = mgmt_ds_get_ctx_by_id(mm, setcfg_req->commit_ds_id);
+ assert(dst_ds_ctx);
+
+ /* User should have write lock to change the DS */
+ if (!session->ds_locked[setcfg_req->ds_id]) {
+ fe_adapter_send_set_cfg_reply(session, setcfg_req->ds_id,
+ setcfg_req->req_id, false,
+ "Candidate DS is not locked",
+ setcfg_req->implicit_commit);
+ return 0;
+ }
+
+ if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+ /* Start a CONFIG Transaction (if not started already) */
+ session->cfg_txn_id = mgmt_create_txn(session->session_id,
+ MGMTD_TXN_TYPE_CONFIG);
+ if (session->cfg_txn_id == MGMTD_SESSION_ID_NONE) {
+ fe_adapter_send_set_cfg_reply(
+ session, setcfg_req->ds_id, setcfg_req->req_id,
+ false,
+ "Failed to create a Configuration session!",
+ setcfg_req->implicit_commit);
+ return 0;
+ }
+ txn_created = true;
+
+ __dbg("Created new Config txn-id: %" PRIu64
+ " for session-id %" PRIu64,
+ session->cfg_txn_id, session->session_id);
+ } else {
+ __dbg("Config txn-id: %" PRIu64 " for session-id: %" PRIu64
+ " already created",
+ session->cfg_txn_id, session->session_id);
+
+ if (setcfg_req->implicit_commit) {
+ /*
+ * In this scenario need to skip cleanup of the txn,
+ * so setting implicit commit to false.
+ */
+ fe_adapter_send_set_cfg_reply(
+ session, setcfg_req->ds_id, setcfg_req->req_id,
+ false,
+ "A Configuration transaction is already in progress!",
+ false);
+ return 0;
+ }
+ }
+
+ /* Create the SETConfig request under the transaction. */
+ if (mgmt_txn_send_set_config_req(session->cfg_txn_id, setcfg_req->req_id,
+ setcfg_req->ds_id, ds_ctx,
+ setcfg_req->data, setcfg_req->n_data,
+ setcfg_req->implicit_commit,
+ setcfg_req->commit_ds_id,
+ dst_ds_ctx) != 0) {
+ fe_adapter_send_set_cfg_reply(session, setcfg_req->ds_id,
+ setcfg_req->req_id, false,
+ "Request processing for SET-CONFIG failed!",
+ setcfg_req->implicit_commit);
+
+ /* delete transaction if we just created it */
+ if (txn_created)
+ mgmt_destroy_txn(&session->cfg_txn_id);
+ }
+
+ return 0;
+}
+
+static int mgmt_fe_session_handle_get_req_msg(struct mgmt_fe_session_ctx *session,
+ Mgmtd__FeGetReq *get_req)
+{
+ struct mgmt_ds_ctx *ds_ctx;
+ struct nb_config *cfg_root = NULL;
+ Mgmtd__DatastoreId ds_id = get_req->ds_id;
+ uint64_t req_id = get_req->req_id;
+
+ if (ds_id != MGMTD_DS_CANDIDATE && ds_id != MGMTD_DS_RUNNING) {
+ fe_adapter_send_get_reply(session, ds_id, req_id, false, NULL,
+ "get-req on unsupported datastore");
+ return 0;
+ }
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, ds_id);
+ assert(ds_ctx);
+
+ if (session->txn_id == MGMTD_TXN_ID_NONE) {
+ /*
+ * Start a SHOW Transaction (if not started already)
+ */
+ session->txn_id = mgmt_create_txn(session->session_id,
+ MGMTD_TXN_TYPE_SHOW);
+ if (session->txn_id == MGMTD_SESSION_ID_NONE) {
+ fe_adapter_send_get_reply(session, ds_id, req_id, false,
+ NULL,
+ "Failed to create a Show transaction!");
+ return -1;
+ }
+
+ __dbg("Created new show txn-id: %" PRIu64
+ " for session-id: %" PRIu64,
+ session->txn_id, session->session_id);
+ } else {
+ fe_adapter_send_get_reply(session, ds_id, req_id, false, NULL,
+ "Request processing for GET failed!");
+ __dbg("Transaction in progress txn-id: %" PRIu64
+ " for session-id: %" PRIu64,
+ session->txn_id, session->session_id);
+ return -1;
+ }
+
+ /*
+ * Get a copy of the datastore config root, avoids locking.
+ */
+ cfg_root = nb_config_dup(mgmt_ds_get_nb_config(ds_ctx));
+
+ /*
+ * Create a GET request under the transaction.
+ */
+ if (mgmt_txn_send_get_req(session->txn_id, req_id, ds_id, cfg_root,
+ get_req->data, get_req->n_data)) {
+ fe_adapter_send_get_reply(session, ds_id, req_id, false, NULL,
+ "Request processing for GET failed!");
+
+ goto failed;
+ }
+
+ return 0;
+failed:
+ if (cfg_root)
+ nb_config_free(cfg_root);
+ /*
+ * Destroy the transaction created recently.
+ */
+ if (session->txn_id != MGMTD_TXN_ID_NONE)
+ mgmt_destroy_txn(&session->txn_id);
+
+ return -1;
+}
+
+
+static int mgmt_fe_session_handle_commit_config_req_msg(
+ struct mgmt_fe_session_ctx *session,
+ Mgmtd__FeCommitConfigReq *commcfg_req)
+{
+ struct mgmt_ds_ctx *src_ds_ctx, *dst_ds_ctx;
+
+ if (mm->perf_stats_en)
+ gettimeofday(&session->adapter->cmt_stats.last_start, NULL);
+ session->adapter->cmt_stats.commit_cnt++;
+
+ /* Validate source and dest DS */
+ if (commcfg_req->src_ds_id != MGMTD_DS_CANDIDATE ||
+ commcfg_req->dst_ds_id != MGMTD_DS_RUNNING) {
+ fe_adapter_send_commit_cfg_reply(
+ session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+ commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+ commcfg_req->validate_only,
+ "Source/Dest for commit must be candidate/running DS");
+ return 0;
+ }
+ src_ds_ctx = mgmt_ds_get_ctx_by_id(mm, commcfg_req->src_ds_id);
+ assert(src_ds_ctx);
+ dst_ds_ctx = mgmt_ds_get_ctx_by_id(mm, commcfg_req->dst_ds_id);
+ assert(dst_ds_ctx);
+
+ /* User should have lock on both source and dest DS */
+ if (!session->ds_locked[commcfg_req->dst_ds_id] ||
+ !session->ds_locked[commcfg_req->src_ds_id]) {
+ fe_adapter_send_commit_cfg_reply(
+ session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+ commcfg_req->req_id, MGMTD_DS_LOCK_FAILED,
+ commcfg_req->validate_only,
+ "Commit requires lock on candidate and/or running DS");
+ return 0;
+ }
+
+ if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+ /* as we have the lock no-one else should have a config txn */
+ assert(!mgmt_config_txn_in_progress());
+
+ /*
+ * Start a CONFIG Transaction (if not started already)
+ */
+ session->cfg_txn_id = mgmt_create_txn(session->session_id,
+ MGMTD_TXN_TYPE_CONFIG);
+ if (session->cfg_txn_id == MGMTD_SESSION_ID_NONE) {
+ fe_adapter_send_commit_cfg_reply(
+ session, commcfg_req->src_ds_id,
+ commcfg_req->dst_ds_id, commcfg_req->req_id,
+ MGMTD_INTERNAL_ERROR, commcfg_req->validate_only,
+ "Failed to create a Configuration session!");
+ return 0;
+ }
+ __dbg("Created txn-id: %" PRIu64 " for session-id %" PRIu64
+ " for COMMIT-CFG-REQ",
+ session->cfg_txn_id, session->session_id);
+ }
+
+ /*
+ * Create COMMITConfig request under the transaction
+ */
+ if (mgmt_txn_send_commit_config_req(session->cfg_txn_id,
+ commcfg_req->req_id,
+ commcfg_req->src_ds_id, src_ds_ctx,
+ commcfg_req->dst_ds_id, dst_ds_ctx,
+ commcfg_req->validate_only,
+ commcfg_req->abort, false,
+ NULL) != 0) {
+ fe_adapter_send_commit_cfg_reply(
+ session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+ commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+ commcfg_req->validate_only,
+ "Request processing for COMMIT-CONFIG failed!");
+ return 0;
+ }
+
+ return 0;
+}
+
+static int
+mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
+ Mgmtd__FeMessage *fe_msg)
+{
+ struct mgmt_fe_session_ctx *session;
+
+	 * protobuf-c adds a max-size enum whose internal name changes between
+	 * versions; cast to an int to avoid unhandled-enum warnings.
+ * version, name; cast to an int to avoid unhandled enum warnings
+ */
+ switch ((int)fe_msg->message_case) {
+ case MGMTD__FE_MESSAGE__MESSAGE_REGISTER_REQ:
+ __dbg("Got REGISTER_REQ from '%s'",
+ fe_msg->register_req->client_name);
+
+ if (strlen(fe_msg->register_req->client_name)) {
+ strlcpy(adapter->name,
+ fe_msg->register_req->client_name,
+ sizeof(adapter->name));
+ mgmt_fe_adapter_cleanup_old_conn(adapter);
+ }
+ break;
+ case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REQ:
+ if (fe_msg->session_req->create
+ && fe_msg->session_req->id_case
+ == MGMTD__FE_SESSION_REQ__ID_CLIENT_CONN_ID) {
+ __dbg("Got SESSION_REQ (create) for client-id %" PRIu64
+ " from '%s'",
+ fe_msg->session_req->client_conn_id,
+ adapter->name);
+
+ session = mgmt_fe_create_session(
+ adapter, fe_msg->session_req->client_conn_id);
+ fe_adapter_send_session_reply(adapter, session, true,
+ session ? true : false);
+ } else if (
+ !fe_msg->session_req->create
+ && fe_msg->session_req->id_case
+ == MGMTD__FE_SESSION_REQ__ID_SESSION_ID) {
+ __dbg("Got SESSION_REQ (destroy) for session-id %" PRIu64
+			      " from '%s'",
+ fe_msg->session_req->session_id, adapter->name);
+
+ session = mgmt_session_id2ctx(
+ fe_msg->session_req->session_id);
+ fe_adapter_send_session_reply(adapter, session, false,
+ true);
+ mgmt_fe_cleanup_session(&session);
+ }
+ break;
+ case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REQ:
+ session = mgmt_session_id2ctx(
+ fe_msg->lockds_req->session_id);
+ __dbg("Got LOCKDS_REQ (%sLOCK) for DS:%s for session-id %" PRIu64
+ " from '%s'",
+ fe_msg->lockds_req->lock ? "" : "UN",
+ mgmt_ds_id2name(fe_msg->lockds_req->ds_id),
+ fe_msg->lockds_req->session_id, adapter->name);
+ mgmt_fe_session_handle_lockds_req_msg(
+ session, fe_msg->lockds_req);
+ break;
+ case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REQ:
+ session = mgmt_session_id2ctx(
+ fe_msg->setcfg_req->session_id);
+ session->adapter->setcfg_stats.set_cfg_count++;
+ __dbg("Got SETCFG_REQ (%d Xpaths, Implicit:%c) on DS:%s for session-id %" PRIu64
+ " from '%s'",
+ (int)fe_msg->setcfg_req->n_data,
+ fe_msg->setcfg_req->implicit_commit ? 'T' : 'F',
+ mgmt_ds_id2name(fe_msg->setcfg_req->ds_id),
+ fe_msg->setcfg_req->session_id, adapter->name);
+
+ mgmt_fe_session_handle_setcfg_req_msg(
+ session, fe_msg->setcfg_req);
+ break;
+ case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REQ:
+ session = mgmt_session_id2ctx(
+ fe_msg->commcfg_req->session_id);
+ __dbg("Got COMMCFG_REQ for src-DS:%s dst-DS:%s (Abort:%c) on session-id %" PRIu64
+ " from '%s'",
+ mgmt_ds_id2name(fe_msg->commcfg_req->src_ds_id),
+ mgmt_ds_id2name(fe_msg->commcfg_req->dst_ds_id),
+ fe_msg->commcfg_req->abort ? 'T' : 'F',
+ fe_msg->commcfg_req->session_id, adapter->name);
+ mgmt_fe_session_handle_commit_config_req_msg(
+ session, fe_msg->commcfg_req);
+ break;
+ case MGMTD__FE_MESSAGE__MESSAGE_GET_REQ:
+ session = mgmt_session_id2ctx(fe_msg->get_req->session_id);
+ __dbg("Got GET_REQ for DS:%s (xpaths: %d) on session-id %" PRIu64
+ " from '%s'",
+ mgmt_ds_id2name(fe_msg->get_req->ds_id),
+ (int)fe_msg->get_req->n_data, fe_msg->get_req->session_id,
+ adapter->name);
+ mgmt_fe_session_handle_get_req_msg(session, fe_msg->get_req);
+ break;
+ case MGMTD__FE_MESSAGE__MESSAGE_NOTIFY_DATA_REQ:
+ case MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ:
+ __log_err("Got unhandled message of type %u from '%s'",
+ fe_msg->message_case, adapter->name);
+ /*
+ * TODO: Add handling code in future.
+ */
+ break;
+ /*
+ * NOTE: The following messages are always sent from MGMTD to
+ * Frontend clients only and/or need not be handled on MGMTd.
+ */
+ case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REPLY:
+ case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REPLY:
+ case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REPLY:
+ case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REPLY:
+ case MGMTD__FE_MESSAGE__MESSAGE_GET_REPLY:
+ case MGMTD__FE_MESSAGE__MESSAGE__NOT_SET:
+ default:
+ /*
+		 * A 'default' case is added, contrary to the FRR coding
+		 * guidelines, to avoid build failures on certain build
+		 * systems (courtesy of the protobuf-c package).
+ */
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * Send result of get-tree request back to the FE client.
+ *
+ * Args:
+ * session: the session.
+ * req_id: the request ID.
+ * short_circuit_ok: if allowed to short circuit the message.
+ *	result_type: LYD_FORMAT to use for the returned output.
+ *	wd_options: libyang with-defaults print options (LYD_PRINT_WD_*).
+ * tree: the tree to send, can be NULL which will send an empty tree.
+ * partial_error: if an error occurred during gathering results.
+ *
+ * Return:
+ * Any error that occurs -- the message is likely not sent if non-zero.
+ */
+static int fe_adapter_send_tree_data(struct mgmt_fe_session_ctx *session,
+ uint64_t req_id, bool short_circuit_ok,
+ uint8_t result_type, uint32_t wd_options,
+ const struct lyd_node *tree,
+ int partial_error)
+
+{
+ struct mgmt_msg_tree_data *msg;
+ uint8_t **darrp = NULL;
+ int ret = 0;
+
+ msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_tree_data, 0,
+ MTYPE_MSG_NATIVE_TREE_DATA);
+ msg->refer_id = session->session_id;
+ msg->req_id = req_id;
+ msg->code = MGMT_MSG_CODE_TREE_DATA;
+ msg->partial_error = partial_error;
+ msg->result_type = result_type;
+
+ darrp = mgmt_msg_native_get_darrp(msg);
+ ret = yang_print_tree_append(darrp, tree, result_type,
+ (wd_options | LYD_PRINT_WITHSIBLINGS));
+ if (ret != LY_SUCCESS) {
+ __log_err("Error building get-tree result for client %s session-id %" PRIu64
+ " req-id %" PRIu64 " scok %d result type %u",
+ session->adapter->name, session->session_id, req_id,
+ short_circuit_ok, result_type);
+ goto done;
+ }
+
+ __dbg("Sending get-tree result from adapter %s to session-id %" PRIu64
+ " req-id %" PRIu64 " scok %d result type %u len %u",
+ session->adapter->name, session->session_id, req_id,
+ short_circuit_ok, result_type, mgmt_msg_native_get_msg_len(msg));
+
+ ret = fe_adapter_send_native_msg(session->adapter, msg,
+ mgmt_msg_native_get_msg_len(msg),
+ short_circuit_ok);
+done:
+ mgmt_msg_native_free_msg(msg);
+
+ return ret;
+}
+
+static int fe_adapter_send_rpc_reply(struct mgmt_fe_session_ctx *session,
+ uint64_t req_id, uint8_t result_type,
+ const struct lyd_node *result)
+{
+ struct mgmt_msg_rpc_reply *msg;
+ uint8_t **darrp = NULL;
+ int ret;
+
+ msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_rpc_reply, 0,
+ MTYPE_MSG_NATIVE_RPC_REPLY);
+ msg->refer_id = session->session_id;
+ msg->req_id = req_id;
+ msg->code = MGMT_MSG_CODE_RPC_REPLY;
+ msg->result_type = result_type;
+
+ if (result) {
+ darrp = mgmt_msg_native_get_darrp(msg);
+ ret = yang_print_tree_append(darrp, result, result_type, 0);
+ if (ret != LY_SUCCESS) {
+ __log_err("Error building rpc-reply result for client %s session-id %" PRIu64
+ " req-id %" PRIu64 " result type %u",
+ session->adapter->name, session->session_id,
+ req_id, result_type);
+ goto done;
+ }
+ }
+
+ __dbg("Sending rpc-reply from adapter %s to session-id %" PRIu64
+ " req-id %" PRIu64 " len %u",
+ session->adapter->name, session->session_id, req_id,
+ mgmt_msg_native_get_msg_len(msg));
+
+ ret = fe_adapter_send_native_msg(session->adapter, msg,
+ mgmt_msg_native_get_msg_len(msg),
+ false);
+done:
+ mgmt_msg_native_free_msg(msg);
+
+ return ret;
+}
+
+static int fe_adapter_send_edit_reply(struct mgmt_fe_session_ctx *session,
+ uint64_t req_id, const char *xpath)
+{
+ struct mgmt_msg_edit_reply *msg;
+ int ret;
+
+ msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_edit_reply, 0,
+ MTYPE_MSG_NATIVE_EDIT_REPLY);
+ msg->refer_id = session->session_id;
+ msg->req_id = req_id;
+ msg->code = MGMT_MSG_CODE_EDIT_REPLY;
+
+ mgmt_msg_native_xpath_encode(msg, xpath);
+
+ __dbg("Sending edit-reply from adapter %s to session-id %" PRIu64
+ " req-id %" PRIu64 " len %u",
+ session->adapter->name, session->session_id, req_id,
+ mgmt_msg_native_get_msg_len(msg));
+
+ ret = fe_adapter_send_native_msg(session->adapter, msg,
+ mgmt_msg_native_get_msg_len(msg),
+ false);
+ mgmt_msg_native_free_msg(msg);
+
+ return ret;
+}
+
+/**
+ * fe_adapter_handle_get_data() - Handle a get-data message from an FE client.
+ * @session: the client session.
+ * @__msg: the message data.
+ * @msg_len: the length of the message data.
+ */
+static void fe_adapter_handle_get_data(struct mgmt_fe_session_ctx *session,
+ void *__msg, size_t msg_len)
+{
+ struct mgmt_msg_get_data *msg = __msg;
+ struct lysc_node **snodes = NULL;
+ char *xpath_resolved = NULL;
+ uint64_t req_id = msg->req_id;
+ Mgmtd__DatastoreId ds_id;
+ uint64_t clients;
+ uint32_t wd_options;
+ bool simple_xpath;
+ LY_ERR err;
+ int ret;
+
+ __dbg("Received get-data request from client %s for session-id %" PRIu64
+ " req-id %" PRIu64,
+ session->adapter->name, session->session_id, msg->req_id);
+
+ if (!MGMT_MSG_VALIDATE_NUL_TERM(msg, msg_len)) {
+ fe_adapter_send_error(session, req_id, false, -EINVAL,
+ "Invalid message rcvd from session-id: %" PRIu64,
+ session->session_id);
+ goto done;
+ }
+
+ if (session->txn_id != MGMTD_TXN_ID_NONE) {
+ fe_adapter_send_error(session, req_id, false, -EINPROGRESS,
+ "Transaction in progress txn-id: %" PRIu64
+ " for session-id: %" PRIu64,
+ session->txn_id, session->session_id);
+ goto done;
+ }
+
+ switch (msg->defaults) {
+ case GET_DATA_DEFAULTS_EXPLICIT:
+ wd_options = LYD_PRINT_WD_EXPLICIT;
+ break;
+ case GET_DATA_DEFAULTS_TRIM:
+ wd_options = LYD_PRINT_WD_TRIM;
+ break;
+ case GET_DATA_DEFAULTS_ALL:
+ wd_options = LYD_PRINT_WD_ALL;
+ break;
+ case GET_DATA_DEFAULTS_ALL_ADD_TAG:
+ wd_options = LYD_PRINT_WD_IMPL_TAG;
+ break;
+ default:
+ fe_adapter_send_error(session, req_id, false, -EINVAL,
+ "Invalid defaults value %u for session-id: %" PRIu64,
+ msg->defaults, session->session_id);
+ goto done;
+ }
+
+ switch (msg->datastore) {
+ case MGMT_MSG_DATASTORE_CANDIDATE:
+ ds_id = MGMTD_DS_CANDIDATE;
+ break;
+ case MGMT_MSG_DATASTORE_RUNNING:
+ ds_id = MGMTD_DS_RUNNING;
+ break;
+ case MGMT_MSG_DATASTORE_OPERATIONAL:
+ ds_id = MGMTD_DS_OPERATIONAL;
+ break;
+ default:
+ fe_adapter_send_error(session, req_id, false, -EINVAL,
+ "Unsupported datastore %" PRIu8
+ " requested from session-id: %" PRIu64,
+ msg->datastore, session->session_id);
+ goto done;
+ }
+
+ err = yang_resolve_snode_xpath(ly_native_ctx, msg->xpath, &snodes,
+ &simple_xpath);
+ if (err) {
+ fe_adapter_send_error(session, req_id, false, -EINPROGRESS,
+ "XPath doesn't resolve for session-id: %" PRIu64,
+ session->session_id);
+ goto done;
+ }
+ darr_free(snodes);
+
+ clients = mgmt_be_interested_clients(msg->xpath,
+ MGMT_BE_XPATH_SUBSCR_TYPE_OPER);
+ if (!clients && !CHECK_FLAG(msg->flags, GET_DATA_FLAG_CONFIG)) {
+ __dbg("No backends provide xpath: %s for txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ msg->xpath, session->txn_id, session->session_id);
+
+ fe_adapter_send_tree_data(session, req_id, false,
+ msg->result_type, wd_options, NULL, 0);
+ goto done;
+ }
+
+ /* Start a SHOW Transaction */
+ session->txn_id = mgmt_create_txn(session->session_id,
+ MGMTD_TXN_TYPE_SHOW);
+ if (session->txn_id == MGMTD_SESSION_ID_NONE) {
+ fe_adapter_send_error(session, req_id, false, -EINPROGRESS,
+ "failed to create a 'show' txn");
+ goto done;
+ }
+
+ __dbg("Created new show txn-id: %" PRIu64 " for session-id: %" PRIu64,
+ session->txn_id, session->session_id);
+
+ /* Create a GET-TREE request under the transaction */
+ ret = mgmt_txn_send_get_tree_oper(session->txn_id, req_id, clients,
+ ds_id, msg->result_type, msg->flags,
+ wd_options, simple_xpath, msg->xpath);
+ if (ret) {
+ /* destroy the just created txn */
+ mgmt_destroy_txn(&session->txn_id);
+ fe_adapter_send_error(session, req_id, false, -EINPROGRESS,
+ "failed to create a 'show' txn");
+ }
+done:
+ darr_free(snodes);
+ darr_free(xpath_resolved);
+}
+
+static void fe_adapter_handle_edit(struct mgmt_fe_session_ctx *session,
+ void *__msg, size_t msg_len)
+{
+ struct mgmt_msg_edit *msg = __msg;
+ Mgmtd__DatastoreId ds_id, rds_id;
+ struct mgmt_ds_ctx *ds_ctx, *rds_ctx;
+ const char *xpath, *data;
+ bool lock, commit;
+ int ret;
+
+ if (msg->datastore != MGMT_MSG_DATASTORE_CANDIDATE) {
+ fe_adapter_send_error(session, msg->req_id, false, -EINVAL,
+ "Unsupported datastore");
+ return;
+ }
+
+ xpath = mgmt_msg_native_xpath_data_decode(msg, msg_len, data);
+ if (!xpath) {
+ fe_adapter_send_error(session, msg->req_id, false, -EINVAL,
+ "Invalid message");
+ return;
+ }
+
+ ds_id = MGMTD_DS_CANDIDATE;
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, ds_id);
+ assert(ds_ctx);
+
+ rds_id = MGMTD_DS_RUNNING;
+ rds_ctx = mgmt_ds_get_ctx_by_id(mm, rds_id);
+ assert(rds_ctx);
+
+ lock = CHECK_FLAG(msg->flags, EDIT_FLAG_IMPLICIT_LOCK);
+ commit = CHECK_FLAG(msg->flags, EDIT_FLAG_IMPLICIT_COMMIT);
+
+ if (lock) {
+ if (mgmt_fe_session_write_lock_ds(ds_id, ds_ctx, session)) {
+ fe_adapter_send_error(session, msg->req_id, false,
+ -EBUSY,
+ "Candidate DS is locked by another session");
+ return;
+ }
+
+ if (commit) {
+ if (mgmt_fe_session_write_lock_ds(rds_id, rds_ctx,
+ session)) {
+ mgmt_fe_session_unlock_ds(ds_id, ds_ctx,
+ session);
+ fe_adapter_send_error(
+ session, msg->req_id, false, -EBUSY,
+ "Running DS is locked by another session");
+ return;
+ }
+ }
+ } else {
+ if (!session->ds_locked[ds_id]) {
+ fe_adapter_send_error(session, msg->req_id, false,
+ -EBUSY,
+ "Candidate DS is not locked");
+ return;
+ }
+
+ if (commit) {
+ if (!session->ds_locked[rds_id]) {
+ fe_adapter_send_error(session, msg->req_id,
+ false, -EBUSY,
+ "Running DS is not locked");
+ return;
+ }
+ }
+ }
+
+ session->cfg_txn_id = mgmt_create_txn(session->session_id,
+ MGMTD_TXN_TYPE_CONFIG);
+ if (session->cfg_txn_id == MGMTD_SESSION_ID_NONE) {
+ if (lock) {
+ mgmt_fe_session_unlock_ds(ds_id, ds_ctx, session);
+ if (commit)
+ mgmt_fe_session_unlock_ds(rds_id, rds_ctx,
+ session);
+ }
+ fe_adapter_send_error(session, msg->req_id, false, -EBUSY,
+ "Failed to create a configuration transaction");
+ return;
+ }
+
+ __dbg("Created new config txn-id: %" PRIu64 " for session-id: %" PRIu64,
+ session->cfg_txn_id, session->session_id);
+
+ ret = mgmt_txn_send_edit(session->cfg_txn_id, msg->req_id, ds_id,
+ ds_ctx, rds_id, rds_ctx, lock, commit,
+ msg->request_type, msg->flags, msg->operation,
+ xpath, data);
+ if (ret) {
+ /* destroy the just created txn */
+ mgmt_destroy_txn(&session->cfg_txn_id);
+ if (lock) {
+ mgmt_fe_session_unlock_ds(ds_id, ds_ctx, session);
+ if (commit)
+ mgmt_fe_session_unlock_ds(rds_id, rds_ctx,
+ session);
+ }
+ fe_adapter_send_error(session, msg->req_id, false, -EBUSY,
+ "Failed to create a configuration transaction");
+ }
+}
+
+/**
+ * fe_adapter_handle_rpc() - Handle an RPC message from an FE client.
+ * @session: the client session.
+ * @__msg: the message data.
+ * @msg_len: the length of the message data.
+ */
+static void fe_adapter_handle_rpc(struct mgmt_fe_session_ctx *session,
+ void *__msg, size_t msg_len)
+{
+ struct mgmt_msg_rpc *msg = __msg;
+ const struct lysc_node *snode;
+ const char *xpath, *data;
+ uint64_t req_id = msg->req_id;
+ uint64_t clients;
+ int ret;
+
+ __dbg("Received RPC request from client %s for session-id %" PRIu64
+ " req-id %" PRIu64,
+ session->adapter->name, session->session_id, msg->req_id);
+
+ xpath = mgmt_msg_native_xpath_data_decode(msg, msg_len, data);
+ if (!xpath) {
+ fe_adapter_send_error(session, req_id, false, -EINVAL,
+ "Invalid message");
+ return;
+ }
+
+ if (session->txn_id != MGMTD_TXN_ID_NONE) {
+ fe_adapter_send_error(session, req_id, false, -EINPROGRESS,
+ "Transaction in progress txn-id: %" PRIu64
+ " for session-id: %" PRIu64,
+ session->txn_id, session->session_id);
+ return;
+ }
+
+ snode = lys_find_path(ly_native_ctx, NULL, xpath, 0);
+ if (!snode) {
+ fe_adapter_send_error(session, req_id, false, -ENOENT,
+ "No such path: %s", xpath);
+ return;
+ }
+
+ if (snode->nodetype != LYS_RPC && snode->nodetype != LYS_ACTION) {
+ fe_adapter_send_error(session, req_id, false, -EINVAL,
+ "Not an RPC or action path: %s", xpath);
+ return;
+ }
+
+ clients = mgmt_be_interested_clients(xpath,
+ MGMT_BE_XPATH_SUBSCR_TYPE_RPC);
+ if (!clients) {
+ __dbg("No backends implement xpath: %s for txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ xpath, session->txn_id, session->session_id);
+
+ fe_adapter_send_error(session, req_id, false, -ENOENT,
+ "No backends implement xpath: %s", xpath);
+ return;
+ }
+
+ /* Start a RPC Transaction */
+ session->txn_id = mgmt_create_txn(session->session_id,
+ MGMTD_TXN_TYPE_RPC);
+ if (session->txn_id == MGMTD_SESSION_ID_NONE) {
+ fe_adapter_send_error(session, req_id, false, -EINPROGRESS,
+ "Failed to create an RPC transaction");
+ return;
+ }
+
+ __dbg("Created new rpc txn-id: %" PRIu64 " for session-id: %" PRIu64,
+ session->txn_id, session->session_id);
+
+ /* Create an RPC request under the transaction */
+ ret = mgmt_txn_send_rpc(session->txn_id, req_id, clients,
+ msg->request_type, xpath, data,
+ mgmt_msg_native_data_len_decode(msg, msg_len));
+ if (ret) {
+ /* destroy the just created txn */
+ mgmt_destroy_txn(&session->txn_id);
+ fe_adapter_send_error(session, req_id, false, -EINPROGRESS,
+ "Failed to create an RPC transaction");
+ }
+}
+
+/**
+ * Handle a native encoded message from the FE client.
+ */
+static void fe_adapter_handle_native_msg(struct mgmt_fe_client_adapter *adapter,
+ struct mgmt_msg_header *msg,
+ size_t msg_len)
+{
+ struct mgmt_fe_session_ctx *session;
+
+ session = mgmt_session_id2ctx(msg->refer_id);
+ if (!session) {
+ __log_err("adapter %s: recv msg unknown session-id %" PRIu64,
+ adapter->name, msg->refer_id);
+ return;
+ }
+ assert(session->adapter == adapter);
+
+ switch (msg->code) {
+ case MGMT_MSG_CODE_GET_DATA:
+ fe_adapter_handle_get_data(session, msg, msg_len);
+ break;
+ case MGMT_MSG_CODE_EDIT:
+ fe_adapter_handle_edit(session, msg, msg_len);
+ break;
+ case MGMT_MSG_CODE_RPC:
+ fe_adapter_handle_rpc(session, msg, msg_len);
+ break;
+ default:
+ __log_err("unknown native message session-id %" PRIu64
+ " req-id %" PRIu64 " code %u to FE adapter %s",
+ msg->refer_id, msg->req_id, msg->code, adapter->name);
+ break;
+ }
+}
+
+
+static void mgmt_fe_adapter_process_msg(uint8_t version, uint8_t *data,
+ size_t len, struct msg_conn *conn)
+{
+ struct mgmt_fe_client_adapter *adapter = conn->user;
+ Mgmtd__FeMessage *fe_msg;
+
+ if (version == MGMT_MSG_VERSION_NATIVE) {
+ struct mgmt_msg_header *msg = (typeof(msg))data;
+
+ if (len >= sizeof(*msg))
+ fe_adapter_handle_native_msg(adapter, msg, len);
+ else
+ __log_err("native message to adapter %s too short %zu",
+ adapter->name, len);
+ return;
+ }
+
+ fe_msg = mgmtd__fe_message__unpack(NULL, len, data);
+ if (!fe_msg) {
+ __dbg("Failed to decode %zu bytes for adapter: %s", len,
+ adapter->name);
+ return;
+ }
+ __dbg("Decoded %zu bytes of message: %u from adapter: %s", len,
+ fe_msg->message_case, adapter->name);
+ (void)mgmt_fe_adapter_handle_msg(adapter, fe_msg);
+ mgmtd__fe_message__free_unpacked(fe_msg, NULL);
+}
+
+void mgmt_fe_adapter_send_notify(struct mgmt_msg_notify_data *msg, size_t msglen)
+{
+ struct mgmt_fe_client_adapter *adapter;
+ struct mgmt_fe_session_ctx *session;
+
+ assert(msg->refer_id == 0);
+
+ FOREACH_ADAPTER_IN_LIST (adapter) {
+ FOREACH_SESSION_IN_LIST (adapter, session) {
+ msg->refer_id = session->session_id;
+ (void)fe_adapter_send_native_msg(adapter, msg, msglen,
+ false);
+ }
+ }
+ msg->refer_id = 0;
+}
+
+void mgmt_fe_adapter_lock(struct mgmt_fe_client_adapter *adapter)
+{
+ adapter->refcount++;
+}
+
+extern void mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter)
+{
+ struct mgmt_fe_client_adapter *a = *adapter;
+ assert(a && a->refcount);
+
+ if (!--a->refcount) {
+ mgmt_fe_adapters_del(&mgmt_fe_adapters, a);
+ msg_server_conn_delete(a->conn);
+ XFREE(MTYPE_MGMTD_FE_ADPATER, a);
+ }
+ *adapter = NULL;
+}
+
+/*
+ * Initialize the FE adapter module
+ */
+void mgmt_fe_adapter_init(struct event_loop *tm)
+{
+ char server_path[MAXPATHLEN];
+
+ assert(!mgmt_loop);
+ mgmt_loop = tm;
+
+ mgmt_fe_adapters_init(&mgmt_fe_adapters);
+
+ assert(!mgmt_fe_sessions);
+ mgmt_fe_sessions =
+ hash_create(mgmt_fe_session_hash_key, mgmt_fe_session_hash_cmp,
+ "MGMT Frontend Sessions");
+
+ snprintf(server_path, sizeof(server_path), MGMTD_FE_SOCK_NAME);
+
+ if (msg_server_init(&mgmt_fe_server, server_path, tm,
+ mgmt_fe_create_adapter, "frontend", &mgmt_debug_fe)) {
+ zlog_err("cannot initialize frontend server");
+ exit(1);
+ }
+}
+
+static void mgmt_fe_abort_if_session(void *data)
+{
+ struct mgmt_fe_session_ctx *session = data;
+
+ __log_err("found orphaned session id %" PRIu64 " client id %" PRIu64
+ " adapter %s",
+ session->session_id, session->client_id,
+ session->adapter ? session->adapter->name : "NULL");
+ abort();
+}
+
+/*
+ * Destroy the FE adapter module
+ */
+void mgmt_fe_adapter_destroy(void)
+{
+ struct mgmt_fe_client_adapter *adapter;
+
+ msg_server_cleanup(&mgmt_fe_server);
+
+ /* Deleting the adapters will delete all the sessions */
+ FOREACH_ADAPTER_IN_LIST (adapter)
+ mgmt_fe_adapter_delete(adapter);
+
+ hash_clean_and_free(&mgmt_fe_sessions, mgmt_fe_abort_if_session);
+}
+
+/*
+ * The server accepted a new connection
+ */
+struct msg_conn *mgmt_fe_create_adapter(int conn_fd, union sockunion *from)
+{
+ struct mgmt_fe_client_adapter *adapter = NULL;
+
+ adapter = mgmt_fe_find_adapter_by_fd(conn_fd);
+ if (!adapter) {
+ adapter = XCALLOC(MTYPE_MGMTD_FE_ADPATER,
+ sizeof(struct mgmt_fe_client_adapter));
+ snprintf(adapter->name, sizeof(adapter->name), "Unknown-FD-%d",
+ conn_fd);
+
+ mgmt_fe_sessions_init(&adapter->fe_sessions);
+ mgmt_fe_adapter_lock(adapter);
+ mgmt_fe_adapters_add_tail(&mgmt_fe_adapters, adapter);
+
+ adapter->conn = msg_server_conn_create(
+ mgmt_loop, conn_fd, mgmt_fe_adapter_notify_disconnect,
+ mgmt_fe_adapter_process_msg, MGMTD_FE_MAX_NUM_MSG_PROC,
+ MGMTD_FE_MAX_NUM_MSG_WRITE, MGMTD_FE_MAX_MSG_LEN,
+ adapter, "FE-adapter");
+
+ adapter->conn->debug = DEBUG_MODE_CHECK(&mgmt_debug_fe,
+ DEBUG_MODE_ALL);
+
+ adapter->setcfg_stats.min_tm = ULONG_MAX;
+ adapter->cmt_stats.min_tm = ULONG_MAX;
+ __dbg("Added new MGMTD Frontend adapter '%s'", adapter->name);
+ }
+ return adapter->conn;
+}
+
+int mgmt_fe_send_set_cfg_reply(uint64_t session_id, uint64_t txn_id,
+ Mgmtd__DatastoreId ds_id, uint64_t req_id,
+ enum mgmt_result result,
+ const char *error_if_any,
+ bool implicit_commit)
+{
+ struct mgmt_fe_session_ctx *session;
+
+ session = mgmt_session_id2ctx(session_id);
+ if (!session || session->cfg_txn_id != txn_id) {
+ if (session)
+ __log_err("txn-id doesn't match, session txn-id is %" PRIu64
+				  " current txn-id: %" PRIu64,
+ session->cfg_txn_id, txn_id);
+ return -1;
+ }
+
+ return fe_adapter_send_set_cfg_reply(session, ds_id, req_id,
+ result == MGMTD_SUCCESS,
+ error_if_any, implicit_commit);
+}
+
+int mgmt_fe_send_commit_cfg_reply(uint64_t session_id, uint64_t txn_id,
+ Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dst_ds_id,
+ uint64_t req_id, bool validate_only,
+ enum mgmt_result result,
+ const char *error_if_any)
+{
+ struct mgmt_fe_session_ctx *session;
+
+ session = mgmt_session_id2ctx(session_id);
+ if (!session || session->cfg_txn_id != txn_id)
+ return -1;
+
+ return fe_adapter_send_commit_cfg_reply(session, src_ds_id, dst_ds_id,
+ req_id, result, validate_only,
+ error_if_any);
+}
+
+int mgmt_fe_send_get_reply(uint64_t session_id, uint64_t txn_id,
+ Mgmtd__DatastoreId ds_id, uint64_t req_id,
+ enum mgmt_result result,
+ Mgmtd__YangDataReply *data_resp,
+ const char *error_if_any)
+{
+ struct mgmt_fe_session_ctx *session;
+
+ session = mgmt_session_id2ctx(session_id);
+ if (!session || session->txn_id != txn_id)
+ return -1;
+
+ return fe_adapter_send_get_reply(session, ds_id, req_id,
+ result == MGMTD_SUCCESS, data_resp,
+ error_if_any);
+}
+
+int mgmt_fe_adapter_send_tree_data(uint64_t session_id, uint64_t txn_id,
+ uint64_t req_id, LYD_FORMAT result_type,
+ uint32_t wd_options,
+ const struct lyd_node *tree,
+ int partial_error, bool short_circuit_ok)
+{
+ struct mgmt_fe_session_ctx *session;
+ int ret;
+
+ session = mgmt_session_id2ctx(session_id);
+ if (!session || session->txn_id != txn_id)
+ return -1;
+
+ ret = fe_adapter_send_tree_data(session, req_id, short_circuit_ok,
+ result_type, wd_options, tree,
+ partial_error);
+
+ mgmt_destroy_txn(&session->txn_id);
+
+ return ret;
+}
+
+int mgmt_fe_adapter_send_rpc_reply(uint64_t session_id, uint64_t txn_id,
+ uint64_t req_id, LYD_FORMAT result_type,
+ const struct lyd_node *result)
+{
+ struct mgmt_fe_session_ctx *session;
+ int ret;
+
+ session = mgmt_session_id2ctx(session_id);
+ if (!session || session->txn_id != txn_id)
+ return -1;
+
+ ret = fe_adapter_send_rpc_reply(session, req_id, result_type, result);
+
+ mgmt_destroy_txn(&session->txn_id);
+
+ return ret;
+}
+
+int mgmt_fe_adapter_send_edit_reply(uint64_t session_id, uint64_t txn_id,
+ uint64_t req_id, bool unlock, bool commit,
+ const char *xpath, int16_t error,
+ const char *errstr)
+{
+ struct mgmt_fe_session_ctx *session;
+ Mgmtd__DatastoreId ds_id, rds_id;
+ struct mgmt_ds_ctx *ds_ctx, *rds_ctx;
+ int ret;
+
+ session = mgmt_session_id2ctx(session_id);
+ if (!session || session->cfg_txn_id != txn_id)
+ return -1;
+
+ if (session->cfg_txn_id != MGMTD_TXN_ID_NONE && commit)
+ mgmt_fe_session_register_event(session,
+ MGMTD_FE_SESSION_CFG_TXN_CLNUP);
+
+ if (unlock) {
+ ds_id = MGMTD_DS_CANDIDATE;
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, ds_id);
+ assert(ds_ctx);
+
+ mgmt_fe_session_unlock_ds(ds_id, ds_ctx, session);
+
+ if (commit) {
+ rds_id = MGMTD_DS_RUNNING;
+ rds_ctx = mgmt_ds_get_ctx_by_id(mm, rds_id);
+ assert(rds_ctx);
+
+ mgmt_fe_session_unlock_ds(rds_id, rds_ctx, session);
+ }
+ }
+
+ if (error)
+ ret = fe_adapter_send_error(session, req_id, false, error, "%s",
+ errstr);
+ else
+ ret = fe_adapter_send_edit_reply(session, req_id, xpath);
+
+ if (session->cfg_txn_id != MGMTD_TXN_ID_NONE && !commit)
+ mgmt_destroy_txn(&session->cfg_txn_id);
+
+ return ret;
+}
+
+/**
+ * Send an error back to the FE client and cleanup any in-progress txn.
+ */
+int mgmt_fe_adapter_txn_error(uint64_t txn_id, uint64_t req_id,
+ bool short_circuit_ok, int16_t error,
+ const char *errstr)
+{
+ struct mgmt_fe_session_ctx *session;
+ int ret;
+
+ session = fe_adapter_session_by_txn_id(txn_id);
+ if (!session) {
+ __log_err("failed sending error for txn-id %" PRIu64
+ " session not found",
+ txn_id);
+ return -ENOENT;
+ }
+
+
+ ret = fe_adapter_send_error(session, req_id, false, error, "%s", errstr);
+
+ mgmt_destroy_txn(&session->txn_id);
+
+ return ret;
+}
+
+
+struct mgmt_setcfg_stats *mgmt_fe_get_session_setcfg_stats(uint64_t session_id)
+{
+ struct mgmt_fe_session_ctx *session;
+
+ session = mgmt_session_id2ctx(session_id);
+ if (!session || !session->adapter)
+ return NULL;
+
+ return &session->adapter->setcfg_stats;
+}
+
+struct mgmt_commit_stats *
+mgmt_fe_get_session_commit_stats(uint64_t session_id)
+{
+ struct mgmt_fe_session_ctx *session;
+
+ session = mgmt_session_id2ctx(session_id);
+ if (!session || !session->adapter)
+ return NULL;
+
+ return &session->adapter->cmt_stats;
+}
+
+static void
+mgmt_fe_adapter_cmt_stats_write(struct vty *vty,
+ struct mgmt_fe_client_adapter *adapter)
+{
+ char buf[MGMT_LONG_TIME_MAX_LEN];
+
+ if (!mm->perf_stats_en)
+ return;
+
+ vty_out(vty, " Num-Commits: \t\t\t%lu\n",
+ adapter->cmt_stats.commit_cnt);
+ if (adapter->cmt_stats.commit_cnt > 0) {
+ if (mm->perf_stats_en)
+ vty_out(vty, " Max-Commit-Duration: \t\t%lu uSecs\n",
+ adapter->cmt_stats.max_tm);
+ vty_out(vty, " Max-Commit-Batch-Size: \t\t%lu\n",
+ adapter->cmt_stats.max_batch_cnt);
+ if (mm->perf_stats_en)
+ vty_out(vty, " Min-Commit-Duration: \t\t%lu uSecs\n",
+ adapter->cmt_stats.min_tm);
+ vty_out(vty, " Min-Commit-Batch-Size: \t\t%lu\n",
+ adapter->cmt_stats.min_batch_cnt);
+ if (mm->perf_stats_en)
+ vty_out(vty,
+ " Last-Commit-Duration: \t\t%lu uSecs\n",
+ adapter->cmt_stats.last_exec_tm);
+ vty_out(vty, " Last-Commit-Batch-Size: \t\t%lu\n",
+ adapter->cmt_stats.last_batch_cnt);
+ vty_out(vty, " Last-Commit-CfgData-Reqs: \t\t%lu\n",
+ adapter->cmt_stats.last_num_cfgdata_reqs);
+ vty_out(vty, " Last-Commit-CfgApply-Reqs: \t\t%lu\n",
+ adapter->cmt_stats.last_num_apply_reqs);
+ if (mm->perf_stats_en) {
+ vty_out(vty, " Last-Commit-Details:\n");
+ vty_out(vty, " Commit Start: \t\t\t%s\n",
+ mgmt_realtime_to_string(
+ &adapter->cmt_stats.last_start, buf,
+ sizeof(buf)));
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+ vty_out(vty, " Config-Validate Start: \t\t%s\n",
+ mgmt_realtime_to_string(
+ &adapter->cmt_stats.validate_start, buf,
+ sizeof(buf)));
+#endif
+ vty_out(vty, " Prep-Config Start: \t\t%s\n",
+ mgmt_realtime_to_string(
+ &adapter->cmt_stats.prep_cfg_start, buf,
+ sizeof(buf)));
+ vty_out(vty, " Txn-Create Start: \t\t%s\n",
+ mgmt_realtime_to_string(
+ &adapter->cmt_stats.txn_create_start,
+ buf, sizeof(buf)));
+ vty_out(vty, " Apply-Config Start: \t\t%s\n",
+ mgmt_realtime_to_string(
+ &adapter->cmt_stats.apply_cfg_start,
+ buf, sizeof(buf)));
+ vty_out(vty, " Apply-Config End: \t\t%s\n",
+ mgmt_realtime_to_string(
+ &adapter->cmt_stats.apply_cfg_end, buf,
+ sizeof(buf)));
+ vty_out(vty, " Txn-Delete Start: \t\t%s\n",
+ mgmt_realtime_to_string(
+ &adapter->cmt_stats.txn_del_start, buf,
+ sizeof(buf)));
+ vty_out(vty, " Commit End: \t\t\t%s\n",
+ mgmt_realtime_to_string(
+ &adapter->cmt_stats.last_end, buf,
+ sizeof(buf)));
+ }
+ }
+}
+
+static void
+mgmt_fe_adapter_setcfg_stats_write(struct vty *vty,
+ struct mgmt_fe_client_adapter *adapter)
+{
+ char buf[MGMT_LONG_TIME_MAX_LEN];
+
+ if (!mm->perf_stats_en)
+ return;
+
+ vty_out(vty, " Num-Set-Cfg: \t\t\t%lu\n",
+ adapter->setcfg_stats.set_cfg_count);
+ if (mm->perf_stats_en && adapter->setcfg_stats.set_cfg_count > 0) {
+ vty_out(vty, " Max-Set-Cfg-Duration: \t\t%lu uSec\n",
+ adapter->setcfg_stats.max_tm);
+ vty_out(vty, " Min-Set-Cfg-Duration: \t\t%lu uSec\n",
+ adapter->setcfg_stats.min_tm);
+ vty_out(vty, " Avg-Set-Cfg-Duration: \t\t%lu uSec\n",
+ adapter->setcfg_stats.avg_tm);
+ vty_out(vty, " Last-Set-Cfg-Details:\n");
+ vty_out(vty, " Set-Cfg Start: \t\t\t%s\n",
+ mgmt_realtime_to_string(
+ &adapter->setcfg_stats.last_start, buf,
+ sizeof(buf)));
+ vty_out(vty, " Set-Cfg End: \t\t\t%s\n",
+ mgmt_realtime_to_string(&adapter->setcfg_stats.last_end,
+ buf, sizeof(buf)));
+ }
+}
+
+void mgmt_fe_adapter_status_write(struct vty *vty, bool detail)
+{
+ struct mgmt_fe_client_adapter *adapter;
+ struct mgmt_fe_session_ctx *session;
+ Mgmtd__DatastoreId ds_id;
+ bool locked = false;
+
+	vty_out(vty, "MGMTD Frontend Adapters\n");
+
+ FOREACH_ADAPTER_IN_LIST (adapter) {
+ vty_out(vty, " Client: \t\t\t\t%s\n", adapter->name);
+ vty_out(vty, " Conn-FD: \t\t\t\t%d\n", adapter->conn->fd);
+ if (detail) {
+ mgmt_fe_adapter_setcfg_stats_write(vty, adapter);
+ mgmt_fe_adapter_cmt_stats_write(vty, adapter);
+ }
+ vty_out(vty, " Sessions\n");
+ FOREACH_SESSION_IN_LIST (adapter, session) {
+ vty_out(vty, " Session: \t\t\t\t%p\n", session);
+ vty_out(vty, " Client-Id: \t\t\t%" PRIu64 "\n",
+ session->client_id);
+ vty_out(vty, " Session-Id: \t\t\t%" PRIu64 "\n",
+ session->session_id);
+ vty_out(vty, " DS-Locks:\n");
+			locked = false;
+			FOREACH_MGMTD_DS_ID (ds_id) {
+ if (session->ds_locked[ds_id]) {
+ locked = true;
+ vty_out(vty, " %s\n",
+ mgmt_ds_id2name(ds_id));
+ }
+ }
+ if (!locked)
+ vty_out(vty, " None\n");
+ }
+ vty_out(vty, " Total-Sessions: \t\t\t%d\n",
+ (int)mgmt_fe_sessions_count(&adapter->fe_sessions));
+ vty_out(vty, " Msg-Recvd: \t\t\t\t%" PRIu64 "\n",
+ adapter->conn->mstate.nrxm);
+ vty_out(vty, " Bytes-Recvd: \t\t\t%" PRIu64 "\n",
+ adapter->conn->mstate.nrxb);
+ vty_out(vty, " Msg-Sent: \t\t\t\t%" PRIu64 "\n",
+ adapter->conn->mstate.ntxm);
+ vty_out(vty, " Bytes-Sent: \t\t\t%" PRIu64 "\n",
+ adapter->conn->mstate.ntxb);
+ }
+ vty_out(vty, " Total: %d\n",
+ (int)mgmt_fe_adapters_count(&mgmt_fe_adapters));
+}
+
+void mgmt_fe_adapter_perf_measurement(struct vty *vty, bool config)
+{
+ mm->perf_stats_en = config;
+}
+
+void mgmt_fe_adapter_reset_perf_stats(struct vty *vty)
+{
+ struct mgmt_fe_client_adapter *adapter;
+
+	FOREACH_ADAPTER_IN_LIST (adapter) {
+		memset(&adapter->setcfg_stats, 0,
+		       sizeof(adapter->setcfg_stats));
+		memset(&adapter->cmt_stats, 0, sizeof(adapter->cmt_stats));
+	}
+}
diff --git a/mgmtd/mgmt_fe_adapter.h b/mgmtd/mgmt_fe_adapter.h
new file mode 100644
index 00000000..61d6cfae
--- /dev/null
+++ b/mgmtd/mgmt_fe_adapter.h
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Client Connection Adapter
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+
+#ifndef _FRR_MGMTD_FE_ADAPTER_H_
+#define _FRR_MGMTD_FE_ADAPTER_H_
+
+#include "mgmt_fe_client.h"
+#include "mgmt_msg.h"
+#include "mgmt_defines.h"
+
+struct mgmt_fe_client_adapter;
+struct mgmt_master;
+
+struct mgmt_commit_stats {
+ struct timeval last_start;
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+ struct timeval validate_start;
+#endif
+ struct timeval prep_cfg_start;
+ struct timeval txn_create_start;
+ struct timeval apply_cfg_start;
+ struct timeval apply_cfg_end;
+ struct timeval txn_del_start;
+ struct timeval last_end;
+ unsigned long last_exec_tm;
+ unsigned long max_tm;
+ unsigned long min_tm;
+ unsigned long last_batch_cnt;
+ unsigned long last_num_cfgdata_reqs;
+ unsigned long last_num_apply_reqs;
+ unsigned long max_batch_cnt;
+ unsigned long min_batch_cnt;
+ unsigned long commit_cnt;
+};
+
+struct mgmt_setcfg_stats {
+ struct timeval last_start;
+ struct timeval last_end;
+ unsigned long last_exec_tm;
+ unsigned long max_tm;
+ unsigned long min_tm;
+ unsigned long avg_tm;
+ unsigned long set_cfg_count;
+};
+
+PREDECL_LIST(mgmt_fe_sessions);
+
+PREDECL_LIST(mgmt_fe_adapters);
+
+struct mgmt_fe_client_adapter {
+ struct msg_conn *conn;
+ char name[MGMTD_CLIENT_NAME_MAX_LEN];
+
+ /* List of sessions created and being maintained for this client. */
+ struct mgmt_fe_sessions_head fe_sessions;
+
+ int refcount;
+ struct mgmt_commit_stats cmt_stats;
+ struct mgmt_setcfg_stats setcfg_stats;
+
+ struct mgmt_fe_adapters_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_fe_adapters, struct mgmt_fe_client_adapter, list_linkage);
+
+/* Initialise frontend adapter module */
+extern void mgmt_fe_adapter_init(struct event_loop *tm);
+
+/* Destroy frontend adapter module */
+extern void mgmt_fe_adapter_destroy(void);
+
+/* Acquire lock for frontend adapter */
+extern void mgmt_fe_adapter_lock(struct mgmt_fe_client_adapter *adapter);
+
+/* Remove lock from frontend adapter */
+extern void
+mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter);
+
+/* Create frontend adapter */
+extern struct msg_conn *mgmt_fe_create_adapter(int conn_fd,
+ union sockunion *su);
+
+/*
+ * Send set-config reply to the frontend client.
+ *
+ * session_id
+ * Unique session identifier.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * ds_id
+ * Datastore ID.
+ *
+ * req_id
+ * Config request ID.
+ *
+ * result
+ * Config request result (MGMT_*).
+ *
+ * error_if_any
+ *    Human-readable error message to send in case of error.
+ *
+ * implicit_commit
+ * TRUE if the commit is implicit, FALSE otherwise.
+ *
+ * Returns:
+ * 0 on success, -1 on failures.
+ */
+extern int mgmt_fe_send_set_cfg_reply(uint64_t session_id, uint64_t txn_id,
+ Mgmtd__DatastoreId ds_id,
+ uint64_t req_id,
+ enum mgmt_result result,
+ const char *error_if_any,
+				       bool implicit_commit);
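+
+/*
+ * Example (illustrative only; session_id, txn_id and req_id stand in for the
+ * caller's identifiers): report a successful set-config on the candidate
+ * datastore with no implicit commit:
+ *
+ *   mgmt_fe_send_set_cfg_reply(session_id, txn_id, MGMTD_DS_CANDIDATE,
+ *                              req_id, MGMTD_SUCCESS, NULL, false);
+ */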
+
+/*
+ * Send commit-config reply to the frontend client.
+ */
+extern int mgmt_fe_send_commit_cfg_reply(
+ uint64_t session_id, uint64_t txn_id, Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dst_ds_id, uint64_t req_id, bool validate_only,
+ enum mgmt_result result, const char *error_if_any);
+
+/*
+ * Send get-config/get-data reply to the frontend client.
+ */
+extern int mgmt_fe_send_get_reply(uint64_t session_id, uint64_t txn_id,
+ Mgmtd__DatastoreId ds_id, uint64_t req_id,
+ enum mgmt_result result,
+ Mgmtd__YangDataReply *data_resp,
+ const char *error_if_any);
+
+/**
+ * Send get-tree data reply back to client.
+ *
+ * This also cleans up and frees the transaction.
+ *
+ * Args:
+ * session_id: the session.
+ * txn_id: the txn_id this data pertains to
+ * req_id: the req id for the get_tree message
+ * result_type: the format of the result data.
+ * wd_options: with-defaults options.
+ * tree: the results.
+ *	partial_error: if there were errors while gathering the results.
+ * short_circuit_ok: True if OK to short-circuit the call.
+ *
+ * Return:
+ * the return value from the underlying send function.
+ *
+ */
+extern int
+mgmt_fe_adapter_send_tree_data(uint64_t session_id, uint64_t txn_id,
+ uint64_t req_id, LYD_FORMAT result_type,
+ uint32_t wd_options, const struct lyd_node *tree,
+ int partial_error, bool short_circuit_ok);
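+
+/*
+ * Example (illustrative only; session_id, txn_id, req_id and tree stand in
+ * for caller-supplied values): send back a JSON-formatted result tree with
+ * no with-defaults options, no partial error and no short-circuiting:
+ *
+ *   mgmt_fe_adapter_send_tree_data(session_id, txn_id, req_id, LYD_JSON,
+ *                                  0, tree, 0, false);
+ */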
+
+/**
+ * Send RPC reply back to client.
+ *
+ * This also cleans up and frees the transaction.
+ *
+ * Args:
+ * session_id: the session.
+ * txn_id: the txn_id this data pertains to
+ * req_id: the req id for the rpc message
+ * result_type: the format of the result data.
+ * result: the results.
+ *
+ * Return:
+ * the return value from the underlying send function.
+ */
+extern int mgmt_fe_adapter_send_rpc_reply(uint64_t session_id, uint64_t txn_id,
+ uint64_t req_id,
+ LYD_FORMAT result_type,
+ const struct lyd_node *result);
+
+/**
+ * Send edit reply back to client. If error is not 0, a native error is sent.
+ *
+ * This also cleans up and frees the transaction.
+ *
+ * Args:
+ * session_id: the session.
+ * txn_id: the txn_id this data pertains to
+ * req_id: the req id for the edit message
+ * unlock: implicit-lock flag was set in the request
+ * commit: implicit-commit flag was set in the request
+ * xpath: the xpath of the data node that was created
+ * error: the error code, zero for successful request
+ * errstr: the error string, if error is non-zero
+ */
+extern int mgmt_fe_adapter_send_edit_reply(uint64_t session_id, uint64_t txn_id,
+ uint64_t req_id, bool unlock,
+ bool commit, const char *xpath,
+ int16_t error, const char *errstr);
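+
+/*
+ * Example (illustrative only; the xpath and identifiers are placeholders):
+ * report a successfully created node for an edit request that used the
+ * implicit-lock and implicit-commit flags:
+ *
+ *   mgmt_fe_adapter_send_edit_reply(session_id, txn_id, req_id, true, true,
+ *                                   "/frr-interface:lib/interface[name='eth0']",
+ *                                   0, NULL);
+ */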
+
+/**
+ * Send an error back to the FE client using native messaging.
+ *
+ * This also cleans up and frees the transaction.
+ *
+ * Args:
+ *	txn_id: the txn_id this error pertains to.
+ *	req_id: the req id for the erroring request.
+ *	short_circuit_ok: True if OK to short-circuit the call.
+ *	error: An integer error value.
+ *	errstr: the error string to send to the client.
+ *
+ * Return:
+ * the return value from the underlying send function.
+ *
+ */
+extern int mgmt_fe_adapter_txn_error(uint64_t txn_id, uint64_t req_id,
+ bool short_circuit_ok, int16_t error,
+ const char *errstr);
+
+
+/* Fetch frontend client session set-config stats */
+extern struct mgmt_setcfg_stats *
+mgmt_fe_get_session_setcfg_stats(uint64_t session_id);
+
+/* Fetch frontend client session commit stats */
+extern struct mgmt_commit_stats *
+mgmt_fe_get_session_commit_stats(uint64_t session_id);
+
+extern void mgmt_fe_adapter_status_write(struct vty *vty, bool detail);
+extern void mgmt_fe_adapter_perf_measurement(struct vty *vty, bool config);
+extern void mgmt_fe_adapter_reset_perf_stats(struct vty *vty);
+
+/* Toggle debug on or off for connected clients. */
+extern void mgmt_fe_adapter_toggle_client_debug(bool set);
+
+#endif /* _FRR_MGMTD_FE_ADAPTER_H_ */
diff --git a/mgmtd/mgmt_history.c b/mgmtd/mgmt_history.c
new file mode 100644
index 00000000..c97cb7f0
--- /dev/null
+++ b/mgmtd/mgmt_history.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+
+#include <zebra.h>
+#include "md5.h"
+#include "frrevent.h"
+#include "xref.h"
+
+#include "mgmt_fe_client.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+
+struct mgmt_cmt_info_t {
+ struct mgmt_cmt_infos_item cmts;
+
+ char cmtid_str[MGMT_SHORT_TIME_MAX_LEN];
+ char time_str[MGMT_LONG_TIME_MAX_LEN];
+ char cmt_json_file[PATH_MAX];
+};
+
+
+DECLARE_DLIST(mgmt_cmt_infos, struct mgmt_cmt_info_t, cmts);
+
+#define FOREACH_CMT_REC(mm, cmt_info) \
+ frr_each_safe (mgmt_cmt_infos, &mm->cmts, cmt_info)
+
+/*
+ * The only instance of VTY session that has triggered an ongoing
+ * config rollback operation.
+ */
+static struct vty *rollback_vty;
+
+static bool file_exists(const char *path)
+{
+ return !access(path, F_OK);
+}
+
+static void remove_file(const char *path)
+{
+ if (!file_exists(path))
+ return;
+ if (unlink(path))
+ zlog_err("Failed to remove commit history file %s: %s", path,
+ safe_strerror(errno));
+}
+
+static struct mgmt_cmt_info_t *mgmt_history_new_cmt_info(void)
+{
+ struct mgmt_cmt_info_t *new;
+ struct timespec tv;
+ struct tm tm;
+
+ new = XCALLOC(MTYPE_MGMTD_CMT_INFO, sizeof(struct mgmt_cmt_info_t));
+
+ clock_gettime(CLOCK_REALTIME, &tv);
+ localtime_r(&tv.tv_sec, &tm);
+
+ mgmt_time_to_string(&tv, true, new->time_str, sizeof(new->time_str));
+ mgmt_time_to_string(&tv, false, new->cmtid_str, sizeof(new->cmtid_str));
+ snprintf(new->cmt_json_file, sizeof(new->cmt_json_file),
+ MGMTD_COMMIT_FILE_PATH(new->cmtid_str));
+
+ return new;
+}
+
+static struct mgmt_cmt_info_t *mgmt_history_create_cmt_rec(void)
+{
+ struct mgmt_cmt_info_t *new = mgmt_history_new_cmt_info();
+ struct mgmt_cmt_info_t *cmt_info;
+ struct mgmt_cmt_info_t *last_cmt_info = NULL;
+
+ if (mgmt_cmt_infos_count(&mm->cmts) == MGMTD_MAX_COMMIT_LIST) {
+ FOREACH_CMT_REC (mm, cmt_info)
+ last_cmt_info = cmt_info;
+
+ if (last_cmt_info) {
+ remove_file(last_cmt_info->cmt_json_file);
+ mgmt_cmt_infos_del(&mm->cmts, last_cmt_info);
+ XFREE(MTYPE_MGMTD_CMT_INFO, last_cmt_info);
+ }
+ }
+
+ mgmt_cmt_infos_add_head(&mm->cmts, new);
+ return new;
+}
+
+static struct mgmt_cmt_info_t *
+mgmt_history_find_cmt_record(const char *cmtid_str)
+{
+ struct mgmt_cmt_info_t *cmt_info;
+
+ FOREACH_CMT_REC (mm, cmt_info) {
+ if (strcmp(cmt_info->cmtid_str, cmtid_str) == 0)
+ return cmt_info;
+ }
+
+ return NULL;
+}
+
+static bool mgmt_history_read_cmt_record_index(void)
+{
+ char index_path[MAXPATHLEN];
+ FILE *fp;
+ struct mgmt_cmt_info_t cmt_info;
+ struct mgmt_cmt_info_t *new;
+ int cnt = 0;
+
+ snprintf(index_path, sizeof(index_path), MGMTD_COMMIT_INDEX_FILE_PATH);
+
+ fp = fopen(index_path, "rb");
+ if (!fp) {
+ if (errno == ENOENT || errno == ENOTDIR)
+ return false;
+
+ zlog_err("Failed to open commit history %pSQq for reading: %m",
+ index_path);
+ return false;
+ }
+
+ while ((fread(&cmt_info, sizeof(cmt_info), 1, fp)) > 0) {
+ if (cnt < MGMTD_MAX_COMMIT_LIST) {
+ if (!file_exists(cmt_info.cmt_json_file)) {
+ zlog_err("Commit in index, but file %s missing",
+ cmt_info.cmt_json_file);
+ continue;
+ }
+
+ new = XCALLOC(MTYPE_MGMTD_CMT_INFO,
+ sizeof(struct mgmt_cmt_info_t));
+ memcpy(new, &cmt_info, sizeof(struct mgmt_cmt_info_t));
+ mgmt_cmt_infos_add_tail(&mm->cmts, new);
+ } else {
+ zlog_warn("More records found in commit history file %pSQq than expected",
+ index_path);
+ fclose(fp);
+ return false;
+ }
+
+ cnt++;
+ }
+
+ fclose(fp);
+ return true;
+}
+
+static bool mgmt_history_dump_cmt_record_index(void)
+{
+ char index_path[MAXPATHLEN];
+ FILE *fp;
+ int ret = 0;
+ struct mgmt_cmt_info_t *cmt_info;
+	struct mgmt_cmt_info_t cmt_info_set[MGMTD_MAX_COMMIT_LIST];
+ int cnt = 0;
+
+ snprintf(index_path, sizeof(index_path), MGMTD_COMMIT_INDEX_FILE_PATH);
+
+ fp = fopen(index_path, "wb");
+ if (!fp) {
+ zlog_err("Failed to open commit history %pSQq for writing: %m",
+ index_path);
+ return false;
+ }
+
+ FOREACH_CMT_REC (mm, cmt_info) {
+ memcpy(&cmt_info_set[cnt], cmt_info,
+ sizeof(struct mgmt_cmt_info_t));
+ cnt++;
+ }
+
+ if (!cnt) {
+ fclose(fp);
+ return false;
+ }
+
+ ret = fwrite(&cmt_info_set, sizeof(struct mgmt_cmt_info_t), cnt, fp);
+ fclose(fp);
+ if (ret != cnt) {
+ zlog_err("Failed to write full commit history, removing file");
+ remove_file(index_path);
+ return false;
+ }
+ return true;
+}
+
+static int mgmt_history_rollback_to_cmt(struct vty *vty,
+ struct mgmt_cmt_info_t *cmt_info,
+ bool skip_file_load)
+{
+ struct mgmt_ds_ctx *src_ds_ctx;
+ struct mgmt_ds_ctx *dst_ds_ctx;
+ int ret = 0;
+
+ if (rollback_vty) {
+ vty_out(vty, "ERROR: Rollback already in progress!\n");
+ return -1;
+ }
+
+ src_ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_CANDIDATE);
+ dst_ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_RUNNING);
+ assert(src_ds_ctx);
+ assert(dst_ds_ctx);
+
+ ret = mgmt_ds_lock(src_ds_ctx, vty->mgmt_session_id);
+ if (ret != 0) {
+ vty_out(vty,
+			"Failed to lock the DS %u for rollback. Reason: %s!\n",
+			MGMTD_DS_CANDIDATE, strerror(ret));
+ return -1;
+ }
+
+ ret = mgmt_ds_lock(dst_ds_ctx, vty->mgmt_session_id);
+ if (ret != 0) {
+ mgmt_ds_unlock(src_ds_ctx);
+ vty_out(vty,
+			"Failed to lock the DS %u for rollback. Reason: %s!\n",
+ MGMTD_DS_RUNNING, strerror(ret));
+ return -1;
+ }
+
+ if (!skip_file_load) {
+ ret = mgmt_ds_load_config_from_file(
+ src_ds_ctx, cmt_info->cmt_json_file, false);
+ if (ret != 0) {
+ vty_out(vty,
+				"Error parsing the file, error code %d\n",
+ ret);
+ goto failed_unlock;
+ }
+ }
+
+ /* Internally trigger a commit-request. */
+ ret = mgmt_txn_rollback_trigger_cfg_apply(src_ds_ctx, dst_ds_ctx);
+ if (ret != 0) {
+ vty_out(vty,
+			"Error creating commit apply txn, error code %d\n",
+ ret);
+ goto failed_unlock;
+ }
+
+ mgmt_history_dump_cmt_record_index();
+
+ /*
+ * TODO: Cleanup: the generic TXN code currently checks for rollback
+ * and does the unlock when it completes.
+ */
+
+ /*
+ * Block the rollback command from returning till the rollback
+ * is completed. On rollback completion mgmt_history_rollback_complete()
+ * shall be called to resume the rollback command return to VTYSH.
+ */
+ vty->mgmt_req_pending_cmd = "ROLLBACK";
+ rollback_vty = vty;
+ return 0;
+
+failed_unlock:
+ mgmt_ds_unlock(src_ds_ctx);
+ mgmt_ds_unlock(dst_ds_ctx);
+ return ret;
+}
+
+void mgmt_history_rollback_complete(bool success)
+{
+ vty_mgmt_resume_response(rollback_vty,
+ success ? CMD_SUCCESS
+ : CMD_WARNING_CONFIG_FAILED);
+ rollback_vty = NULL;
+}
+
+int mgmt_history_rollback_by_id(struct vty *vty, const char *cmtid_str)
+{
+ int ret = 0;
+ struct mgmt_cmt_info_t *cmt_info;
+
+ if (!mgmt_cmt_infos_count(&mm->cmts) ||
+ !mgmt_history_find_cmt_record(cmtid_str)) {
+ vty_out(vty, "Invalid commit Id\n");
+ return -1;
+ }
+
+ FOREACH_CMT_REC (mm, cmt_info) {
+ if (strcmp(cmt_info->cmtid_str, cmtid_str) == 0) {
+ ret = mgmt_history_rollback_to_cmt(vty, cmt_info,
+ false);
+ return ret;
+ }
+
+ remove_file(cmt_info->cmt_json_file);
+ mgmt_cmt_infos_del(&mm->cmts, cmt_info);
+ XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
+ }
+
+ return 0;
+}
+
+int mgmt_history_rollback_n(struct vty *vty, int num_cmts)
+{
+ int ret = 0;
+ int cnt = 0;
+ struct mgmt_cmt_info_t *cmt_info;
+ size_t cmts;
+
+ if (!num_cmts)
+ num_cmts = 1;
+
+ cmts = mgmt_cmt_infos_count(&mm->cmts);
+ if ((int)cmts < num_cmts) {
+ vty_out(vty,
+ "Number of commits found (%d) less than required to rollback\n",
+ (int)cmts);
+ return -1;
+ }
+
+ if ((int)cmts == 1 || (int)cmts == num_cmts) {
+ vty_out(vty,
+ "Number of commits found (%d), Rollback of last commit is not supported\n",
+ (int)cmts);
+ return -1;
+ }
+
+ FOREACH_CMT_REC (mm, cmt_info) {
+ if (cnt == num_cmts) {
+ ret = mgmt_history_rollback_to_cmt(vty, cmt_info,
+ false);
+ return ret;
+ }
+
+ cnt++;
+ remove_file(cmt_info->cmt_json_file);
+ mgmt_cmt_infos_del(&mm->cmts, cmt_info);
+ XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
+ }
+
+ if (!mgmt_cmt_infos_count(&mm->cmts)) {
+ mgmt_ds_reset_candidate();
+ ret = mgmt_history_rollback_to_cmt(vty, cmt_info, true);
+ }
+
+ return ret;
+}
+
+void show_mgmt_cmt_history(struct vty *vty)
+{
+ struct mgmt_cmt_info_t *cmt_info;
+ int slno = 0;
+
+ vty_out(vty, "Last 10 commit history:\n");
+ vty_out(vty, "Slot Commit-ID Commit-Record-Time\n");
+ FOREACH_CMT_REC (mm, cmt_info) {
+ vty_out(vty, "%4d %23s %s\n", slno, cmt_info->cmtid_str,
+ cmt_info->time_str);
+ slno++;
+ }
+}
+
+void mgmt_history_new_record(struct mgmt_ds_ctx *ds_ctx)
+{
+ struct mgmt_cmt_info_t *cmt_info = mgmt_history_create_cmt_rec();
+
+ mgmt_ds_dump_ds_to_file(cmt_info->cmt_json_file, ds_ctx);
+ mgmt_history_dump_cmt_record_index();
+}
+
+void mgmt_history_init(void)
+{
+ /* Create commit record for previously stored commit-apply */
+ mgmt_cmt_infos_init(&mm->cmts);
+ mgmt_history_read_cmt_record_index();
+}
+
+void mgmt_history_destroy(void)
+{
+ struct mgmt_cmt_info_t *cmt_info;
+
+ FOREACH_CMT_REC(mm, cmt_info) {
+ mgmt_cmt_infos_del(&mm->cmts, cmt_info);
+ XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
+ }
+
+ mgmt_cmt_infos_fini(&mm->cmts);
+}
diff --git a/mgmtd/mgmt_history.h b/mgmtd/mgmt_history.h
new file mode 100644
index 00000000..5d9b6626
--- /dev/null
+++ b/mgmtd/mgmt_history.h
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ *
+ */
+#ifndef _FRR_MGMTD_HISTORY_H_
+#define _FRR_MGMTD_HISTORY_H_
+
+#include "vrf.h"
+
+PREDECL_DLIST(mgmt_cmt_infos);
+
+struct mgmt_ds_ctx;
+
+/*
+ * Rollback specific commit from commit history.
+ *
+ * vty
+ * VTY context.
+ *
+ * cmtid_str
+ * Specific commit id from commit history.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_history_rollback_by_id(struct vty *vty, const char *cmtid_str);
+
+/*
+ * Rollback n commits from commit history.
+ *
+ * vty
+ * VTY context.
+ *
+ * num_cmts
+ * Number of commits to be rolled back.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_history_rollback_n(struct vty *vty, int num_cmts);
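+
+/*
+ * Example (illustrative only): from a VTY command handler, roll back the two
+ * most recent commits:
+ *
+ *   if (mgmt_history_rollback_n(vty, 2) != 0)
+ *           vty_out(vty, "rollback failed\n");
+ */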
+
+extern void mgmt_history_rollback_complete(bool success);
+
+/*
+ * Show mgmt commit history.
+ */
+extern void show_mgmt_cmt_history(struct vty *vty);
+
+extern void mgmt_history_new_record(struct mgmt_ds_ctx *ds_ctx);
+
+extern void mgmt_history_destroy(void);
+extern void mgmt_history_init(void);
+
+/*
+ * 012345678901234567890123456789
+ * 2023-12-31T12:12:12,012345678
+ * 20231231121212012345678
+ */
+#define MGMT_LONG_TIME_FMT "%Y-%m-%dT%H:%M:%S"
+#define MGMT_LONG_TIME_MAX_LEN 30
+#define MGMT_SHORT_TIME_FMT "%Y%m%d%H%M%S"
+#define MGMT_SHORT_TIME_MAX_LEN 24
+
+static inline const char *
+mgmt_time_to_string(struct timespec *tv, bool long_fmt, char *buffer, size_t sz)
+{
+ struct tm tm;
+ size_t n;
+
+ localtime_r(&tv->tv_sec, &tm);
+
+ if (long_fmt) {
+ n = strftime(buffer, sz, MGMT_LONG_TIME_FMT, &tm);
+ assert(n < sz);
+ snprintf(&buffer[n], sz - n, ",%09lu", tv->tv_nsec);
+ } else {
+ n = strftime(buffer, sz, MGMT_SHORT_TIME_FMT, &tm);
+ assert(n < sz);
+ snprintf(&buffer[n], sz - n, "%09lu", tv->tv_nsec);
+ }
+
+ return buffer;
+}
+
+static inline const char *mgmt_realtime_to_string(struct timeval *tv, char *buf,
+ size_t sz)
+{
+ struct timespec ts = {.tv_sec = tv->tv_sec,
+ .tv_nsec = tv->tv_usec * 1000};
+
+ return mgmt_time_to_string(&ts, true, buf, sz);
+}
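+
+/*
+ * Example (illustrative only): format a stored commit timestamp for display,
+ * as done for the per-adapter commit statistics:
+ *
+ *   char buf[MGMT_LONG_TIME_MAX_LEN];
+ *
+ *   vty_out(vty, "Commit Start: %s\n",
+ *           mgmt_realtime_to_string(&cmt_stats->last_start, buf, sizeof(buf)));
+ */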
+
+#endif /* _FRR_MGMTD_HISTORY_H_ */
diff --git a/mgmtd/mgmt_main.c b/mgmtd/mgmt_main.c
new file mode 100644
index 00000000..e181d0da
--- /dev/null
+++ b/mgmtd/mgmt_main.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Main routine of mgmt.
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar
+ */
+
+#include <zebra.h>
+#include "lib/version.h"
+#include "routemap.h"
+#include "filter.h"
+#include "keychain.h"
+#include "libfrr.h"
+#include "frr_pthread.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_ds.h"
+#include "ripd/rip_nb.h"
+#include "ripngd/ripng_nb.h"
+#include "routing_nb.h"
+#include "affinitymap.h"
+#include "zebra/zebra_cli.h"
+
+/* mgmt options, we use GNU getopt library. */
+static const struct option longopts[] = {
+ {"skip_runas", no_argument, NULL, 'S'},
+ {"no_zebra", no_argument, NULL, 'Z'},
+ {"socket_size", required_argument, NULL, 's'},
+ {"vrfwnetns", no_argument, NULL, 'n'},
+ {0}};
+
+static void mgmt_exit(int);
+
+/* privileges */
+static zebra_capabilities_t _caps_p[] = {ZCAP_BIND, ZCAP_NET_RAW,
+ ZCAP_NET_ADMIN, ZCAP_SYS_ADMIN};
+
+struct zebra_privs_t mgmt_privs = {
+#if defined(FRR_USER) && defined(FRR_GROUP)
+ .user = FRR_USER,
+ .group = FRR_GROUP,
+#endif
+#ifdef VTY_GROUP
+ .vty_group = VTY_GROUP,
+#endif
+ .caps_p = _caps_p,
+ .cap_num_p = array_size(_caps_p),
+ .cap_num_i = 0,
+};
+
+static struct frr_daemon_info mgmtd_di;
+
+/* SIGHUP handler. */
+static void sighup(void)
+{
+ zlog_info("SIGHUP received, ignoring");
+
+ return;
+
+ /*
+	 * This is turned off for the moment. There are all
+	 * sorts of config turned off by mgmt_terminate
+	 * that are not set up properly again in mgmt_reset.
+ * I see no easy way to do this nor do I see that
+ * this is a desirable way to reload config
+ * given the yang work.
+ */
+ /* Terminate all thread. */
+ mgmt_terminate();
+
+ /*
+ * mgmt_reset();
+ */
+ zlog_info("MGMTD restarting!");
+
+ /*
+ * Reload config file.
+ * vty_read_config(NULL, mgmtd_di.config_file, config_default);
+ */
+ /* Try to return to normal operation. */
+}
+
+/* SIGINT handler. */
+static __attribute__((__noreturn__)) void sigint(void)
+{
+ zlog_notice("Terminating on signal");
+ assert(mm->terminating == false);
+ mm->terminating = true; /* global flag that shutting down */
+
+ mgmt_terminate();
+
+ mgmt_exit(0);
+
+ exit(0);
+}
+
+/* SIGUSR1 handler. */
+static void sigusr1(void)
+{
+ zlog_rotate();
+}
+
+/*
+ * Try to free up allocations we know about so that diagnostic tools such as
+ * valgrind are able to better illuminate leaks.
+ *
+ * Zebra route removal and protocol teardown are not meant to be done here.
+ * For example, "retain_mode" may be set.
+ */
+static __attribute__((__noreturn__)) void mgmt_exit(int status)
+{
+ /* it only makes sense for this to be called on a clean exit */
+ assert(status == 0);
+
+ frr_early_fini();
+
+ /* stop pthreads (if any) */
+ frr_pthread_stop_all();
+
+ frr_fini();
+ exit(status);
+}
+
+static struct frr_signal_t mgmt_signals[] = {
+ {
+ .signal = SIGHUP,
+ .handler = &sighup,
+ },
+ {
+ .signal = SIGUSR1,
+ .handler = &sigusr1,
+ },
+ {
+ .signal = SIGINT,
+ .handler = &sigint,
+ },
+ {
+ .signal = SIGTERM,
+ .handler = &sigint,
+ },
+};
+
+#ifdef HAVE_STATICD
+extern const struct frr_yang_module_info frr_staticd_cli_info;
+#endif
+
+/*
+ * These are modules that are only needed by mgmtd and hence not included into
+ * the lib and backend daemons.
+ */
+const struct frr_yang_module_info ietf_netconf_with_defaults_info = {
+ .name = "ietf-netconf-with-defaults",
+ .ignore_cfg_cbs = true,
+ .nodes = { { .xpath = NULL } },
+};
+
+/*
+ * These are stub info structs that are used to load the modules used by backend
+ * clients into mgmtd. The modules are used by libyang in order to support
+ * parsing binary data returned from the backend.
+ */
+const struct frr_yang_module_info zebra_route_map_info = {
+ .name = "frr-zebra-route-map",
+ .ignore_cfg_cbs = true,
+ .nodes = { { .xpath = NULL } },
+};
+
+/*
+ * List of YANG modules to be loaded in the process context of
+ * MGMTd.
+ */
+static const struct frr_yang_module_info *const mgmt_yang_modules[] = {
+ &frr_filter_cli_info,
+ &frr_interface_cli_info,
+ &frr_route_map_cli_info,
+ &frr_routing_cli_info,
+ &frr_vrf_cli_info,
+ &frr_affinity_map_cli_info,
+
+ /* mgmtd-only modules */
+ &ietf_netconf_with_defaults_info,
+
+ /*
+	 * YANG module info used by backend clients gets added here.
+ */
+
+ &frr_zebra_cli_info,
+ &zebra_route_map_info,
+ &ietf_key_chain_cli_info,
+ &ietf_key_chain_deviation_info,
+
+#ifdef HAVE_RIPD
+ &frr_ripd_cli_info,
+#endif
+#ifdef HAVE_RIPNGD
+ &frr_ripngd_cli_info,
+#endif
+#ifdef HAVE_STATICD
+ &frr_staticd_cli_info,
+#endif
+};
+
+/* clang-format off */
+FRR_DAEMON_INFO(mgmtd, MGMTD,
+ .vty_port = MGMTD_VTY_PORT,
+ .proghelp = "FRR Management Daemon.",
+
+ .signals = mgmt_signals,
+ .n_signals = array_size(mgmt_signals),
+
+ .privs = &mgmt_privs,
+
+ .yang_modules = mgmt_yang_modules,
+ .n_yang_modules = array_size(mgmt_yang_modules),
+
+ /* avoid libfrr trying to read our config file for us */
+ .flags = FRR_MANUAL_VTY_START | FRR_NO_SPLIT_CONFIG,
+ );
+/* clang-format on */
+
+#define DEPRECATED_OPTIONS ""
+
+struct frr_daemon_info *mgmt_daemon_info = &mgmtd_di;
+
+/* Main routine of mgmt. Argument handling and starting of the mgmt finite
+ * state machine are handled here.
+ */
+int main(int argc, char **argv)
+{
+ int opt;
+ int buffer_size = MGMTD_SOCKET_BUF_SIZE;
+
+ frr_preinit(&mgmtd_di, argc, argv);
+ frr_opt_add(
+ "s:n" DEPRECATED_OPTIONS, longopts,
+ " -s, --socket_size Set MGMTD peer socket send buffer size\n"
+ " -n, --vrfwnetns Use NetNS as VRF backend\n");
+
+ /* Command line argument treatment. */
+ while (1) {
+ opt = frr_getopt(argc, argv, 0);
+
+ if (opt && opt < 128 && strchr(DEPRECATED_OPTIONS, opt)) {
+ fprintf(stderr,
+ "The -%c option no longer exists.\nPlease refer to the manual.\n",
+ opt);
+ continue;
+ }
+
+ if (opt == EOF)
+ break;
+
+ switch (opt) {
+ case 0:
+ break;
+ case 's':
+ buffer_size = atoi(optarg);
+ break;
+ case 'n':
+ vrf_configure_backend(VRF_BACKEND_NETNS);
+ break;
+ default:
+ frr_help_exit(1);
+ break;
+ }
+ }
+
+ /* MGMTD master init. */
+ mgmt_master_init(frr_init(), buffer_size);
+
+ /* VRF commands initialization. */
+ vrf_cmd_init(NULL);
+
+ /* Interface commands initialization. */
+ if_cmd_init(NULL);
+
+ /* MGMTD related initialization. */
+ mgmt_init();
+
+ frr_config_fork();
+
+ frr_run(mm->master);
+
+ /* Not reached. */
+ return 0;
+}
diff --git a/mgmtd/mgmt_memory.c b/mgmtd/mgmt_memory.c
new file mode 100644
index 00000000..72ccca06
--- /dev/null
+++ b/mgmtd/mgmt_memory.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * mgmt memory type definitions
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "mgmt_memory.h"
+
+/* this file is temporary in nature; definitions should be moved to the
+ * files they're used in
+ */
+
+DEFINE_MGROUP(MGMTD, "mgmt");
+DEFINE_MTYPE(MGMTD, MGMTD, "instance");
+DEFINE_MTYPE(MGMTD, MGMTD_XPATH, "xpath regex");
+DEFINE_MTYPE(MGMTD, MGMTD_ERR, "error");
+DEFINE_MTYPE(MGMTD, MGMTD_BE_ADPATER, "backend adapter");
+DEFINE_MTYPE(MGMTD, MGMTD_FE_ADPATER, "frontend adapter");
+DEFINE_MTYPE(MGMTD, MGMTD_FE_SESSION, "frontend session");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN, "txn");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_REQ, "txn request");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_SETCFG_REQ, "txn set-config requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_COMMCFG_REQ, "txn commit-config requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETDATA_REQ, "txn get-data requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETDATA_REPLY, "txn get-data replies");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETTREE_REQ, "txn get-tree requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_RPC_REQ, "txn rpc requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_CFG_BATCH, "txn config batches");
+DEFINE_MTYPE(MGMTD, MGMTD_CMT_INFO, "commit info");
diff --git a/mgmtd/mgmt_memory.h b/mgmtd/mgmt_memory.h
new file mode 100644
index 00000000..e28586ed
--- /dev/null
+++ b/mgmtd/mgmt_memory.h
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * mgmt memory type declarations
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_MEMORY_H
+#define _FRR_MGMTD_MEMORY_H
+
+#include "memory.h"
+
+DECLARE_MGROUP(MGMTD);
+DECLARE_MTYPE(MGMTD);
+DECLARE_MTYPE(MGMTD_XPATH);
+DECLARE_MTYPE(MGMTD_ERR);
+DECLARE_MTYPE(MGMTD_BE_ADPATER);
+DECLARE_MTYPE(MGMTD_FE_ADPATER);
+DECLARE_MTYPE(MGMTD_FE_SESSION);
+DECLARE_MTYPE(MGMTD_TXN);
+DECLARE_MTYPE(MGMTD_TXN_REQ);
+DECLARE_MTYPE(MGMTD_TXN_SETCFG_REQ);
+DECLARE_MTYPE(MGMTD_TXN_COMMCFG_REQ);
+DECLARE_MTYPE(MGMTD_TXN_GETDATA_REQ);
+DECLARE_MTYPE(MGMTD_TXN_GETDATA_REPLY);
+DECLARE_MTYPE(MGMTD_TXN_GETTREE_REQ);
+DECLARE_MTYPE(MGMTD_TXN_RPC_REQ);
+DECLARE_MTYPE(MGMTD_TXN_CFG_BATCH);
+DECLARE_MTYPE(MGMTD_BE_ADAPTER_MSG_BUF);
+DECLARE_MTYPE(MGMTD_CMT_INFO);
+#endif /* _FRR_MGMTD_MEMORY_H */
diff --git a/mgmtd/mgmt_testc.c b/mgmtd/mgmt_testc.c
new file mode 100644
index 00000000..8bb07ed0
--- /dev/null
+++ b/mgmtd/mgmt_testc.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * January 29 2024, Christian Hopps <chopps@labn.net>
+ *
+ * Copyright (c) 2024, LabN Consulting, L.L.C.
+ *
+ */
+
+#include <zebra.h>
+#include <lib/version.h>
+#include "darr.h"
+#include "libfrr.h"
+#include "mgmt_be_client.h"
+#include "northbound.h"
+
+/* ---------------- */
+/* Local Prototypes */
+/* ---------------- */
+
+static void async_notification(struct nb_cb_notify_args *args);
+static int rpc_callback(struct nb_cb_rpc_args *args);
+
+static void sigusr1(void);
+static void sigint(void);
+
+/* ----------- */
+/* Global Data */
+/* ----------- */
+
+/* privileges */
+static zebra_capabilities_t _caps_p[] = {};
+
+struct zebra_privs_t __privs = {
+#if defined(FRR_USER) && defined(FRR_GROUP)
+ .user = FRR_USER,
+ .group = FRR_GROUP,
+#endif
+#ifdef VTY_GROUP
+ .vty_group = VTY_GROUP,
+#endif
+ .caps_p = _caps_p,
+ .cap_num_p = array_size(_caps_p),
+ .cap_num_i = 0,
+};
+
+#define OPTION_LISTEN 2000
+#define OPTION_NOTIF_COUNT 2001
+#define OPTION_TIMEOUT 2002
+const struct option longopts[] = {
+ { "listen", no_argument, NULL, OPTION_LISTEN },
+ { "notif-count", required_argument, NULL, OPTION_NOTIF_COUNT },
+ { "timeout", required_argument, NULL, OPTION_TIMEOUT },
+ { 0 }
+};
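+
+/*
+ * Example invocation (illustrative only; assumes the test client binary is
+ * installed as mgmtd_testc): wait up to 10 seconds for one frr-ripd
+ * authentication-failure notification, then exit:
+ *
+ *   mgmtd_testc --listen --notif-count 1 --timeout 10 \
+ *       /frr-ripd:authentication-failure
+ */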
+
+
+/* Master of threads. */
+struct event_loop *master;
+
+struct mgmt_be_client *mgmt_be_client;
+
+static struct frr_daemon_info mgmtd_testc_di;
+
+struct frr_signal_t __signals[] = {
+ {
+ .signal = SIGUSR1,
+ .handler = &sigusr1,
+ },
+ {
+ .signal = SIGINT,
+ .handler = &sigint,
+ },
+ {
+ .signal = SIGTERM,
+ .handler = &sigint,
+ },
+};
+
+#define MGMTD_TESTC_VTY_PORT 2624
+
+/* clang-format off */
+static const struct frr_yang_module_info frr_ripd_info = {
+ .name = "frr-ripd",
+ .ignore_cfg_cbs = true,
+ .nodes = {
+ {
+ .xpath = "/frr-ripd:authentication-failure",
+ .cbs.notify = async_notification,
+ },
+ {
+ .xpath = "/frr-ripd:clear-rip-route",
+ .cbs.rpc = rpc_callback,
+ },
+ {
+ .xpath = NULL,
+ }
+ }
+};
+
+static const struct frr_yang_module_info *const mgmt_yang_modules[] = {
+ &frr_ripd_info,
+};
+
+FRR_DAEMON_INFO(mgmtd_testc, MGMTD_TESTC,
+ .proghelp = "FRR Management Daemon Test Client.",
+
+ .signals = __signals,
+ .n_signals = array_size(__signals),
+
+ .privs = &__privs,
+
+ .yang_modules = mgmt_yang_modules,
+ .n_yang_modules = array_size(mgmt_yang_modules),
+
+ /* avoid libfrr trying to read our config file for us */
+ .flags = FRR_MANUAL_VTY_START,
+ );
+/* clang-format on */
+
+const char **__notif_xpaths;
+const char **__rpc_xpaths;
+
+struct mgmt_be_client_cbs __client_cbs = {};
+struct event *event_timeout;
+
+int o_notif_count = 1;
+int o_timeout;
+
+/* --------- */
+/* Functions */
+/* --------- */
+
+
+static void sigusr1(void)
+{
+ zlog_rotate();
+}
+
+static void quit(int exit_code)
+{
+ EVENT_OFF(event_timeout);
+ darr_free(__client_cbs.notif_xpaths);
+ darr_free(__client_cbs.rpc_xpaths);
+
+ frr_fini();
+
+ exit(exit_code);
+}
+
+static void sigint(void)
+{
+ zlog_notice("Terminating on signal");
+ quit(0);
+}
+
+static void timeout(struct event *event)
+{
+ zlog_notice("Timeout, exiting");
+ quit(1);
+}
+
+static void success(struct event *event)
+{
+ zlog_notice("Success, exiting");
+ quit(0);
+}
+
+static void async_notification(struct nb_cb_notify_args *args)
+{
+ zlog_notice("Received YANG notification");
+
+ printf("{\"frr-ripd:authentication-failure\": {\"interface-name\": \"%s\"}}\n",
+ yang_dnode_get_string(args->dnode, "interface-name"));
+
+ if (o_notif_count && !--o_notif_count)
+ quit(0);
+}
+
+static int rpc_callback(struct nb_cb_rpc_args *args)
+{
+ const char *vrf = NULL;
+
+ zlog_notice("Received YANG RPC");
+
+ if (yang_dnode_exists(args->input, "vrf"))
+ vrf = yang_dnode_get_string(args->input, "vrf");
+
+ printf("{\"frr-ripd:clear-rip-route\": {\"vrf\": \"%s\"}}\n", vrf);
+
+ event_cancel(&event_timeout);
+ event_add_timer(master, success, NULL, 1, NULL);
+
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ int f_listen = 0;
+ int i;
+
+ frr_preinit(&mgmtd_testc_di, argc, argv);
+ frr_opt_add("", longopts, "");
+
+ while (1) {
+ int opt;
+
+ opt = frr_getopt(argc, argv, NULL);
+
+ if (opt == EOF)
+ break;
+
+ switch (opt) {
+ case OPTION_LISTEN:
+ f_listen = 1;
+ break;
+ case OPTION_NOTIF_COUNT:
+ o_notif_count = atoi(optarg);
+ break;
+ case OPTION_TIMEOUT:
+ o_timeout = atoi(optarg);
+ break;
+ case 0:
+ break;
+ default:
+ frr_help_exit(1);
+ }
+ }
+
+ master = frr_init();
+
+ /*
+ * Setup notification listen
+ */
+ argv += optind;
+ argc -= optind;
+ if (!argc && f_listen) {
+ fprintf(stderr,
+ "Must specify at least one notification xpath to listen to\n");
+ exit(1);
+ }
+ if (argc && f_listen) {
+ for (i = 0; i < argc; i++) {
+ zlog_notice("Listen on xpath: %s", argv[i]);
+ darr_push(__notif_xpaths, argv[i]);
+ }
+ __client_cbs.notif_xpaths = __notif_xpaths;
+ __client_cbs.nnotif_xpaths = darr_len(__notif_xpaths);
+ }
+
+ darr_push(__rpc_xpaths, "/frr-ripd:clear-rip-route");
+ __client_cbs.rpc_xpaths = __rpc_xpaths;
+ __client_cbs.nrpc_xpaths = darr_len(__rpc_xpaths);
+
+ mgmt_be_client = mgmt_be_client_create("mgmtd-testc", &__client_cbs, 0,
+ master);
+
+ frr_config_fork();
+
+ if (o_timeout)
+ event_add_timer(master, timeout, NULL, o_timeout, &event_timeout);
+
+ frr_run(master);
+
+ /* Reached. */
+ return 0;
+}
diff --git a/mgmtd/mgmt_txn.c b/mgmtd/mgmt_txn.c
new file mode 100644
index 00000000..0f0cccbb
--- /dev/null
+++ b/mgmtd/mgmt_txn.c
@@ -0,0 +1,2946 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Transactions
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "darr.h"
+#include "hash.h"
+#include "jhash.h"
+#include "libfrr.h"
+#include "mgmt_msg.h"
+#include "mgmt_msg_native.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_txn.h"
+
+#define __dbg(fmt, ...) \
+ DEBUGD(&mgmt_debug_txn, "TXN: %s: " fmt, __func__, ##__VA_ARGS__)
+#define __log_err(fmt, ...) zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+
+#define MGMTD_TXN_LOCK(txn) mgmt_txn_lock(txn, __FILE__, __LINE__)
+#define MGMTD_TXN_UNLOCK(txn) mgmt_txn_unlock(txn, __FILE__, __LINE__)
+
+enum mgmt_txn_event {
+ MGMTD_TXN_PROC_SETCFG = 1,
+ MGMTD_TXN_PROC_COMMITCFG,
+ MGMTD_TXN_PROC_GETCFG,
+ MGMTD_TXN_PROC_GETTREE,
+ MGMTD_TXN_PROC_RPC,
+ MGMTD_TXN_COMMITCFG_TIMEOUT,
+};
+
+PREDECL_LIST(mgmt_txn_reqs);
+
+struct mgmt_set_cfg_req {
+ Mgmtd__DatastoreId ds_id;
+ struct mgmt_ds_ctx *ds_ctx;
+ struct nb_cfg_change cfg_changes[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ uint16_t num_cfg_changes;
+ bool implicit_commit;
+ Mgmtd__DatastoreId dst_ds_id;
+ struct mgmt_ds_ctx *dst_ds_ctx;
+ struct mgmt_setcfg_stats *setcfg_stats;
+};
+
+enum mgmt_commit_phase {
+ MGMTD_COMMIT_PHASE_PREPARE_CFG = 0,
+ MGMTD_COMMIT_PHASE_TXN_CREATE,
+ MGMTD_COMMIT_PHASE_APPLY_CFG,
+ MGMTD_COMMIT_PHASE_TXN_DELETE,
+ MGMTD_COMMIT_PHASE_MAX
+};
+
+static inline const char *mgmt_commit_phase2str(enum mgmt_commit_phase cmt_phase)
+{
+ switch (cmt_phase) {
+ case MGMTD_COMMIT_PHASE_PREPARE_CFG:
+ return "PREP-CFG";
+ case MGMTD_COMMIT_PHASE_TXN_CREATE:
+ return "CREATE-TXN";
+ case MGMTD_COMMIT_PHASE_APPLY_CFG:
+ return "APPLY-CFG";
+ case MGMTD_COMMIT_PHASE_TXN_DELETE:
+ return "DELETE-TXN";
+ case MGMTD_COMMIT_PHASE_MAX:
+ return "Invalid/Unknown";
+ }
+
+ return "Invalid/Unknown";
+}
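+
+/*
+ * The phase values above are also compared numerically (see the range check
+ * in mgmt_txn_req_free()), so a successful commit is expected to advance
+ * through them in enum order: PREP-CFG -> CREATE-TXN -> APPLY-CFG ->
+ * DELETE-TXN.
+ */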
+
+PREDECL_LIST(mgmt_txn_batches);
+
+struct mgmt_txn_be_cfg_batch {
+ struct mgmt_txn_ctx *txn;
+ enum mgmt_be_client_id be_id;
+ struct mgmt_be_client_adapter *be_adapter;
+ Mgmtd__YangCfgDataReq cfg_data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ Mgmtd__YangCfgDataReq *cfg_datap[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ Mgmtd__YangData data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ Mgmtd__YangDataValue value[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ size_t num_cfg_data;
+ int buf_space_left;
+ struct mgmt_txn_batches_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_txn_batches, struct mgmt_txn_be_cfg_batch, list_linkage);
+
+#define FOREACH_TXN_CFG_BATCH_IN_LIST(list, batch) \
+ frr_each_safe (mgmt_txn_batches, list, batch)
+
+struct mgmt_edit_req {
+ char xpath_created[XPATH_MAXLEN];
+ bool unlock;
+};
+
+struct mgmt_commit_cfg_req {
+ Mgmtd__DatastoreId src_ds_id;
+ struct mgmt_ds_ctx *src_ds_ctx;
+ Mgmtd__DatastoreId dst_ds_id;
+ struct mgmt_ds_ctx *dst_ds_ctx;
+ uint32_t nb_txn_id;
+ uint8_t validate_only : 1;
+ uint8_t abort : 1;
+ uint8_t implicit : 1;
+ uint8_t rollback : 1;
+ uint8_t init : 1;
+
+ /* Track commit phases */
+ enum mgmt_commit_phase phase;
+
+ enum mgmt_commit_phase be_phase[MGMTD_BE_CLIENT_ID_MAX];
+
+ /*
+ * Additional information when the commit is triggered by native edit
+ * request.
+ */
+ struct mgmt_edit_req *edit;
+
+ /*
+ * Set of config changes to commit. This is used only
+ * when changes are NOT to be determined by comparing
+ * candidate and running DSs. This is typically used
+ * for downloading all relevant configs for a new backend
+ * client that has recently come up and connected with
+ * MGMTD.
+ */
+ struct nb_config_cbs *cfg_chgs;
+
+ /*
+ * Details on all the Backend Clients associated with
+ * this commit.
+ */
+ uint64_t clients;
+
+ /*
+ * List of backend batches for this commit to be validated
+ * and applied at the backend.
+ */
+ struct mgmt_txn_batches_head batches[MGMTD_BE_CLIENT_ID_MAX];
+ /*
+ * The last batch added for any backend client.
+ */
+ struct mgmt_txn_be_cfg_batch *last_be_cfg_batch[MGMTD_BE_CLIENT_ID_MAX];
+
+ struct mgmt_commit_stats *cmt_stats;
+};
+
+struct mgmt_get_data_reply {
+ /* Buffer space for preparing data reply */
+ int num_reply;
+ int last_batch;
+ Mgmtd__YangDataReply data_reply;
+ Mgmtd__YangData reply_data[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+ Mgmtd__YangData *reply_datap[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+ Mgmtd__YangDataValue reply_value[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+ char *reply_xpathp[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+};
+
+struct mgmt_get_data_req {
+ Mgmtd__DatastoreId ds_id;
+ struct nb_config *cfg_root;
+ char *xpaths[MGMTD_MAX_NUM_DATA_REQ_IN_BATCH];
+ int num_xpaths;
+
+ /*
+ * Buffer space for preparing reply.
+ * NOTE: Should only be malloc-ed on demand to reduce
+	 * memory footprint. Freed up via mgmt_txn_req_free()
+ */
+ struct mgmt_get_data_reply *reply;
+
+ int total_reply;
+};
+
+
+struct txn_req_get_tree {
+ char *xpath; /* xpath of tree to get */
+ uint64_t sent_clients; /* Bitmask of clients sent req to */
+ uint64_t recv_clients; /* Bitmask of clients recv reply from */
+	int32_t partial_error;	   /* an error while gathering results */
+ uint8_t result_type; /* LYD_FORMAT for results */
+ uint8_t wd_options; /* LYD_PRINT_WD_* flags for results */
+ uint8_t exact; /* if exact node is requested */
+ uint8_t simple_xpath; /* if xpath is simple */
+ struct lyd_node *client_results; /* result tree from clients */
+};
+
+struct txn_req_rpc {
+ char *xpath; /* xpath of rpc/action to invoke */
+ uint64_t sent_clients; /* Bitmask of clients sent req to */
+ uint64_t recv_clients; /* Bitmask of clients recv reply from */
+ uint8_t result_type; /* LYD_FORMAT for results */
+ char *errstr; /* error string */
+ struct lyd_node *client_results; /* result tree from clients */
+};
+
+struct mgmt_txn_req {
+ struct mgmt_txn_ctx *txn;
+ enum mgmt_txn_event req_event;
+ uint64_t req_id;
+ union {
+ struct mgmt_set_cfg_req *set_cfg;
+ struct mgmt_get_data_req *get_data;
+ struct txn_req_get_tree *get_tree;
+ struct txn_req_rpc *rpc;
+ struct mgmt_commit_cfg_req commit_cfg;
+ } req;
+
+ struct mgmt_txn_reqs_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_txn_reqs, struct mgmt_txn_req, list_linkage);
+
+#define FOREACH_TXN_REQ_IN_LIST(list, req) \
+ frr_each_safe (mgmt_txn_reqs, list, req)
+
+struct mgmt_txn_ctx {
+ uint64_t session_id; /* One transaction per client session */
+ uint64_t txn_id;
+ enum mgmt_txn_type type;
+
+ /* struct mgmt_master *mm; */
+
+ struct event *proc_set_cfg;
+ struct event *proc_comm_cfg;
+ struct event *proc_get_cfg;
+ struct event *proc_get_data;
+ struct event *proc_get_tree;
+ struct event *comm_cfg_timeout;
+ struct event *get_tree_timeout;
+ struct event *rpc_timeout;
+ struct event *clnup;
+
+ /* List of backend adapters involved in this transaction */
+ struct mgmt_txn_badapters_head be_adapters;
+
+ int refcount;
+
+ struct mgmt_txns_item list_linkage;
+
+ /* TODO: why do we need unique lists for each type of transaction since
+ * a transaction is of only 1 type?
+ */
+
+ /*
+ * List of pending set-config requests for a given
+ * transaction/session. Just one list for requests
+ * not processed at all. There's no backend interaction
+ * involved.
+ */
+ struct mgmt_txn_reqs_head set_cfg_reqs;
+ /*
+ * List of pending get-config requests for a given
+ * transaction/session. Just one list for requests
+ * not processed at all. There's no backend interaction
+ * involved.
+ */
+ struct mgmt_txn_reqs_head get_cfg_reqs;
+ /*
+ * List of pending get-tree requests.
+ */
+ struct mgmt_txn_reqs_head get_tree_reqs;
+ /*
+ * List of pending rpc requests.
+ */
+ struct mgmt_txn_reqs_head rpc_reqs;
+ /*
+ * There will always be one commit-config allowed for a given
+ * transaction/session. No need to maintain lists for it.
+ */
+ struct mgmt_txn_req *commit_cfg_req;
+};
+
+DECLARE_LIST(mgmt_txns, struct mgmt_txn_ctx, list_linkage);
+
+#define FOREACH_TXN_IN_LIST(mm, txn) \
+ frr_each_safe (mgmt_txns, &(mm)->txn_list, (txn))
+
+static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
+ enum mgmt_result result,
+ const char *error_if_any);
+
+static inline const char *mgmt_txn_commit_phase_str(struct mgmt_txn_ctx *txn)
+{
+ if (!txn->commit_cfg_req)
+ return "None";
+
+ return mgmt_commit_phase2str(txn->commit_cfg_req->req.commit_cfg.phase);
+}
+
+static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file, int line);
+static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
+ int line);
+static int mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
+ struct mgmt_be_client_adapter *adapter);
+
+static struct event_loop *mgmt_txn_tm;
+static struct mgmt_master *mgmt_txn_mm;
+
+static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
+ enum mgmt_txn_event event);
+
+static void mgmt_txn_cleanup_txn(struct mgmt_txn_ctx **txn);
+
+static struct mgmt_txn_be_cfg_batch *
+mgmt_txn_cfg_batch_alloc(struct mgmt_txn_ctx *txn, enum mgmt_be_client_id id,
+ struct mgmt_be_client_adapter *be_adapter)
+{
+ struct mgmt_txn_be_cfg_batch *batch;
+
+ batch = XCALLOC(MTYPE_MGMTD_TXN_CFG_BATCH,
+ sizeof(struct mgmt_txn_be_cfg_batch));
+ assert(batch);
+ batch->be_id = id;
+
+ batch->txn = txn;
+ MGMTD_TXN_LOCK(txn);
+ assert(txn->commit_cfg_req);
+ mgmt_txn_batches_add_tail(&txn->commit_cfg_req->req.commit_cfg
+ .batches[id],
+ batch);
+ batch->be_adapter = be_adapter;
+ batch->buf_space_left = MGMTD_BE_CFGDATA_MAX_MSG_LEN;
+ if (be_adapter)
+ mgmt_be_adapter_lock(be_adapter);
+
+ txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] = batch;
+
+ return batch;
+}
+
+static void mgmt_txn_cfg_batch_free(struct mgmt_txn_be_cfg_batch **batch)
+{
+ size_t indx;
+ struct mgmt_commit_cfg_req *cmtcfg_req;
+
+ __dbg(" freeing batch txn-id %" PRIu64, (*batch)->txn->txn_id);
+
+ assert((*batch)->txn && (*batch)->txn->type == MGMTD_TXN_TYPE_CONFIG);
+
+ cmtcfg_req = &(*batch)->txn->commit_cfg_req->req.commit_cfg;
+ mgmt_txn_batches_del(&cmtcfg_req->batches[(*batch)->be_id], *batch);
+
+ if ((*batch)->be_adapter)
+ mgmt_be_adapter_unlock(&(*batch)->be_adapter);
+
+ for (indx = 0; indx < (*batch)->num_cfg_data; indx++) {
+ if ((*batch)->data[indx].xpath) {
+ free((*batch)->data[indx].xpath);
+ (*batch)->data[indx].xpath = NULL;
+ }
+ }
+
+ MGMTD_TXN_UNLOCK(&(*batch)->txn);
+
+ XFREE(MTYPE_MGMTD_TXN_CFG_BATCH, *batch);
+ *batch = NULL;
+}
+
+static void mgmt_txn_cleanup_be_cfg_batches(struct mgmt_txn_ctx *txn,
+ enum mgmt_be_client_id id)
+{
+ struct mgmt_txn_be_cfg_batch *batch;
+ struct mgmt_txn_batches_head *list;
+
+ list = &txn->commit_cfg_req->req.commit_cfg.batches[id];
+ FOREACH_TXN_CFG_BATCH_IN_LIST (list, batch)
+ mgmt_txn_cfg_batch_free(&batch);
+
+ mgmt_txn_batches_fini(list);
+
+ txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] = NULL;
+}
+
+static struct mgmt_txn_req *mgmt_txn_req_alloc(struct mgmt_txn_ctx *txn,
+ uint64_t req_id,
+ enum mgmt_txn_event req_event)
+{
+ struct mgmt_txn_req *txn_req;
+ enum mgmt_be_client_id id;
+
+ txn_req = XCALLOC(MTYPE_MGMTD_TXN_REQ, sizeof(struct mgmt_txn_req));
+ assert(txn_req);
+ txn_req->txn = txn;
+ txn_req->req_id = req_id;
+ txn_req->req_event = req_event;
+
+ switch (txn_req->req_event) {
+ case MGMTD_TXN_PROC_SETCFG:
+ txn_req->req.set_cfg = XCALLOC(MTYPE_MGMTD_TXN_SETCFG_REQ,
+ sizeof(struct mgmt_set_cfg_req));
+ assert(txn_req->req.set_cfg);
+ mgmt_txn_reqs_add_tail(&txn->set_cfg_reqs, txn_req);
+ __dbg("Added a new SETCFG req-id: %" PRIu64 " txn-id: %" PRIu64
+ ", session-id: %" PRIu64,
+ txn_req->req_id, txn->txn_id, txn->session_id);
+ break;
+ case MGMTD_TXN_PROC_COMMITCFG:
+ txn->commit_cfg_req = txn_req;
+ __dbg("Added a new COMMITCFG req-id: %" PRIu64
+ " txn-id: %" PRIu64 " session-id: %" PRIu64,
+ txn_req->req_id, txn->txn_id, txn->session_id);
+
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ txn_req->req.commit_cfg.be_phase[id] =
+ MGMTD_COMMIT_PHASE_PREPARE_CFG;
+ mgmt_txn_batches_init(
+ &txn_req->req.commit_cfg.batches[id]);
+ }
+
+ txn_req->req.commit_cfg.phase = MGMTD_COMMIT_PHASE_PREPARE_CFG;
+ break;
+ case MGMTD_TXN_PROC_GETCFG:
+ txn_req->req.get_data =
+ XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
+ sizeof(struct mgmt_get_data_req));
+ assert(txn_req->req.get_data);
+ mgmt_txn_reqs_add_tail(&txn->get_cfg_reqs, txn_req);
+ __dbg("Added a new GETCFG req-id: %" PRIu64 " txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ txn_req->req_id, txn->txn_id, txn->session_id);
+ break;
+ case MGMTD_TXN_PROC_GETTREE:
+ txn_req->req.get_tree = XCALLOC(MTYPE_MGMTD_TXN_GETTREE_REQ,
+ sizeof(struct txn_req_get_tree));
+ mgmt_txn_reqs_add_tail(&txn->get_tree_reqs, txn_req);
+ __dbg("Added a new GETTREE req-id: %" PRIu64 " txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ txn_req->req_id, txn->txn_id, txn->session_id);
+ break;
+ case MGMTD_TXN_PROC_RPC:
+ txn_req->req.rpc = XCALLOC(MTYPE_MGMTD_TXN_RPC_REQ,
+ sizeof(struct txn_req_rpc));
+ assert(txn_req->req.rpc);
+ mgmt_txn_reqs_add_tail(&txn->rpc_reqs, txn_req);
+ __dbg("Added a new RPC req-id: %" PRIu64 " txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ txn_req->req_id, txn->txn_id, txn->session_id);
+ break;
+ case MGMTD_TXN_COMMITCFG_TIMEOUT:
+ break;
+ }
+
+ MGMTD_TXN_LOCK(txn);
+
+ return txn_req;
+}
+
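+/*
+ * Free a transaction request, releasing its type-specific state and removing
+ * it from its request list. For COMMITCFG requests this also cleans up the
+ * per-backend config batches and, if the commit was still in flight, sends
+ * TXN_DELETE to the involved backends.
+ */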
+static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
+{
+ int indx;
+ struct mgmt_txn_reqs_head *req_list = NULL;
+ enum mgmt_be_client_id id;
+ struct mgmt_be_client_adapter *adapter;
+ struct mgmt_commit_cfg_req *ccreq;
+ struct mgmt_set_cfg_req *set_cfg;
+ bool cleanup;
+
+ switch ((*txn_req)->req_event) {
+ case MGMTD_TXN_PROC_SETCFG:
+ set_cfg = (*txn_req)->req.set_cfg;
+ for (indx = 0; indx < set_cfg->num_cfg_changes; indx++) {
+ if (set_cfg->cfg_changes[indx].value)
+ free((void *)set_cfg->cfg_changes[indx].value);
+ }
+ req_list = &(*txn_req)->txn->set_cfg_reqs;
+ __dbg("Deleting SETCFG req-id: %" PRIu64 " txn-id: %" PRIu64,
+ (*txn_req)->req_id, (*txn_req)->txn->txn_id);
+ XFREE(MTYPE_MGMTD_TXN_SETCFG_REQ, (*txn_req)->req.set_cfg);
+ break;
+ case MGMTD_TXN_PROC_COMMITCFG:
+ __dbg("Deleting COMMITCFG req-id: %" PRIu64 " txn-id: %" PRIu64,
+ (*txn_req)->req_id, (*txn_req)->txn->txn_id);
+
+ ccreq = &(*txn_req)->req.commit_cfg;
+ cleanup = (ccreq->phase >= MGMTD_COMMIT_PHASE_TXN_CREATE &&
+ ccreq->phase < MGMTD_COMMIT_PHASE_TXN_DELETE);
+
+ XFREE(MTYPE_MGMTD_TXN_REQ, ccreq->edit);
+
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ /*
+ * Send TXN_DELETE to clean up state for this
+ * transaction on the backend.
+ */
+
+ /*
+ * Get rid of the batches first so we don't end up doing
+ * anything more with them
+ */
+ mgmt_txn_cleanup_be_cfg_batches((*txn_req)->txn, id);
+
+ /*
+ * If we were in the middle of the state machine then
+ * send a txn delete message
+ */
+ adapter = mgmt_be_get_adapter_by_id(id);
+ if (adapter && cleanup && IS_IDBIT_SET(ccreq->clients, id))
+ mgmt_txn_send_be_txn_delete((*txn_req)->txn,
+ adapter);
+ }
+ break;
+ case MGMTD_TXN_PROC_GETCFG:
+ for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
+ indx++) {
+ if ((*txn_req)->req.get_data->xpaths[indx])
+ free((void *)(*txn_req)
+ ->req.get_data->xpaths[indx]);
+ }
+ req_list = &(*txn_req)->txn->get_cfg_reqs;
+ __dbg("Deleting GETCFG req-id: %" PRIu64 " txn-id: %" PRIu64,
+ (*txn_req)->req_id, (*txn_req)->txn->txn_id);
+ if ((*txn_req)->req.get_data->reply)
+ XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
+ (*txn_req)->req.get_data->reply);
+
+ if ((*txn_req)->req.get_data->cfg_root)
+ nb_config_free((*txn_req)->req.get_data->cfg_root);
+
+ XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
+ break;
+ case MGMTD_TXN_PROC_GETTREE:
+ __dbg("Deleting GETTREE req-id: %" PRIu64 " of txn-id: %" PRIu64,
+ (*txn_req)->req_id, (*txn_req)->txn->txn_id);
+ req_list = &(*txn_req)->txn->get_tree_reqs;
+ lyd_free_all((*txn_req)->req.get_tree->client_results);
+ XFREE(MTYPE_MGMTD_XPATH, (*txn_req)->req.get_tree->xpath);
+ XFREE(MTYPE_MGMTD_TXN_GETTREE_REQ, (*txn_req)->req.get_tree);
+ break;
+ case MGMTD_TXN_PROC_RPC:
+ __dbg("Deleting RPC req-id: %" PRIu64 " txn-id: %" PRIu64,
+ (*txn_req)->req_id, (*txn_req)->txn->txn_id);
+ req_list = &(*txn_req)->txn->rpc_reqs;
+ lyd_free_all((*txn_req)->req.rpc->client_results);
+ XFREE(MTYPE_MGMTD_ERR, (*txn_req)->req.rpc->errstr);
+ XFREE(MTYPE_MGMTD_XPATH, (*txn_req)->req.rpc->xpath);
+ XFREE(MTYPE_MGMTD_TXN_RPC_REQ, (*txn_req)->req.rpc);
+ break;
+ case MGMTD_TXN_COMMITCFG_TIMEOUT:
+ break;
+ }
+
+ if (req_list) {
+ mgmt_txn_reqs_del(req_list, *txn_req);
+ __dbg("Removed req-id: %" PRIu64 " from request-list (left:%zu)",
+ (*txn_req)->req_id, mgmt_txn_reqs_count(req_list));
+ }
+
+ MGMTD_TXN_UNLOCK(&(*txn_req)->txn);
+ XFREE(MTYPE_MGMTD_TXN_REQ, (*txn_req));
+ *txn_req = NULL;
+}
+
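+/*
+ * Timer callback: process up to MGMTD_TXN_MAX_NUM_SETCFG_PROC queued
+ * SET_CONFIG requests, applying the changes to the target candidate config
+ * and either replying to the frontend or kicking off an implicit commit.
+ * Reschedules itself if more requests remain.
+ */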
+static void mgmt_txn_process_set_cfg(struct event *thread)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_txn_req *txn_req;
+ struct mgmt_ds_ctx *ds_ctx;
+ struct nb_config *nb_config;
+ char err_buf[1024];
+ bool error;
+ int num_processed = 0;
+ size_t left;
+ struct mgmt_commit_stats *cmt_stats;
+
+ txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
+ assert(txn);
+ cmt_stats = mgmt_fe_get_session_commit_stats(txn->session_id);
+
+ __dbg("Processing %zu SET_CONFIG requests txn-id:%" PRIu64
+ " session-id: %" PRIu64,
+ mgmt_txn_reqs_count(&txn->set_cfg_reqs), txn->txn_id,
+ txn->session_id);
+
+ FOREACH_TXN_REQ_IN_LIST (&txn->set_cfg_reqs, txn_req) {
+ assert(txn_req->req_event == MGMTD_TXN_PROC_SETCFG);
+ ds_ctx = txn_req->req.set_cfg->ds_ctx;
+ if (!ds_ctx) {
+ mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
+ txn_req->req.set_cfg->ds_id,
+ txn_req->req_id,
+ MGMTD_INTERNAL_ERROR,
+ "No such datastore!",
+ txn_req->req.set_cfg
+ ->implicit_commit);
+ goto mgmt_txn_process_set_cfg_done;
+ }
+
+ nb_config = mgmt_ds_get_nb_config(ds_ctx);
+ if (!nb_config) {
+ mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
+ txn_req->req.set_cfg->ds_id,
+ txn_req->req_id,
+ MGMTD_INTERNAL_ERROR,
+ "Unable to retrieve DS Config Tree!",
+ txn_req->req.set_cfg
+ ->implicit_commit);
+ goto mgmt_txn_process_set_cfg_done;
+ }
+
+ error = false;
+ nb_candidate_edit_config_changes(nb_config,
+ txn_req->req.set_cfg->cfg_changes,
+ (size_t)txn_req->req.set_cfg
+ ->num_cfg_changes,
+ NULL, false, err_buf,
+ sizeof(err_buf), &error);
+ if (error) {
+ mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
+ txn_req->req.set_cfg->ds_id,
+ txn_req->req_id,
+ MGMTD_INTERNAL_ERROR, err_buf,
+ txn_req->req.set_cfg
+ ->implicit_commit);
+ goto mgmt_txn_process_set_cfg_done;
+ }
+
+ if (txn_req->req.set_cfg->implicit_commit) {
+ assert(mgmt_txn_reqs_count(&txn->set_cfg_reqs) == 1);
+ assert(txn_req->req.set_cfg->dst_ds_ctx);
+
+ /* We expect the user to have locked the DST DS */
+ if (!mgmt_ds_is_locked(txn_req->req.set_cfg->dst_ds_ctx,
+ txn->session_id)) {
+ __log_err("DS %u not locked for implicit commit txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ txn_req->req.set_cfg->dst_ds_id,
+ txn->txn_id, txn->session_id);
+ mgmt_fe_send_set_cfg_reply(
+ txn->session_id, txn->txn_id,
+ txn_req->req.set_cfg->ds_id,
+ txn_req->req_id, MGMTD_DS_LOCK_FAILED,
+ "running DS not locked for implicit commit",
+ txn_req->req.set_cfg->implicit_commit);
+ goto mgmt_txn_process_set_cfg_done;
+ }
+
+ mgmt_txn_send_commit_config_req(txn->txn_id,
+ txn_req->req_id,
+ txn_req->req.set_cfg
+ ->ds_id,
+ txn_req->req.set_cfg
+ ->ds_ctx,
+ txn_req->req.set_cfg
+ ->dst_ds_id,
+ txn_req->req.set_cfg
+ ->dst_ds_ctx,
+ false, false, true,
+ NULL);
+
+ if (mm->perf_stats_en)
+ gettimeofday(&cmt_stats->last_start, NULL);
+ cmt_stats->commit_cnt++;
+ } else if (mgmt_fe_send_set_cfg_reply(txn->session_id,
+ txn->txn_id,
+ txn_req->req.set_cfg->ds_id,
+ txn_req->req_id,
+ MGMTD_SUCCESS, NULL,
+ false) != 0) {
+ __log_err("Failed to send SET_CONFIG_REPLY txn-id %" PRIu64
+ " session-id: %" PRIu64,
+ txn->txn_id, txn->session_id);
+ }
+
+mgmt_txn_process_set_cfg_done:
+
+ /*
+ * Note: The following will remove it from the list as well.
+ */
+ mgmt_txn_req_free(&txn_req);
+
+ num_processed++;
+ if (num_processed == MGMTD_TXN_MAX_NUM_SETCFG_PROC)
+ break;
+ }
+
+ left = mgmt_txn_reqs_count(&txn->set_cfg_reqs);
+ if (left) {
+ __dbg("Processed maximum number of Set-Config requests (%d/%d/%d). Rescheduling for rest.",
+ num_processed, MGMTD_TXN_MAX_NUM_SETCFG_PROC, (int)left);
+ mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
+ }
+}
+
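+/*
+ * Send the final reply for a commit (COMMIT/SET/EDIT reply depending on how
+ * it was initiated), copy the datastores as appropriate for success, abort
+ * or failure, and free the commit request. Transactions without a frontend
+ * session are cleaned up here as well.
+ */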
+static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
+ enum mgmt_result result,
+ const char *error_if_any)
+{
+ bool success, create_cmt_info_rec;
+
+ if (!txn->commit_cfg_req)
+ return -1;
+
+ success = (result == MGMTD_SUCCESS || result == MGMTD_NO_CFG_CHANGES);
+
+ /* TODO: these replies should not be sent if it's a rollback,
+ * because right now that is special-cased; that special casing should
+ * be removed, however...
+ */
+ if (!txn->commit_cfg_req->req.commit_cfg.edit &&
+ !txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id &&
+ !txn->commit_cfg_req->req.commit_cfg.rollback &&
+ mgmt_fe_send_commit_cfg_reply(txn->session_id, txn->txn_id,
+ txn->commit_cfg_req->req.commit_cfg
+ .src_ds_id,
+ txn->commit_cfg_req->req.commit_cfg
+ .dst_ds_id,
+ txn->commit_cfg_req->req_id,
+ txn->commit_cfg_req->req.commit_cfg
+ .validate_only,
+ result, error_if_any) != 0) {
+ __log_err("Failed to send COMMIT-CONFIG-REPLY txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ txn->txn_id, txn->session_id);
+ }
+
+ if (!txn->commit_cfg_req->req.commit_cfg.edit &&
+ txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id &&
+ !txn->commit_cfg_req->req.commit_cfg.rollback &&
+ mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
+ txn->commit_cfg_req->req.commit_cfg
+ .src_ds_id,
+ txn->commit_cfg_req->req_id,
+ success ? MGMTD_SUCCESS
+ : MGMTD_INTERNAL_ERROR,
+ error_if_any, true) != 0) {
+ __log_err("Failed to send SET-CONFIG-REPLY txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ txn->txn_id, txn->session_id);
+ }
+
+ if (txn->commit_cfg_req->req.commit_cfg.edit &&
+ mgmt_fe_adapter_send_edit_reply(txn->session_id, txn->txn_id,
+ txn->commit_cfg_req->req_id,
+ txn->commit_cfg_req->req.commit_cfg
+ .edit->unlock,
+ true,
+ txn->commit_cfg_req->req.commit_cfg
+ .edit->xpath_created,
+ success ? 0 : -1,
+ error_if_any) != 0) {
+ __log_err("Failed to send EDIT-REPLY txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ txn->txn_id, txn->session_id);
+ }
+
+ if (success) {
+ /* Stop the commit-timeout timer */
+ /* XXX why only on success? */
+ EVENT_OFF(txn->comm_cfg_timeout);
+
+ create_cmt_info_rec =
+ (result != MGMTD_NO_CFG_CHANGES &&
+ !txn->commit_cfg_req->req.commit_cfg.rollback);
+
+ /*
+ * Successful commit: Merge Src DS into Dst DS if and only if
+ * this was not a validate-only or abort request.
+ */
+ if ((txn->session_id &&
+ !txn->commit_cfg_req->req.commit_cfg.validate_only &&
+ !txn->commit_cfg_req->req.commit_cfg.abort) ||
+ txn->commit_cfg_req->req.commit_cfg.rollback) {
+ mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
+ .src_ds_ctx,
+ txn->commit_cfg_req->req.commit_cfg
+ .dst_ds_ctx,
+ create_cmt_info_rec);
+ }
+
+ /*
+ * Restore the Src DS from the Dst DS only on a commit abort
+ * request.
+ */
+ if (txn->session_id && txn->commit_cfg_req->req.commit_cfg.abort)
+ mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
+ .dst_ds_ctx,
+ txn->commit_cfg_req->req.commit_cfg
+ .src_ds_ctx,
+ false);
+ } else {
+ /*
+ * The commit has failed. For implicit commit requests, restore
+ * the contents of the candidate DS.
+ */
+ if (txn->commit_cfg_req->req.commit_cfg.implicit)
+ mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
+ .dst_ds_ctx,
+ txn->commit_cfg_req->req.commit_cfg
+ .src_ds_ctx,
+ false);
+ }
+
+ if (txn->commit_cfg_req->req.commit_cfg.rollback) {
+ mgmt_ds_unlock(txn->commit_cfg_req->req.commit_cfg.src_ds_ctx);
+ mgmt_ds_unlock(txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx);
+ /*
+ * Resume processing the rollback command.
+ *
+ * TODO: there's no good reason to special case rollback, the
+ * rollback boolean should be passed back to the FE client and it
+ * can do the right thing.
+ */
+ mgmt_history_rollback_complete(success);
+ }
+
+ if (txn->commit_cfg_req->req.commit_cfg.init) {
+ /*
+ * This is the backend init request.
+ * We need to unlock the running datastore.
+ */
+ mgmt_ds_unlock(txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx);
+ }
+
+ txn->commit_cfg_req->req.commit_cfg.cmt_stats = NULL;
+ mgmt_txn_req_free(&txn->commit_cfg_req);
+
+ /*
+ * The CONFIG transaction is normally destroyed by the frontend
+ * adapter. But if the transaction was not triggered from a front-end
+ * session we need to clean it up ourselves.
+ */
+ if (!txn->session_id)
+ mgmt_txn_cleanup_txn(&txn);
+
+ return 0;
+}
+
+static int
+mgmt_try_move_commit_to_next_phase(struct mgmt_txn_ctx *txn,
+ struct mgmt_commit_cfg_req *cmtcfg_req)
+{
+ enum mgmt_be_client_id id;
+
+ __dbg("txn-id: %" PRIu64 ", Phase '%s'", txn->txn_id,
+ mgmt_txn_commit_phase_str(txn));
+
+ /*
+ * Check if all clients have moved to the next phase or not.
+ */
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ if (IS_IDBIT_SET(cmtcfg_req->clients, id) &&
+ cmtcfg_req->be_phase[id] == cmtcfg_req->phase) {
+ /*
+ * There's at least one client that hasn't moved to
+ * the next phase.
+ *
+ * TODO: Need to re-think this design for the case where
+ * the set of validators for a given YANG data item is
+ * different from the set of notifiers for the same.
+ */
+ return -1;
+ }
+ }
+
+ /*
+ * If we are here, it means all the clients have moved to the next
+ * phase, so we can move the whole commit to the next phase.
+ */
+ cmtcfg_req->phase++;
+
+ __dbg("Move entire txn-id: %" PRIu64 " to phase '%s'", txn->txn_id,
+ mgmt_txn_commit_phase_str(txn));
+
+ mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+
+ return 0;
+}
+
+/*
+ * This is the real workhorse: walk the computed config diff and pack each
+ * change into per-backend-client config batches, based on which clients
+ * have registered interest in the changed xpath.
+ */
+static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
+ struct nb_config_cbs *changes)
+{
+ struct nb_config_cb *cb, *nxt;
+ struct nb_config_change *chg;
+ struct mgmt_txn_be_cfg_batch *batch;
+ char *xpath = NULL, *value = NULL;
+ enum mgmt_be_client_id id;
+ struct mgmt_be_client_adapter *adapter;
+ struct mgmt_commit_cfg_req *cmtcfg_req;
+ int num_chgs = 0;
+ int xpath_len, value_len;
+ uint64_t clients, chg_clients;
+
+ cmtcfg_req = &txn_req->req.commit_cfg;
+
+ RB_FOREACH_SAFE (cb, nb_config_cbs, changes, nxt) {
+ chg = (struct nb_config_change *)cb;
+
+ /*
+ * Could have pointed directly to the xpath in nb_node,
+ * but we don't want to mess with that now:
+ * xpath = chg->cb.nb_node->xpath;
+ */
+ xpath = lyd_path(chg->cb.dnode, LYD_PATH_STD, NULL, 0);
+ if (!xpath) {
+ (void)mgmt_txn_send_commit_cfg_reply(
+ txn_req->txn, MGMTD_INTERNAL_ERROR,
+ "Internal error! Could not get Xpath from Ds node!");
+ return -1;
+ }
+
+ value = (char *)lyd_get_value(chg->cb.dnode);
+ if (!value)
+ value = (char *)MGMTD_BE_CONTAINER_NODE_VAL;
+
+ __dbg("XPATH: %s, Value: '%s'", xpath, value ? value : "NIL");
+
+ clients =
+ mgmt_be_interested_clients(xpath,
+ MGMT_BE_XPATH_SUBSCR_TYPE_CFG);
+
+ chg_clients = 0;
+
+ xpath_len = strlen(xpath) + 1;
+ value_len = strlen(value) + 1;
+ FOREACH_BE_CLIENT_BITS (id, clients) {
+ adapter = mgmt_be_get_adapter_by_id(id);
+ if (!adapter)
+ continue;
+
+ chg_clients |= (1ull << id);
+
+ batch = cmtcfg_req->last_be_cfg_batch[id];
+ if (!batch ||
+ (batch->num_cfg_data ==
+ MGMTD_MAX_CFG_CHANGES_IN_BATCH) ||
+ (batch->buf_space_left < (xpath_len + value_len))) {
+ /* Allocate a new config batch */
+ batch = mgmt_txn_cfg_batch_alloc(txn_req->txn,
+ id, adapter);
+ }
+
+ batch->buf_space_left -= (xpath_len + value_len);
+
+ mgmt_yang_cfg_data_req_init(
+ &batch->cfg_data[batch->num_cfg_data]);
+ batch->cfg_datap[batch->num_cfg_data] =
+ &batch->cfg_data[batch->num_cfg_data];
+
+ /*
+ * On the backend, we don't really care if it's CREATE
+ * or MODIFY, because the existence was already checked
+ * on the frontend. Therefore we use SET for both.
+ */
+ if (chg->cb.operation == NB_CB_DESTROY)
+ batch->cfg_data[batch->num_cfg_data].req_type =
+ MGMTD__CFG_DATA_REQ_TYPE__REMOVE_DATA;
+ else
+ batch->cfg_data[batch->num_cfg_data].req_type =
+ MGMTD__CFG_DATA_REQ_TYPE__SET_DATA;
+
+ mgmt_yang_data_init(&batch->data[batch->num_cfg_data]);
+ batch->cfg_data[batch->num_cfg_data].data =
+ &batch->data[batch->num_cfg_data];
+ batch->data[batch->num_cfg_data].xpath = strdup(xpath);
+
+ mgmt_yang_data_value_init(
+ &batch->value[batch->num_cfg_data]);
+ batch->data[batch->num_cfg_data].value =
+ &batch->value[batch->num_cfg_data];
+ batch->value[batch->num_cfg_data].value_case =
+ MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
+ batch->value[batch->num_cfg_data].encoded_str_val =
+ value;
+
+ __dbg(" -- %s, batch item:%d", adapter->name,
+ (int)batch->num_cfg_data);
+
+ batch->num_cfg_data++;
+ num_chgs++;
+ }
+
+ if (!chg_clients)
+ __dbg("Daemons interested in XPATH are not currently connected: %s",
+ xpath);
+
+ cmtcfg_req->clients |= chg_clients;
+
+ free(xpath);
+ }
+
+ cmtcfg_req->cmt_stats->last_batch_cnt = num_chgs;
+ if (!num_chgs) {
+ (void)mgmt_txn_send_commit_cfg_reply(txn_req->txn,
+ MGMTD_NO_CFG_CHANGES,
+ "No connected daemons interested in changes");
+ return -1;
+ }
+
+ /* Move all BE clients to create phase */
+ FOREACH_MGMTD_BE_CLIENT_ID(id) {
+ if (IS_IDBIT_SET(cmtcfg_req->clients, id))
+ cmtcfg_req->be_phase[id] =
+ MGMTD_COMMIT_PHASE_TXN_CREATE;
+ }
+
+ return 0;
+}
+
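+/*
+ * PREPARE_CFG phase: validate the candidate config, compute the diff against
+ * the destination datastore (unless a precomputed changeset was supplied),
+ * build the per-backend config batches and move the commit to the TXN_CREATE
+ * phase.
+ */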
+static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
+{
+ struct nb_context nb_ctx;
+ struct nb_config *nb_config;
+ struct nb_config_cbs changes;
+ struct nb_config_cbs *cfg_chgs = NULL;
+ int ret;
+ bool del_cfg_chgs = false;
+
+ ret = 0;
+ memset(&nb_ctx, 0, sizeof(nb_ctx));
+ memset(&changes, 0, sizeof(changes));
+ if (txn->commit_cfg_req->req.commit_cfg.cfg_chgs) {
+ cfg_chgs = txn->commit_cfg_req->req.commit_cfg.cfg_chgs;
+ del_cfg_chgs = true;
+ goto mgmt_txn_prep_config_validation_done;
+ }
+
+ if (txn->commit_cfg_req->req.commit_cfg.src_ds_id != MGMTD_DS_CANDIDATE) {
+ (void)mgmt_txn_send_commit_cfg_reply(
+ txn, MGMTD_INVALID_PARAM,
+ "Source DS cannot be any other than CANDIDATE!");
+ ret = -1;
+ goto mgmt_txn_prepare_config_done;
+ }
+
+ if (txn->commit_cfg_req->req.commit_cfg.dst_ds_id != MGMTD_DS_RUNNING) {
+ (void)mgmt_txn_send_commit_cfg_reply(
+ txn, MGMTD_INVALID_PARAM,
+ "Destination DS cannot be any other than RUNNING!");
+ ret = -1;
+ goto mgmt_txn_prepare_config_done;
+ }
+
+ if (!txn->commit_cfg_req->req.commit_cfg.src_ds_ctx) {
+ (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
+ "No such source datastore!");
+ ret = -1;
+ goto mgmt_txn_prepare_config_done;
+ }
+
+ if (!txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx) {
+ (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
+ "No such destination datastore!");
+ ret = -1;
+ goto mgmt_txn_prepare_config_done;
+ }
+
+ if (txn->commit_cfg_req->req.commit_cfg.abort) {
+ /*
+ * This is a commit abort request. Return success, which
+ * triggers a restore of the Candidate datastore from
+ * Running.
+ */
+ (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
+ goto mgmt_txn_prepare_config_done;
+ }
+
+ nb_config = mgmt_ds_get_nb_config(
+ txn->commit_cfg_req->req.commit_cfg.src_ds_ctx);
+ if (!nb_config) {
+ (void)mgmt_txn_send_commit_cfg_reply(
+ txn, MGMTD_INTERNAL_ERROR,
+ "Unable to retrieve Commit DS Config Tree!");
+ ret = -1;
+ goto mgmt_txn_prepare_config_done;
+ }
+
+ /*
+ * Validate YANG contents of the source DS and get the diff
+ * between source and destination DS contents.
+ */
+ char err_buf[BUFSIZ] = { 0 };
+
+ ret = nb_candidate_validate_yang(nb_config, true, err_buf,
+ sizeof(err_buf) - 1);
+ if (ret != NB_OK) {
+ if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
+ strlcpy(err_buf, "Validation failed", sizeof(err_buf));
+ (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
+ err_buf);
+ ret = -1;
+ goto mgmt_txn_prepare_config_done;
+ }
+
+ nb_config_diff(mgmt_ds_get_nb_config(txn->commit_cfg_req->req.commit_cfg
+ .dst_ds_ctx),
+ nb_config, &changes);
+ cfg_chgs = &changes;
+ del_cfg_chgs = true;
+
+ if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
+ /*
+ * This means there are no changes whatsoever to commit
+ * in the source of the config changes.
+ */
+ (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_NO_CFG_CHANGES,
+ "No changes found to be committed!");
+ ret = -1;
+ goto mgmt_txn_prepare_config_done;
+ }
+
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+ if (mm->perf_stats_en)
+ gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
+ ->validate_start,
+ NULL);
+ /*
+ * Perform application-level validations locally on the MGMTD
+ * process by calling application-specific validation routines
+ * loaded onto the MGMTD process as libraries.
+ */
+ nb_ctx.client = NB_CLIENT_MGMTD_SERVER;
+ nb_ctx.user = (void *)txn;
+ ret = nb_candidate_validate_code(&nb_ctx, nb_config, &changes, err_buf,
+ sizeof(err_buf) - 1);
+ if (ret != NB_OK) {
+ if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
+ strlcpy(err_buf, "Validation failed", sizeof(err_buf));
+ (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
+ err_buf);
+ ret = -1;
+ goto mgmt_txn_prepare_config_done;
+ }
+
+ if (txn->commit_cfg_req->req.commit_cfg.validate_only) {
+ /*
+ * This was a validate-only COMMIT request return success.
+ */
+ (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
+ goto mgmt_txn_prepare_config_done;
+ }
+#endif /* ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED */
+
+mgmt_txn_prep_config_validation_done:
+
+ if (mm->perf_stats_en)
+ gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
+ ->prep_cfg_start,
+ NULL);
+
+ /*
+ * Iterate over the diffs and create ordered batches of config
+ * commands to be validated.
+ */
+ ret = mgmt_txn_create_config_batches(txn->commit_cfg_req, cfg_chgs);
+ if (ret != 0) {
+ ret = -1;
+ goto mgmt_txn_prepare_config_done;
+ }
+
+ /* Move to the Transaction Create Phase */
+ txn->commit_cfg_req->req.commit_cfg.phase =
+ MGMTD_COMMIT_PHASE_TXN_CREATE;
+ mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+
+ /*
+ * Start the COMMIT Timeout Timer to abort the Txn if things get
+ * stuck on the backend.
+ */
+ mgmt_txn_register_event(txn, MGMTD_TXN_COMMITCFG_TIMEOUT);
+mgmt_txn_prepare_config_done:
+
+ if (cfg_chgs && del_cfg_chgs)
+ nb_config_diff_del_changes(cfg_chgs);
+
+ return ret;
+}
+
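+/*
+ * TXN_CREATE phase: send TXN_CREATE_REQ to every backend client involved in
+ * the commit. The phase only advances once their TXN_REPLYs arrive.
+ */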
+static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn)
+{
+ enum mgmt_be_client_id id;
+ struct mgmt_be_client_adapter *adapter;
+ struct mgmt_commit_cfg_req *cmtcfg_req;
+
+ assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
+
+ cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ if (IS_IDBIT_SET(cmtcfg_req->clients, id)) {
+ adapter = mgmt_be_get_adapter_by_id(id);
+ if (mgmt_be_send_txn_req(adapter, txn->txn_id, true)) {
+ (void)mgmt_txn_send_commit_cfg_reply(
+ txn, MGMTD_INTERNAL_ERROR,
+ "Could not send TXN_CREATE to backend adapter");
+ return -1;
+ }
+ }
+ }
+
+ /*
+ * Don't move the commit to the next phase yet. Wait for the
+ * TXN_REPLYs to come back.
+ */
+
+ __dbg("txn-id: %" PRIu64 " session-id: %" PRIu64 " Phase '%s'",
+ txn->txn_id, txn->session_id, mgmt_txn_commit_phase_str(txn));
+
+ return 0;
+}
+
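+/*
+ * Send all queued CFGDATA_CREATE_REQ batches for this transaction to one
+ * backend adapter, flagging the last batch so the client knows the set is
+ * complete.
+ */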
+static int mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
+ struct mgmt_be_client_adapter *adapter)
+{
+ struct mgmt_commit_cfg_req *cmtcfg_req;
+ struct mgmt_txn_be_cfg_batch *batch;
+ struct mgmt_be_cfgreq cfg_req = { 0 };
+ size_t num_batches, indx;
+
+ assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
+
+ cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+ assert(IS_IDBIT_SET(cmtcfg_req->clients, adapter->id));
+
+ indx = 0;
+ num_batches = mgmt_txn_batches_count(&cmtcfg_req->batches[adapter->id]);
+ FOREACH_TXN_CFG_BATCH_IN_LIST (&cmtcfg_req->batches[adapter->id],
+ batch) {
+
+ cfg_req.cfgdata_reqs = batch->cfg_datap;
+ cfg_req.num_reqs = batch->num_cfg_data;
+ indx++;
+ if (mgmt_be_send_cfgdata_req(adapter, txn->txn_id,
+ cfg_req.cfgdata_reqs,
+ cfg_req.num_reqs,
+ indx == num_batches)) {
+ (void)mgmt_txn_send_commit_cfg_reply(
+ txn, MGMTD_INTERNAL_ERROR,
+ "Internal Error! Could not send config data to backend!");
+ __log_err("Could not send CFGDATA_CREATE txn-id: %" PRIu64
+ " to client '%s'",
+ txn->txn_id, adapter->name);
+ return -1;
+ }
+
+ cmtcfg_req->cmt_stats->last_num_cfgdata_reqs++;
+ }
+
+ /*
+ * We don't advance the phase here, instead that is driven by the
+ * cfg_reply.
+ */
+
+ return 0;
+}
+
+static int mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
+ struct mgmt_be_client_adapter *adapter)
+{
+ struct mgmt_commit_cfg_req *cmtcfg_req =
+ &txn->commit_cfg_req->req.commit_cfg;
+
+ assert(txn->type == MGMTD_TXN_TYPE_CONFIG);
+
+ if (IS_IDBIT_UNSET(cmtcfg_req->clients, adapter->id))
+ return 0;
+
+ return mgmt_be_send_txn_req(adapter, txn->txn_id, false);
+}
+
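+/*
+ * Commit timeout handler: a backend failed to reply in time, so fail the
+ * whole commit.
+ */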
+static void mgmt_txn_cfg_commit_timedout(struct event *thread)
+{
+ struct mgmt_txn_ctx *txn;
+
+ txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
+ assert(txn);
+
+ assert(txn->type == MGMTD_TXN_TYPE_CONFIG);
+
+ if (!txn->commit_cfg_req)
+ return;
+
+ __log_err("Backend timeout txn-id: %" PRIu64 " aborting commit",
+ txn->txn_id);
+
+ /*
+ * Send a COMMIT_CONFIG_REPLY with failure.
+ * NOTE: The transaction cleanup will be triggered from Front-end
+ * adapter.
+ */
+ mgmt_txn_send_commit_cfg_reply(
+ txn, MGMTD_INTERNAL_ERROR,
+ "Operation on the backend timed-out. Aborting commit!");
+}
+
+
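+/*
+ * Finish a GETTREE request: trim and convert the collected results as
+ * requested, send them to the frontend, and free the request.
+ */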
+static int txn_get_tree_data_done(struct mgmt_txn_ctx *txn,
+ struct mgmt_txn_req *txn_req)
+{
+ struct txn_req_get_tree *get_tree = txn_req->req.get_tree;
+ uint64_t req_id = txn_req->req_id;
+ struct lyd_node *result;
+ int ret = NB_OK;
+
+ /* cancel timer and send reply onward */
+ EVENT_OFF(txn->get_tree_timeout);
+
+ if (!get_tree->simple_xpath && get_tree->client_results) {
+ /*
+ * We have a complex query, so filter the results by the xpath query.
+ */
+ if (yang_lyd_trim_xpath(&get_tree->client_results,
+ txn_req->req.get_tree->xpath))
+ ret = NB_ERR;
+ }
+
+ result = get_tree->client_results;
+
+ if (ret == NB_OK && result && get_tree->exact)
+ result = yang_dnode_get(result, get_tree->xpath);
+
+ if (ret == NB_OK)
+ ret = mgmt_fe_adapter_send_tree_data(txn->session_id,
+ txn->txn_id,
+ txn_req->req_id,
+ get_tree->result_type,
+ get_tree->wd_options,
+ result,
+ get_tree->partial_error,
+ false);
+
+ /* we're done with the request */
+ mgmt_txn_req_free(&txn_req);
+
+ if (ret) {
+ __log_err("Error sending the results of GETTREE for txn-id %" PRIu64
+ " req_id %" PRIu64 " to requested type %u",
+ txn->txn_id, req_id, get_tree->result_type);
+
+ (void)mgmt_fe_adapter_txn_error(txn->txn_id, req_id, false, ret,
+ "Error converting results of GETTREE");
+ }
+
+ return ret;
+}
+
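+/*
+ * Finish an RPC request: forward the collected result (or error) to the
+ * frontend and free the request.
+ */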
+static int txn_rpc_done(struct mgmt_txn_ctx *txn, struct mgmt_txn_req *txn_req)
+{
+ struct txn_req_rpc *rpc = txn_req->req.rpc;
+ uint64_t req_id = txn_req->req_id;
+
+ /* cancel timer and send reply onward */
+ EVENT_OFF(txn->rpc_timeout);
+
+ if (rpc->errstr)
+ mgmt_fe_adapter_txn_error(txn->txn_id, req_id, false, -1,
+ rpc->errstr);
+ else if (mgmt_fe_adapter_send_rpc_reply(txn->session_id, txn->txn_id,
+ req_id, rpc->result_type,
+ rpc->client_results)) {
+ __log_err("Error sending the results of RPC for txn-id %" PRIu64
+ " req_id %" PRIu64 " to requested type %u",
+ txn->txn_id, req_id, rpc->result_type);
+
+ (void)mgmt_fe_adapter_txn_error(txn->txn_id, req_id, false, -1,
+ "Error converting results of RPC");
+ }
+
+ /* we're done with the request */
+ mgmt_txn_req_free(&txn_req);
+
+ return 0;
+}
+
+static void txn_get_tree_timeout(struct event *thread)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_txn_req *txn_req;
+
+ txn_req = (struct mgmt_txn_req *)EVENT_ARG(thread);
+ txn = txn_req->txn;
+
+ assert(txn);
+ assert(txn->type == MGMTD_TXN_TYPE_SHOW);
+
+ __log_err("Backend timeout txn-id: %" PRIu64 " ending get-tree",
+ txn->txn_id);
+
+ /*
+ * Send a get-tree data reply.
+ *
+ * NOTE: The transaction cleanup will be triggered from Front-end
+ * adapter.
+ */
+
+ txn_req->req.get_tree->partial_error = -ETIMEDOUT;
+ txn_get_tree_data_done(txn, txn_req);
+}
+
+static void txn_rpc_timeout(struct event *thread)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_txn_req *txn_req;
+
+ txn_req = (struct mgmt_txn_req *)EVENT_ARG(thread);
+ txn = txn_req->txn;
+
+ assert(txn);
+ assert(txn->type == MGMTD_TXN_TYPE_RPC);
+
+ __log_err("Backend timeout txn-id: %" PRIu64 " ending rpc", txn->txn_id);
+
+ /*
+ * Send an RPC reply with a timeout error.
+ *
+ * NOTE: The transaction cleanup will be triggered from Front-end
+ * adapter.
+ */
+
+ txn_req->req.rpc->errstr =
+ XSTRDUP(MTYPE_MGMTD_ERR, "Operation on the backend timed-out");
+ txn_rpc_done(txn, txn_req);
+}
+
+/*
+ * Send CFG_APPLY_REQs to all the backend clients.
+ *
+ * NOTE: This is always dispatched when all CFGDATA_CREATE_REQs
+ * for all backend clients have been generated. Please see
+ * mgmt_txn_register_event() and mgmt_txn_process_commit_cfg()
+ * for details.
+ */
+static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn)
+{
+ enum mgmt_be_client_id id;
+ struct mgmt_be_client_adapter *adapter;
+ struct mgmt_commit_cfg_req *cmtcfg_req;
+
+ assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
+
+ cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+ if (cmtcfg_req->validate_only) {
+ /*
+ * If this was a validate-only COMMIT request return success.
+ */
+ (void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
+ return 0;
+ }
+
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ if (IS_IDBIT_SET(cmtcfg_req->clients, id)) {
+ adapter = mgmt_be_get_adapter_by_id(id);
+ if (!adapter)
+ return -1;
+
+ if (mgmt_be_send_cfgapply_req(adapter, txn->txn_id)) {
+ (void)mgmt_txn_send_commit_cfg_reply(
+ txn, MGMTD_INTERNAL_ERROR,
+ "Could not send CFG_APPLY_REQ to backend adapter");
+ return -1;
+ }
+ cmtcfg_req->cmt_stats->last_num_apply_reqs++;
+
+ UNSET_FLAG(adapter->flags,
+ MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
+ }
+ }
+
+ /*
+ * Don't move the commit to the next phase yet. Wait for all the
+ * CFG_APPLY_REPLYs to come back.
+ */
+
+ return 0;
+}
+
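+/*
+ * Commit-config state machine driver: invoked via MGMTD_TXN_PROC_COMMITCFG,
+ * it dispatches the action for the commit's current phase.
+ */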
+static void mgmt_txn_process_commit_cfg(struct event *thread)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_commit_cfg_req *cmtcfg_req;
+
+ txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
+ assert(txn);
+
+ __dbg("Processing COMMIT_CONFIG for txn-id: %" PRIu64
+ " session-id: %" PRIu64 " Phase '%s'",
+ txn->txn_id, txn->session_id, mgmt_txn_commit_phase_str(txn));
+
+ assert(txn->commit_cfg_req);
+ cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+ switch (cmtcfg_req->phase) {
+ case MGMTD_COMMIT_PHASE_PREPARE_CFG:
+ mgmt_txn_prepare_config(txn);
+ break;
+ case MGMTD_COMMIT_PHASE_TXN_CREATE:
+ if (mm->perf_stats_en)
+ gettimeofday(&cmtcfg_req->cmt_stats->txn_create_start,
+ NULL);
+ /*
+ * Send TXN_CREATE_REQ to all Backend now.
+ */
+ mgmt_txn_send_be_txn_create(txn);
+ break;
+ case MGMTD_COMMIT_PHASE_APPLY_CFG:
+ if (mm->perf_stats_en)
+ gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_start,
+ NULL);
+ /*
+ * We should have received successful CFGDATA_CREATE replies from
+ * all concerned backend clients by now. Send out the
+ * CFG_APPLY_REQs now.
+ */
+ mgmt_txn_send_be_cfg_apply(txn);
+ break;
+ case MGMTD_COMMIT_PHASE_TXN_DELETE:
+ if (mm->perf_stats_en)
+ gettimeofday(&cmtcfg_req->cmt_stats->txn_del_start,
+ NULL);
+ /*
+ * We would have sent TXN_DELETE_REQ to all backends by now.
+ * Send a successful CONFIG_COMMIT_REPLY back to front-end.
+ * NOTE: This should also trigger DS merge/unlock and Txn
+ * cleanup. Please see mgmt_txn_send_commit_cfg_reply() for
+ * more details.
+ */
+ EVENT_OFF(txn->comm_cfg_timeout);
+ mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
+ break;
+ case MGMTD_COMMIT_PHASE_MAX:
+ break;
+ }
+}
+
+static void mgmt_init_get_data_reply(struct mgmt_get_data_reply *get_reply)
+{
+ size_t indx;
+
+ for (indx = 0; indx < array_size(get_reply->reply_data); indx++)
+ get_reply->reply_datap[indx] = &get_reply->reply_data[indx];
+}
+
+static void mgmt_reset_get_data_reply(struct mgmt_get_data_reply *get_reply)
+{
+ int indx;
+
+ for (indx = 0; indx < get_reply->num_reply; indx++) {
+ if (get_reply->reply_xpathp[indx]) {
+ free(get_reply->reply_xpathp[indx]);
+ get_reply->reply_xpathp[indx] = 0;
+ }
+ if (get_reply->reply_data[indx].xpath) {
+ free(get_reply->reply_data[indx].xpath);
+ get_reply->reply_data[indx].xpath = 0;
+ }
+ }
+
+ get_reply->num_reply = 0;
+ memset(&get_reply->data_reply, 0, sizeof(get_reply->data_reply));
+ memset(&get_reply->reply_data, 0, sizeof(get_reply->reply_data));
+ memset(&get_reply->reply_datap, 0, sizeof(get_reply->reply_datap));
+
+ memset(&get_reply->reply_value, 0, sizeof(get_reply->reply_value));
+
+ mgmt_init_get_data_reply(get_reply);
+}
+
+static void mgmt_reset_get_data_reply_buf(struct mgmt_get_data_req *get_data)
+{
+ if (get_data->reply)
+ mgmt_reset_get_data_reply(get_data->reply);
+}
+
+static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
+ struct mgmt_get_data_req *get_req)
+{
+ struct mgmt_get_data_reply *get_reply;
+ Mgmtd__YangDataReply *data_reply;
+
+ get_reply = get_req->reply;
+ if (!get_reply)
+ return;
+
+ data_reply = &get_reply->data_reply;
+ mgmt_yang_data_reply_init(data_reply);
+ data_reply->n_data = get_reply->num_reply;
+ data_reply->data = get_reply->reply_datap;
+ data_reply->next_indx = (!get_reply->last_batch ? get_req->total_reply
+ : -1);
+
+ __dbg("Sending %zu Get-Config/Data replies next-index:%" PRId64,
+ data_reply->n_data, data_reply->next_indx);
+
+ switch (txn_req->req_event) {
+ case MGMTD_TXN_PROC_GETCFG:
+ if (mgmt_fe_send_get_reply(txn_req->txn->session_id,
+ txn_req->txn->txn_id, get_req->ds_id,
+ txn_req->req_id, MGMTD_SUCCESS,
+ data_reply, NULL) != 0) {
+ __log_err("Failed to send GET-CONFIG-REPLY txn-id: %" PRIu64
+ " session-id: %" PRIu64 " req-id: %" PRIu64,
+ txn_req->txn->txn_id,
+ txn_req->txn->session_id, txn_req->req_id);
+ }
+ break;
+ case MGMTD_TXN_PROC_SETCFG:
+ case MGMTD_TXN_PROC_COMMITCFG:
+ case MGMTD_TXN_PROC_GETTREE:
+ case MGMTD_TXN_PROC_RPC:
+ case MGMTD_TXN_COMMITCFG_TIMEOUT:
+ __log_err("Invalid Txn-Req-Event %u", txn_req->req_event);
+ break;
+ }
+
+ /*
+ * Reset reply buffer for next reply.
+ */
+ mgmt_reset_get_data_reply_buf(get_req);
+}
+
+static void txn_iter_get_config_data_cb(const char *xpath, struct lyd_node *node,
+ struct nb_node *nb_node, void *ctx)
+{
+ struct mgmt_txn_req *txn_req;
+ struct mgmt_get_data_req *get_req;
+ struct mgmt_get_data_reply *get_reply;
+ Mgmtd__YangData *data;
+ Mgmtd__YangDataValue *data_value;
+
+ txn_req = (struct mgmt_txn_req *)ctx;
+ if (!txn_req)
+ return;
+
+ if (!(node->schema->nodetype & LYD_NODE_TERM))
+ return;
+
+ assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG);
+
+ get_req = txn_req->req.get_data;
+ assert(get_req);
+ get_reply = get_req->reply;
+ data = &get_reply->reply_data[get_reply->num_reply];
+ data_value = &get_reply->reply_value[get_reply->num_reply];
+
+ mgmt_yang_data_init(data);
+ data->xpath = strdup(xpath);
+ mgmt_yang_data_value_init(data_value);
+ data_value->value_case = MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
+ data_value->encoded_str_val = (char *)lyd_get_value(node);
+ data->value = data_value;
+
+ get_reply->num_reply++;
+ get_req->total_reply++;
+ __dbg(" [%d] XPATH: '%s', Value: '%s'", get_req->total_reply,
+ data->xpath, data_value->encoded_str_val);
+
+ if (get_reply->num_reply == MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH)
+ mgmt_txn_send_getcfg_reply_data(txn_req, get_req);
+}
+
+static int mgmt_txn_get_config(struct mgmt_txn_ctx *txn,
+ struct mgmt_txn_req *txn_req,
+ struct nb_config *root)
+{
+ int indx;
+ struct mgmt_get_data_req *get_data;
+ struct mgmt_get_data_reply *get_reply;
+
+ get_data = txn_req->req.get_data;
+
+ if (!get_data->reply) {
+ get_data->reply = XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REPLY,
+ sizeof(struct mgmt_get_data_reply));
+ if (!get_data->reply) {
+ mgmt_fe_send_get_reply(
+ txn->session_id, txn->txn_id, get_data->ds_id,
+ txn_req->req_id, MGMTD_INTERNAL_ERROR, NULL,
+ "Internal error: Unable to allocate reply buffers!");
+ goto mgmt_txn_get_config_failed;
+ }
+ }
+
+ /*
+ * Read data contents from the DS and respond back directly.
+ * No need to go to backend for getting data.
+ */
+ get_reply = get_data->reply;
+ for (indx = 0; indx < get_data->num_xpaths; indx++) {
+ __dbg("Trying to get all data under '%s'",
+ get_data->xpaths[indx]);
+ mgmt_init_get_data_reply(get_reply);
+ /*
+ * mgmt_ds_iter_data works on path prefixes, but the user may
+ * also want to use an xpath regexp; we need to add this
+ * functionality.
+ */
+ if (mgmt_ds_iter_data(get_data->ds_id, root,
+ get_data->xpaths[indx],
+ txn_iter_get_config_data_cb,
+ (void *)txn_req) == -1) {
+ __dbg("Invalid Xpath '%s'", get_data->xpaths[indx]);
+ mgmt_fe_send_get_reply(txn->session_id, txn->txn_id,
+ get_data->ds_id, txn_req->req_id,
+ MGMTD_INTERNAL_ERROR, NULL,
+ "Invalid xpath");
+ goto mgmt_txn_get_config_failed;
+ }
+ __dbg("Got %d remaining data-replies for xpath '%s'",
+ get_reply->num_reply, get_data->xpaths[indx]);
+ get_reply->last_batch = true;
+ mgmt_txn_send_getcfg_reply_data(txn_req, get_data);
+ }
+
+mgmt_txn_get_config_failed:
+
+ /*
+ * Delete the txn request. It will also remove it from request
+ * list.
+ */
+ mgmt_txn_req_free(&txn_req);
+
+ return 0;
+}
+
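+/*
+ * Timer callback: serve up to MGMTD_TXN_MAX_NUM_GETCFG_PROC queued
+ * GET_CONFIG requests directly from the datastore contents and reschedule
+ * if any remain.
+ */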
+static void mgmt_txn_process_get_cfg(struct event *thread)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_txn_req *txn_req;
+ struct nb_config *cfg_root;
+ int num_processed = 0;
+ bool error;
+
+ txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
+ assert(txn);
+
+ __dbg("Processing %zu GET_CONFIG requests txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ mgmt_txn_reqs_count(&txn->get_cfg_reqs), txn->txn_id,
+ txn->session_id);
+
+ FOREACH_TXN_REQ_IN_LIST (&txn->get_cfg_reqs, txn_req) {
+ error = false;
+ assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG);
+ cfg_root = txn_req->req.get_data->cfg_root;
+ assert(cfg_root);
+
+ if (mgmt_txn_get_config(txn, txn_req, cfg_root) != 0) {
+ __log_err("Unable to retrieve config from DS %d txn-id: %" PRIu64
+ " session-id: %" PRIu64 " req-id: %" PRIu64,
+ txn_req->req.get_data->ds_id, txn->txn_id,
+ txn->session_id, txn_req->req_id);
+ error = true;
+ }
+
+ if (error) {
+ /*
+ * Delete the txn request.
+ * Note: The following will remove it from the list
+ * as well.
+ */
+ mgmt_txn_req_free(&txn_req);
+ }
+
+ /*
+ * Otherwise the request would have already been deleted or
+ * moved to the corresponding pending list. No need to delete it here.
+ */
+ num_processed++;
+ if (num_processed == MGMTD_TXN_MAX_NUM_GETCFG_PROC)
+ break;
+ }
+
+ if (mgmt_txn_reqs_count(&txn->get_cfg_reqs)) {
+ __dbg("Processed maximum number of Get-Config requests (%d/%d). Rescheduling for rest.",
+ num_processed, MGMTD_TXN_MAX_NUM_GETCFG_PROC);
+ mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
+ }
+}
+
+static struct mgmt_txn_ctx *
+mgmt_fe_find_txn_by_session_id(struct mgmt_master *cm, uint64_t session_id,
+ enum mgmt_txn_type type)
+{
+ struct mgmt_txn_ctx *txn;
+
+ FOREACH_TXN_IN_LIST (cm, txn) {
+ if (txn->session_id == session_id && txn->type == type)
+ return txn;
+ }
+
+ return NULL;
+}
+
+static struct mgmt_txn_ctx *mgmt_txn_create_new(uint64_t session_id,
+ enum mgmt_txn_type type)
+{
+ struct mgmt_txn_ctx *txn = NULL;
+
+ /* Do not allow multiple config transactions */
+ if (type == MGMTD_TXN_TYPE_CONFIG && mgmt_config_txn_in_progress())
+ return NULL;
+
+ txn = mgmt_fe_find_txn_by_session_id(mgmt_txn_mm, session_id, type);
+ if (!txn) {
+ txn = XCALLOC(MTYPE_MGMTD_TXN, sizeof(struct mgmt_txn_ctx));
+ assert(txn);
+
+ txn->session_id = session_id;
+ txn->type = type;
+ mgmt_txns_add_tail(&mgmt_txn_mm->txn_list, txn);
+ /* TODO: why do we need N lists for one transaction */
+ mgmt_txn_reqs_init(&txn->set_cfg_reqs);
+ mgmt_txn_reqs_init(&txn->get_cfg_reqs);
+ mgmt_txn_reqs_init(&txn->get_tree_reqs);
+ mgmt_txn_reqs_init(&txn->rpc_reqs);
+ txn->commit_cfg_req = NULL;
+ txn->refcount = 0;
+ if (!mgmt_txn_mm->next_txn_id)
+ mgmt_txn_mm->next_txn_id++;
+ txn->txn_id = mgmt_txn_mm->next_txn_id++;
+ hash_get(mgmt_txn_mm->txn_hash, txn, hash_alloc_intern);
+
+ __dbg("Added new '%s' txn-id: %" PRIu64,
+ mgmt_txn_type2str(type), txn->txn_id);
+
+ if (type == MGMTD_TXN_TYPE_CONFIG)
+ mgmt_txn_mm->cfg_txn = txn;
+
+ MGMTD_TXN_LOCK(txn);
+ }
+
+ return txn;
+}
+
+static void mgmt_txn_delete(struct mgmt_txn_ctx **txn)
+{
+ MGMTD_TXN_UNLOCK(txn);
+}
+
+static unsigned int mgmt_txn_hash_key(const void *data)
+{
+ const struct mgmt_txn_ctx *txn = data;
+
+ return jhash2((uint32_t *)&txn->txn_id,
+ sizeof(txn->txn_id) / sizeof(uint32_t), 0);
+}
+
+static bool mgmt_txn_hash_cmp(const void *d1, const void *d2)
+{
+ const struct mgmt_txn_ctx *txn1 = d1;
+ const struct mgmt_txn_ctx *txn2 = d2;
+
+ return (txn1->txn_id == txn2->txn_id);
+}
+
+static void mgmt_txn_hash_free(void *data)
+{
+ struct mgmt_txn_ctx *txn = data;
+
+ mgmt_txn_delete(&txn);
+}
+
+static void mgmt_txn_hash_init(void)
+{
+ if (!mgmt_txn_mm || mgmt_txn_mm->txn_hash)
+ return;
+
+ mgmt_txn_mm->txn_hash = hash_create(mgmt_txn_hash_key, mgmt_txn_hash_cmp,
+ "MGMT Transactions");
+}
+
+static void mgmt_txn_hash_destroy(void)
+{
+ if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
+ return;
+
+ hash_clean(mgmt_txn_mm->txn_hash, mgmt_txn_hash_free);
+ hash_free(mgmt_txn_mm->txn_hash);
+ mgmt_txn_mm->txn_hash = NULL;
+}
+
+static inline struct mgmt_txn_ctx *mgmt_txn_id2ctx(uint64_t txn_id)
+{
+ struct mgmt_txn_ctx key = { 0 };
+ struct mgmt_txn_ctx *txn;
+
+ if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
+ return NULL;
+
+ key.txn_id = txn_id;
+ txn = hash_lookup(mgmt_txn_mm->txn_hash, &key);
+
+ return txn;
+}
+
+uint64_t mgmt_txn_get_session_id(uint64_t txn_id)
+{
+ struct mgmt_txn_ctx *txn = mgmt_txn_id2ctx(txn_id);
+
+ return txn ? txn->session_id : MGMTD_SESSION_ID_NONE;
+}
+
+static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file, int line)
+{
+ txn->refcount++;
+ __dbg("%s:%d --> Lock %s txn-id: %" PRIu64 " refcnt: %d", file, line,
+ mgmt_txn_type2str(txn->type), txn->txn_id, txn->refcount);
+}
+
+static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
+ int line)
+{
+ assert(*txn && (*txn)->refcount);
+
+ (*txn)->refcount--;
+ __dbg("%s:%d --> Unlock %s txn-id: %" PRIu64 " refcnt: %d", file, line,
+ mgmt_txn_type2str((*txn)->type), (*txn)->txn_id, (*txn)->refcount);
+ if (!(*txn)->refcount) {
+ if ((*txn)->type == MGMTD_TXN_TYPE_CONFIG)
+ if (mgmt_txn_mm->cfg_txn == *txn)
+ mgmt_txn_mm->cfg_txn = NULL;
+ EVENT_OFF((*txn)->proc_get_cfg);
+ EVENT_OFF((*txn)->proc_get_data);
+ EVENT_OFF((*txn)->proc_comm_cfg);
+ EVENT_OFF((*txn)->comm_cfg_timeout);
+ EVENT_OFF((*txn)->get_tree_timeout);
+ hash_release(mgmt_txn_mm->txn_hash, *txn);
+ mgmt_txns_del(&mgmt_txn_mm->txn_list, *txn);
+
+ __dbg("Deleted %s txn-id: %" PRIu64 " session-id: %" PRIu64,
+ mgmt_txn_type2str((*txn)->type), (*txn)->txn_id,
+ (*txn)->session_id);
+
+ XFREE(MTYPE_MGMTD_TXN, *txn);
+ }
+
+ *txn = NULL;
+}
+
+static void mgmt_txn_cleanup_txn(struct mgmt_txn_ctx **txn)
+{
+ /* TODO: Any other cleanup applicable */
+
+ mgmt_txn_delete(txn);
+}
+
+static void mgmt_txn_cleanup_all_txns(void)
+{
+ struct mgmt_txn_ctx *txn;
+
+ if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
+ return;
+
+ FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn)
+ mgmt_txn_cleanup_txn(&txn);
+}
+
+static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
+ enum mgmt_txn_event event)
+{
+ struct timeval tv = { .tv_sec = 0,
+ .tv_usec = MGMTD_TXN_PROC_DELAY_USEC };
+
+ assert(mgmt_txn_mm && mgmt_txn_tm);
+
+ switch (event) {
+ case MGMTD_TXN_PROC_SETCFG:
+ event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_set_cfg, txn,
+ &tv, &txn->proc_set_cfg);
+ break;
+ case MGMTD_TXN_PROC_COMMITCFG:
+ event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_commit_cfg,
+ txn, &tv, &txn->proc_comm_cfg);
+ break;
+ case MGMTD_TXN_PROC_GETCFG:
+ event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg, txn,
+ &tv, &txn->proc_get_cfg);
+ break;
+ case MGMTD_TXN_COMMITCFG_TIMEOUT:
+ event_add_timer(mgmt_txn_tm, mgmt_txn_cfg_commit_timedout, txn,
+ MGMTD_TXN_CFG_COMMIT_MAX_DELAY_SEC,
+ &txn->comm_cfg_timeout);
+ break;
+ case MGMTD_TXN_PROC_GETTREE:
+ case MGMTD_TXN_PROC_RPC:
+ assert(!"code bug do not register this event");
+ break;
+ }
+}
+
+int mgmt_txn_init(struct mgmt_master *mm, struct event_loop *tm)
+{
+ if (mgmt_txn_mm || mgmt_txn_tm)
+ assert(!"MGMTD TXN: Call txn_init() only once");
+
+ mgmt_txn_mm = mm;
+ mgmt_txn_tm = tm;
+ mgmt_txns_init(&mm->txn_list);
+ mgmt_txn_hash_init();
+ assert(!mm->cfg_txn);
+ mm->cfg_txn = NULL;
+
+ return 0;
+}
+
+void mgmt_txn_destroy(void)
+{
+ mgmt_txn_cleanup_all_txns();
+ mgmt_txn_hash_destroy();
+}
+
+bool mgmt_config_txn_in_progress(void)
+{
+ if (mgmt_txn_mm && mgmt_txn_mm->cfg_txn)
+ return true;
+
+ return false;
+}
+
+uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type)
+{
+ struct mgmt_txn_ctx *txn;
+
+ txn = mgmt_txn_create_new(session_id, type);
+ return txn ? txn->txn_id : MGMTD_TXN_ID_NONE;
+}
+
+void mgmt_destroy_txn(uint64_t *txn_id)
+{
+ struct mgmt_txn_ctx *txn;
+
+ txn = mgmt_txn_id2ctx(*txn_id);
+ if (!txn)
+ return;
+
+ mgmt_txn_delete(&txn);
+ *txn_id = MGMTD_TXN_ID_NONE;
+}
+
+int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ struct mgmt_ds_ctx *ds_ctx,
+ Mgmtd__YangCfgDataReq **cfg_req,
+ size_t num_req, bool implicit_commit,
+ Mgmtd__DatastoreId dst_ds_id,
+ struct mgmt_ds_ctx *dst_ds_ctx)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_txn_req *txn_req;
+ size_t indx;
+ uint16_t *num_chgs;
+ struct nb_cfg_change *cfg_chg;
+ struct nb_node *node;
+
+ txn = mgmt_txn_id2ctx(txn_id);
+ if (!txn)
+ return -1;
+
+ if (implicit_commit && mgmt_txn_reqs_count(&txn->set_cfg_reqs)) {
+ __log_err(
+ "Only one SETCFG-REQ is allowed for an implicit commit!");
+ return -1;
+ }
+
+ txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_SETCFG);
+ txn_req->req.set_cfg->ds_id = ds_id;
+ txn_req->req.set_cfg->ds_ctx = ds_ctx;
+ num_chgs = &txn_req->req.set_cfg->num_cfg_changes;
+ for (indx = 0; indx < num_req; indx++) {
+ cfg_chg = &txn_req->req.set_cfg->cfg_changes[*num_chgs];
+
+ switch (cfg_req[indx]->req_type) {
+ case MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA:
+ cfg_chg->operation = NB_OP_DELETE;
+ break;
+ case MGMTD__CFG_DATA_REQ_TYPE__REMOVE_DATA:
+ cfg_chg->operation = NB_OP_DESTROY;
+ break;
+ case MGMTD__CFG_DATA_REQ_TYPE__SET_DATA:
+ /*
+ * For backward compatibility, we need to allow creating
+ * *new* list keys with SET_DATA operation. NB_OP_MODIFY
+ * is not allowed for keys, so use NB_OP_CREATE_EXCL.
+ */
+ node = nb_node_find(cfg_req[indx]->data->xpath);
+ if (node && lysc_is_key(node->snode))
+ cfg_chg->operation = NB_OP_CREATE_EXCL;
+ else
+ cfg_chg->operation = NB_OP_MODIFY;
+ break;
+ case MGMTD__CFG_DATA_REQ_TYPE__CREATE_DATA:
+ cfg_chg->operation = NB_OP_CREATE_EXCL;
+ break;
+ case MGMTD__CFG_DATA_REQ_TYPE__REPLACE_DATA:
+ cfg_chg->operation = NB_OP_REPLACE;
+ break;
+ case MGMTD__CFG_DATA_REQ_TYPE__REQ_TYPE_NONE:
+ case _MGMTD__CFG_DATA_REQ_TYPE_IS_INT_SIZE:
+ default:
+ continue;
+ }
+
+ __dbg("XPath: '%s', Value: '%s'", cfg_req[indx]->data->xpath,
+ (cfg_req[indx]->data->value &&
+ cfg_req[indx]->data->value->encoded_str_val
+ ? cfg_req[indx]->data->value->encoded_str_val
+ : "NULL"));
+ strlcpy(cfg_chg->xpath, cfg_req[indx]->data->xpath,
+ sizeof(cfg_chg->xpath));
+ cfg_chg->value =
+ (cfg_req[indx]->data->value &&
+ cfg_req[indx]->data->value->encoded_str_val
+ ? strdup(cfg_req[indx]
+ ->data->value->encoded_str_val)
+ : NULL);
+ if (cfg_chg->value)
+ __dbg("Allocated value at %p ==> '%s'", cfg_chg->value,
+ cfg_chg->value);
+
+ (*num_chgs)++;
+ }
+ txn_req->req.set_cfg->implicit_commit = implicit_commit;
+ txn_req->req.set_cfg->dst_ds_id = dst_ds_id;
+ txn_req->req.set_cfg->dst_ds_ctx = dst_ds_ctx;
+ txn_req->req.set_cfg->setcfg_stats =
+ mgmt_fe_get_session_setcfg_stats(txn->session_id);
+ mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
+
+ return 0;
+}
+
+int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
+ Mgmtd__DatastoreId src_ds_id,
+ struct mgmt_ds_ctx *src_ds_ctx,
+ Mgmtd__DatastoreId dst_ds_id,
+ struct mgmt_ds_ctx *dst_ds_ctx,
+ bool validate_only, bool abort,
+ bool implicit, struct mgmt_edit_req *edit)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_txn_req *txn_req;
+
+ txn = mgmt_txn_id2ctx(txn_id);
+ if (!txn)
+ return -1;
+
+ if (txn->commit_cfg_req) {
+ __log_err("Commit already in-progress txn-id: %" PRIu64
+ " session-id: %" PRIu64 ". Cannot start another",
+ txn->txn_id, txn->session_id);
+ return -1;
+ }
+
+ txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_COMMITCFG);
+ txn_req->req.commit_cfg.src_ds_id = src_ds_id;
+ txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
+ txn_req->req.commit_cfg.dst_ds_id = dst_ds_id;
+ txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
+ txn_req->req.commit_cfg.validate_only = validate_only;
+ txn_req->req.commit_cfg.abort = abort;
+ txn_req->req.commit_cfg.implicit = implicit;
+ txn_req->req.commit_cfg.edit = edit;
+ txn_req->req.commit_cfg.cmt_stats =
+ mgmt_fe_get_session_commit_stats(txn->session_id);
+
+ /*
+ * Trigger a COMMIT-CONFIG process.
+ */
+ mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+ return 0;
+}
+
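+/*
+ * Backend connect/disconnect notification. On connect, lock the running
+ * datastore and push the relevant config down to the newly connected client
+ * via an internal CONFIG transaction. On disconnect, fail any in-flight
+ * commit that involves the client.
+ */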
+int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
+ bool connect)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_txn_req *txn_req;
+ struct mgmt_commit_cfg_req *cmtcfg_req;
+ static struct mgmt_commit_stats dummy_stats;
+ struct nb_config_cbs *adapter_cfgs = NULL;
+ struct mgmt_ds_ctx *ds_ctx;
+
+ memset(&dummy_stats, 0, sizeof(dummy_stats));
+ if (connect) {
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_RUNNING);
+ assert(ds_ctx);
+
+ /*
+ * Lock the running datastore to prevent any changes while we
+ * are initializing the backend.
+ */
+ if (mgmt_ds_lock(ds_ctx, 0) != 0)
+ return -1;
+
+ /* Get config for this single backend client */
+ mgmt_be_get_adapter_config(adapter, &adapter_cfgs);
+ if (!adapter_cfgs || RB_EMPTY(nb_config_cbs, adapter_cfgs)) {
+ SET_FLAG(adapter->flags,
+ MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
+ mgmt_ds_unlock(ds_ctx);
+ return 0;
+ }
+
+ /*
+ * Create a CONFIG transaction to push the config changes
+ * provided to the backend client.
+ */
+ txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
+ if (!txn) {
+ __log_err("Failed to create CONFIG Transaction for downloading CONFIGs for client '%s'",
+ adapter->name);
+ mgmt_ds_unlock(ds_ctx);
+ nb_config_diff_del_changes(adapter_cfgs);
+ return -1;
+ }
+
+ __dbg("Created initial txn-id: %" PRIu64 " for BE client '%s'",
+ txn->txn_id, adapter->name);
+ /*
+ * Set the changeset for transaction to commit and trigger the
+ * commit request.
+ */
+ txn_req = mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
+ txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_NONE;
+ txn_req->req.commit_cfg.src_ds_ctx = 0;
+ txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_RUNNING;
+ txn_req->req.commit_cfg.dst_ds_ctx = ds_ctx;
+ txn_req->req.commit_cfg.validate_only = false;
+ txn_req->req.commit_cfg.abort = false;
+ txn_req->req.commit_cfg.init = true;
+ txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
+ txn_req->req.commit_cfg.cfg_chgs = adapter_cfgs;
+
+ /*
+ * Trigger a COMMIT-CONFIG process.
+ */
+ mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+
+ } else {
+ /*
+ * Check if any transaction is currently on-going that
+ * involves this backend client. If so, report that the
+ * transaction has failed.
+ */
+ FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
+ /* TODO: update with operational state when that is
+ * completed */
+ if (txn->type == MGMTD_TXN_TYPE_CONFIG) {
+ cmtcfg_req = txn->commit_cfg_req
+ ? &txn->commit_cfg_req->req
+ .commit_cfg
+ : NULL;
+ if (cmtcfg_req && IS_IDBIT_SET(cmtcfg_req->clients,
+ adapter->id)) {
+ mgmt_txn_send_commit_cfg_reply(
+ txn, MGMTD_INTERNAL_ERROR,
+ "Backend daemon disconnected while processing commit!");
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
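+/*
+ * Handle a backend TXN_REPLY: on a successful create, start streaming the
+ * CFGDATA batches to that backend; on failure, fail the commit.
+ */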
+int mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create, bool success,
+ struct mgmt_be_client_adapter *adapter)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
+
+ txn = mgmt_txn_id2ctx(txn_id);
+ if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
+ return -1;
+
+ if (!create && !txn->commit_cfg_req)
+ return 0;
+
+ assert(txn->commit_cfg_req);
+ cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+ if (create) {
+ if (success) {
+ /*
+ * Done with TXN_CREATE. Move the backend client to
+ * next phase.
+ */
+ assert(cmtcfg_req->phase ==
+ MGMTD_COMMIT_PHASE_TXN_CREATE);
+
+ /*
+ * Send CFGDATA_CREATE-REQs to the backend immediately.
+ */
+ mgmt_txn_send_be_cfg_data(txn, adapter);
+ } else {
+ mgmt_txn_send_commit_cfg_reply(
+ txn, MGMTD_INTERNAL_ERROR,
+ "Internal error! Failed to initiate transaction at backend!");
+ }
+ }
+
+ return 0;
+}
+
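+/*
+ * Handle a backend CFGDATA_CREATE reply: on success move that backend to the
+ * APPLY_CFG phase (and the whole commit once all backends are there); on
+ * failure, fail the commit.
+ */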
+int mgmt_txn_notify_be_cfgdata_reply(uint64_t txn_id, bool success,
+ char *error_if_any,
+ struct mgmt_be_client_adapter *adapter)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_commit_cfg_req *cmtcfg_req;
+
+ txn = mgmt_txn_id2ctx(txn_id);
+ if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
+ return -1;
+
+ if (!txn->commit_cfg_req)
+ return -1;
+ cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+
+ if (!success) {
+ __log_err("CFGDATA_CREATE_REQ sent to '%s' failed txn-id: %" PRIu64
+ " err: %s",
+ adapter->name, txn->txn_id,
+ error_if_any ? error_if_any : "None");
+ mgmt_txn_send_commit_cfg_reply(
+ txn, MGMTD_INTERNAL_ERROR,
+ error_if_any
+ ? error_if_any
+ : "Internal error! Failed to download config data to backend!");
+ return 0;
+ }
+
+ __dbg("CFGDATA_CREATE_REQ sent to '%s' was successful txn-id: %" PRIu64
+ " err: %s",
+ adapter->name, txn->txn_id, error_if_any ? error_if_any : "None");
+
+ cmtcfg_req->be_phase[adapter->id] = MGMTD_COMMIT_PHASE_APPLY_CFG;
+
+ mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
+
+ return 0;
+}
+
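+/*
+ * Handle a backend CFG_APPLY reply: on success mark the backend as synced,
+ * send it TXN_DELETE and try to advance the commit to the TXN_DELETE phase;
+ * on failure, fail the commit.
+ */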
+int mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
+ char *error_if_any,
+ struct mgmt_be_client_adapter *adapter)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
+
+ txn = mgmt_txn_id2ctx(txn_id);
+ if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG || !txn->commit_cfg_req)
+ return -1;
+
+ cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+
+ if (!success) {
+ __log_err("CFGDATA_APPLY_REQ sent to '%s' failed txn-id: %" PRIu64
+ " err: %s",
+ adapter->name, txn->txn_id,
+ error_if_any ? error_if_any : "None");
+ mgmt_txn_send_commit_cfg_reply(
+ txn, MGMTD_INTERNAL_ERROR,
+ error_if_any
+ ? error_if_any
+ : "Internal error! Failed to apply config data on backend!");
+ return 0;
+ }
+
+ cmtcfg_req->be_phase[adapter->id] = MGMTD_COMMIT_PHASE_TXN_DELETE;
+
+ /*
+ * All configuration for the specific backend has been applied.
+ * Send TXN-DELETE to wrap up the transaction for this backend.
+ */
+ SET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
+ mgmt_txn_send_be_txn_delete(txn, adapter);
+
+ mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
+ if (mm->perf_stats_en)
+ gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_end, NULL);
+
+ return 0;
+}
+
+int mgmt_txn_send_get_req(uint64_t txn_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id, struct nb_config *cfg_root,
+ Mgmtd__YangGetDataReq **data_req, size_t num_reqs)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_txn_req *txn_req;
+ enum mgmt_txn_event req_event;
+ size_t indx;
+
+ txn = mgmt_txn_id2ctx(txn_id);
+ if (!txn)
+ return -1;
+
+ req_event = MGMTD_TXN_PROC_GETCFG;
+ txn_req = mgmt_txn_req_alloc(txn, req_id, req_event);
+ txn_req->req.get_data->ds_id = ds_id;
+ txn_req->req.get_data->cfg_root = cfg_root;
+ for (indx = 0;
+ indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
+ indx++) {
+ __dbg("XPath: '%s'", data_req[indx]->data->xpath);
+ txn_req->req.get_data->xpaths[indx] =
+ strdup(data_req[indx]->data->xpath);
+ txn_req->req.get_data->num_xpaths++;
+ }
+
+ mgmt_txn_register_event(txn, req_event);
+
+ return 0;
+}
+
+
+/**
+ * Send get-tree requests to each client indicated in `clients` bitmask, which
+ * has registered operational state that matches the given `xpath`
+ */
+int mgmt_txn_send_get_tree_oper(uint64_t txn_id, uint64_t req_id,
+ uint64_t clients, Mgmtd__DatastoreId ds_id,
+ LYD_FORMAT result_type, uint8_t flags,
+ uint32_t wd_options, bool simple_xpath,
+ const char *xpath)
+{
+ struct mgmt_msg_get_tree *msg;
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_txn_req *txn_req;
+ struct txn_req_get_tree *get_tree;
+ enum mgmt_be_client_id id;
+ ssize_t slen = strlen(xpath);
+ int ret;
+
+ txn = mgmt_txn_id2ctx(txn_id);
+ if (!txn)
+ return -1;
+
+ /* If error in this function below here, be sure to free the req */
+ txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_GETTREE);
+ get_tree = txn_req->req.get_tree;
+ get_tree->result_type = result_type;
+ get_tree->wd_options = wd_options;
+ get_tree->exact = CHECK_FLAG(flags, GET_DATA_FLAG_EXACT);
+ get_tree->simple_xpath = simple_xpath;
+ get_tree->xpath = XSTRDUP(MTYPE_MGMTD_XPATH, xpath);
+
+ if (CHECK_FLAG(flags, GET_DATA_FLAG_CONFIG)) {
+ /*
+ * If the requested datastore is operational, get the config
+ * from running.
+ */
+ struct mgmt_ds_ctx *ds =
+ mgmt_ds_get_ctx_by_id(mm, ds_id == MGMTD_DS_OPERATIONAL
+ ? MGMTD_DS_RUNNING
+ : ds_id);
+ struct nb_config *config = mgmt_ds_get_nb_config(ds);
+
+ if (config) {
+ struct ly_set *set = NULL;
+ LY_ERR err;
+
+ err = lyd_find_xpath(config->dnode, xpath, &set);
+ if (err) {
+ get_tree->partial_error = err;
+ goto state;
+ }
+
+ /*
+ * If there's a single result, duplicate the returned
+ * node. If there are multiple results, duplicate the
+ * whole config and mark simple_xpath as false so the
+ * result is trimmed later in txn_get_tree_data_done.
+ */
+ if (set->count == 1) {
+ err = lyd_dup_single(set->dnodes[0], NULL,
+ LYD_DUP_WITH_PARENTS |
+ LYD_DUP_WITH_FLAGS |
+ LYD_DUP_RECURSIVE,
+ &get_tree->client_results);
+ if (!err)
+ while (get_tree->client_results->parent)
+ get_tree->client_results = lyd_parent(
+ get_tree->client_results);
+ } else if (set->count > 1) {
+ err = lyd_dup_siblings(config->dnode, NULL,
+ LYD_DUP_RECURSIVE |
+ LYD_DUP_WITH_FLAGS,
+ &get_tree->client_results);
+ if (!err)
+ get_tree->simple_xpath = false;
+ }
+
+ if (err)
+ get_tree->partial_error = err;
+
+ ly_set_free(set, NULL);
+ }
+ }
+state:
+ /* If we are only getting config, we are done */
+ if (!CHECK_FLAG(flags, GET_DATA_FLAG_STATE) ||
+ ds_id != MGMTD_DS_OPERATIONAL || !clients)
+ return txn_get_tree_data_done(txn, txn_req);
+
+ msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_get_tree, slen + 1,
+ MTYPE_MSG_NATIVE_GET_TREE);
+ msg->refer_id = txn_id;
+ msg->req_id = req_id;
+ msg->code = MGMT_MSG_CODE_GET_TREE;
+ /* Always operate with the binary format in the backend */
+ msg->result_type = LYD_LYB;
+ strlcpy(msg->xpath, xpath, slen + 1);
+
+ assert(clients);
+ FOREACH_BE_CLIENT_BITS (id, clients) {
+ ret = mgmt_be_send_native(id, msg);
+ if (ret) {
+ __log_err("Could not send get-tree message to backend client %s",
+ mgmt_be_client_id2name(id));
+ continue;
+ }
+
+ __dbg("Sent get-tree req to backend client %s",
+ mgmt_be_client_id2name(id));
+
+ /* record that we sent the request to the client */
+ get_tree->sent_clients |= (1u << id);
+ }
+
+ mgmt_msg_native_free_msg(msg);
+
+ /* Return if we didn't send any messages to backends */
+ if (!get_tree->sent_clients)
+ return txn_get_tree_data_done(txn, txn_req);
+
+ /* Start timeout timer - pulled out of register event code so we can
+ * pass a different arg
+ */
+ event_add_timer(mgmt_txn_tm, txn_get_tree_timeout, txn_req,
+ MGMTD_TXN_GET_TREE_MAX_DELAY_SEC,
+ &txn->get_tree_timeout);
+ return 0;
+}
+
+int mgmt_txn_send_edit(uint64_t txn_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id, struct mgmt_ds_ctx *ds_ctx,
+ Mgmtd__DatastoreId commit_ds_id,
+ struct mgmt_ds_ctx *commit_ds_ctx, bool unlock,
+ bool commit, LYD_FORMAT request_type, uint8_t flags,
+ uint8_t operation, const char *xpath, const char *data)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_edit_req *edit;
+ struct nb_config *nb_config;
+ char errstr[BUFSIZ];
+ int ret;
+
+ txn = mgmt_txn_id2ctx(txn_id);
+ if (!txn)
+ return -1;
+
+ edit = XCALLOC(MTYPE_MGMTD_TXN_REQ, sizeof(struct mgmt_edit_req));
+
+ nb_config = mgmt_ds_get_nb_config(ds_ctx);
+ assert(nb_config);
+
+ ret = nb_candidate_edit_tree(nb_config, operation, request_type, xpath,
+ data, edit->xpath_created, errstr,
+ sizeof(errstr));
+ if (ret)
+ goto reply;
+
+ if (commit) {
+ edit->unlock = unlock;
+
+ mgmt_txn_send_commit_config_req(txn_id, req_id, ds_id, ds_ctx,
+ commit_ds_id, commit_ds_ctx,
+ false, false, true, edit);
+ return 0;
+ }
+reply:
+ mgmt_fe_adapter_send_edit_reply(txn->session_id, txn->txn_id, req_id,
+ unlock, commit, edit->xpath_created,
+ ret ? -1 : 0, errstr);
+
+ XFREE(MTYPE_MGMTD_TXN_REQ, edit);
+
+ return 0;
+}
+
+int mgmt_txn_send_rpc(uint64_t txn_id, uint64_t req_id, uint64_t clients,
+ LYD_FORMAT result_type, const char *xpath,
+ const char *data, size_t data_len)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_txn_req *txn_req;
+ struct mgmt_msg_rpc *msg;
+ struct txn_req_rpc *rpc;
+ uint64_t id;
+ int ret;
+
+ txn = mgmt_txn_id2ctx(txn_id);
+ if (!txn)
+ return -1;
+
+ txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_RPC);
+ rpc = txn_req->req.rpc;
+ rpc->xpath = XSTRDUP(MTYPE_MGMTD_XPATH, xpath);
+ rpc->result_type = result_type;
+
+ msg = mgmt_msg_native_alloc_msg(struct mgmt_msg_rpc, 0,
+ MTYPE_MSG_NATIVE_RPC);
+ msg->refer_id = txn_id;
+ msg->req_id = req_id;
+ msg->code = MGMT_MSG_CODE_RPC;
+ msg->request_type = result_type;
+
+ mgmt_msg_native_xpath_encode(msg, xpath);
+ if (data)
+ mgmt_msg_native_append(msg, data, data_len);
+
+ assert(clients);
+ FOREACH_BE_CLIENT_BITS (id, clients) {
+ ret = mgmt_be_send_native(id, msg);
+ if (ret) {
+ __log_err("Could not send rpc message to backend client %s",
+ mgmt_be_client_id2name(id));
+ continue;
+ }
+
+ __dbg("Sent rpc req to backend client %s",
+ mgmt_be_client_id2name(id));
+
+ /* record that we sent the request to the client */
+ rpc->sent_clients |= (1u << id);
+ }
+
+ mgmt_msg_native_free_msg(msg);
+
+ if (!rpc->sent_clients)
+ return txn_rpc_done(txn, txn_req);
+
+ event_add_timer(mgmt_txn_tm, txn_rpc_timeout, txn_req,
+ MGMTD_TXN_RPC_MAX_DELAY_SEC, &txn->rpc_timeout);
+
+ return 0;
+}
+
+/*
+ * Error reply from the backend client.
+ */
+int mgmt_txn_notify_error(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, uint64_t req_id, int error,
+ const char *errstr)
+{
+ enum mgmt_be_client_id id = adapter->id;
+ struct mgmt_txn_ctx *txn = mgmt_txn_id2ctx(txn_id);
+ struct txn_req_get_tree *get_tree;
+ struct txn_req_rpc *rpc;
+ struct mgmt_txn_req *txn_req;
+
+ if (!txn) {
+ __log_err("Error reply from %s cannot find txn-id %" PRIu64,
+ adapter->name, txn_id);
+ return -1;
+ }
+
+ /* Find the request. */
+ FOREACH_TXN_REQ_IN_LIST (&txn->get_tree_reqs, txn_req)
+ if (txn_req->req_id == req_id)
+ break;
+ if (!txn_req)
+ FOREACH_TXN_REQ_IN_LIST (&txn->rpc_reqs, txn_req)
+ if (txn_req->req_id == req_id)
+ break;
+ if (!txn_req) {
+ __log_err("Error reply from %s for txn-id %" PRIu64
+ " cannot find req_id %" PRIu64,
+ adapter->name, txn_id, req_id);
+ return -1;
+ }
+
+ __log_err("Error reply from %s for txn-id %" PRIu64 " req_id %" PRIu64,
+ adapter->name, txn_id, req_id);
+
+ switch (txn_req->req_event) {
+ case MGMTD_TXN_PROC_GETTREE:
+ get_tree = txn_req->req.get_tree;
+ get_tree->recv_clients |= (1u << id);
+ get_tree->partial_error = error;
+
+ /* check if done yet */
+ if (get_tree->recv_clients != get_tree->sent_clients)
+ return 0;
+ return txn_get_tree_data_done(txn, txn_req);
+ case MGMTD_TXN_PROC_RPC:
+ rpc = txn_req->req.rpc;
+ rpc->recv_clients |= (1u << id);
+ if (errstr) {
+ XFREE(MTYPE_MGMTD_ERR, rpc->errstr);
+ rpc->errstr = XSTRDUP(MTYPE_MGMTD_ERR, errstr);
+ }
+ /* check if done yet */
+ if (rpc->recv_clients != rpc->sent_clients)
+ return 0;
+ return txn_rpc_done(txn, txn_req);
+
+ /* non-native message events */
+ case MGMTD_TXN_PROC_SETCFG:
+ case MGMTD_TXN_PROC_COMMITCFG:
+ case MGMTD_TXN_PROC_GETCFG:
+ case MGMTD_TXN_COMMITCFG_TIMEOUT:
+ default:
+ assert(!"non-native req event in native error path");
+ return -1;
+ }
+}
+
+/*
+ * Get-tree data from the backend client.
+ */
+int mgmt_txn_notify_tree_data_reply(struct mgmt_be_client_adapter *adapter,
+ struct mgmt_msg_tree_data *data_msg,
+ size_t msg_len)
+{
+ uint64_t txn_id = data_msg->refer_id;
+ uint64_t req_id = data_msg->req_id;
+
+ enum mgmt_be_client_id id = adapter->id;
+ struct mgmt_txn_ctx *txn = mgmt_txn_id2ctx(txn_id);
+ struct mgmt_txn_req *txn_req;
+ struct txn_req_get_tree *get_tree;
+ struct lyd_node *tree = NULL;
+ LY_ERR err;
+
+ if (!txn) {
+ __log_err("GETTREE reply from %s for a missing txn-id %" PRIu64,
+ adapter->name, txn_id);
+ return -1;
+ }
+
+ /* Find the request. */
+ FOREACH_TXN_REQ_IN_LIST (&txn->get_tree_reqs, txn_req)
+ if (txn_req->req_id == req_id)
+ break;
+ if (!txn_req) {
+ __log_err("GETTREE reply from %s for txn-id %" PRIu64
+ " missing req_id %" PRIu64,
+ adapter->name, txn_id, req_id);
+ return -1;
+ }
+
+ get_tree = txn_req->req.get_tree;
+
+ /* store the result */
+ err = lyd_parse_data_mem(ly_native_ctx, (const char *)data_msg->result,
+ data_msg->result_type,
+ LYD_PARSE_STRICT | LYD_PARSE_ONLY,
+ 0 /*LYD_VALIDATE_OPERATIONAL*/, &tree);
+ if (err) {
+ __log_err("GETTREE reply from %s for txn-id %" PRIu64
+ " req_id %" PRIu64 " error parsing result of type %u",
+ adapter->name, txn_id, req_id, data_msg->result_type);
+ }
+ if (!err) {
+ /* TODO: we could merge ly_errs here if it's not binary */
+
+ if (!get_tree->client_results)
+ get_tree->client_results = tree;
+ else
+ err = lyd_merge_siblings(&get_tree->client_results,
+ tree, LYD_MERGE_DESTRUCT);
+ if (err) {
+ __log_err("GETTREE reply from %s for txn-id %" PRIu64
+ " req_id %" PRIu64 " error merging result",
+ adapter->name, txn_id, req_id);
+ }
+ }
+ if (!get_tree->partial_error)
+ get_tree->partial_error = (data_msg->partial_error
+ ? data_msg->partial_error
+ : (int)err);
+
+ if (!data_msg->more)
+ get_tree->recv_clients |= (1u << id);
+
+ /* check if done yet */
+ if (get_tree->recv_clients != get_tree->sent_clients)
+ return 0;
+
+ return txn_get_tree_data_done(txn, txn_req);
+}
+
+int mgmt_txn_notify_rpc_reply(struct mgmt_be_client_adapter *adapter,
+ struct mgmt_msg_rpc_reply *reply_msg,
+ size_t msg_len)
+{
+ uint64_t txn_id = reply_msg->refer_id;
+ uint64_t req_id = reply_msg->req_id;
+ enum mgmt_be_client_id id = adapter->id;
+ struct mgmt_txn_ctx *txn = mgmt_txn_id2ctx(txn_id);
+ struct mgmt_txn_req *txn_req;
+ struct txn_req_rpc *rpc;
+ struct lyd_node *tree;
+ size_t data_len = msg_len - sizeof(*reply_msg);
+ LY_ERR err = LY_SUCCESS;
+
+ if (!txn) {
+ __log_err("RPC reply from %s for a missing txn-id %" PRIu64,
+ adapter->name, txn_id);
+ return -1;
+ }
+
+ /* Find the request. */
+ FOREACH_TXN_REQ_IN_LIST (&txn->rpc_reqs, txn_req)
+ if (txn_req->req_id == req_id)
+ break;
+ if (!txn_req) {
+ __log_err("RPC reply from %s for txn-id %" PRIu64
+ " missing req_id %" PRIu64,
+ adapter->name, txn_id, req_id);
+ return -1;
+ }
+
+ rpc = txn_req->req.rpc;
+
+ tree = NULL;
+ if (data_len)
+ err = yang_parse_rpc(rpc->xpath, reply_msg->result_type,
+ reply_msg->data, true, &tree);
+ if (err) {
+ __log_err("RPC reply from %s for txn-id %" PRIu64
+ " req_id %" PRIu64 " error parsing result of type %u: %s",
+ adapter->name, txn_id, req_id, reply_msg->result_type,
+ ly_strerrcode(err));
+ }
+ if (!err && tree) {
+ if (!rpc->client_results)
+ rpc->client_results = tree;
+ else
+ err = lyd_merge_siblings(&rpc->client_results, tree,
+ LYD_MERGE_DESTRUCT);
+ if (err) {
+ __log_err("RPC reply from %s for txn-id %" PRIu64
+ " req_id %" PRIu64 " error merging result: %s",
+ adapter->name, txn_id, req_id,
+ ly_strerrcode(err));
+ }
+ }
+ if (err) {
+ XFREE(MTYPE_MGMTD_ERR, rpc->errstr);
+ rpc->errstr = XSTRDUP(MTYPE_MGMTD_ERR,
+ "Cannot parse result from the backend");
+ }
+
+ rpc->recv_clients |= (1u << id);
+
+ /* check if done yet */
+ if (rpc->recv_clients != rpc->sent_clients)
+ return 0;
+
+ return txn_rpc_done(txn, txn_req);
+}
+
+void mgmt_txn_status_write(struct vty *vty)
+{
+ struct mgmt_txn_ctx *txn;
+
+ vty_out(vty, "MGMTD Transactions\n");
+
+ FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
+ vty_out(vty, " Txn: \t\t\t0x%p\n", txn);
+ vty_out(vty, " Txn-Id: \t\t\t%" PRIu64 "\n", txn->txn_id);
+ vty_out(vty, " Session-Id: \t\t%" PRIu64 "\n",
+ txn->session_id);
+ vty_out(vty, " Type: \t\t\t%s\n",
+ mgmt_txn_type2str(txn->type));
+ vty_out(vty, " Ref-Count: \t\t\t%d\n", txn->refcount);
+ }
+ vty_out(vty, " Total: %d\n",
+ (int)mgmt_txns_count(&mgmt_txn_mm->txn_list));
+}
+
+int mgmt_txn_rollback_trigger_cfg_apply(struct mgmt_ds_ctx *src_ds_ctx,
+ struct mgmt_ds_ctx *dst_ds_ctx)
+{
+ static struct nb_config_cbs changes;
+ static struct mgmt_commit_stats dummy_stats;
+
+ struct nb_config_cbs *cfg_chgs = NULL;
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_txn_req *txn_req;
+
+ memset(&changes, 0, sizeof(changes));
+ memset(&dummy_stats, 0, sizeof(dummy_stats));
+ /*
+ * This could be the case when the config is directly
+ * loaded onto the candidate DS from a file. Get the
+ * diff from a full comparison of the candidate and
+ * running DSs.
+ */
+ nb_config_diff(mgmt_ds_get_nb_config(dst_ds_ctx),
+ mgmt_ds_get_nb_config(src_ds_ctx), &changes);
+ cfg_chgs = &changes;
+
+ if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
+ /*
+ * This means there are no changes to commit, whatever
+ * the source of the config changes was.
+ */
+ return -1;
+ }
+
+ /*
+ * Create a CONFIG transaction to push the config changes
+ * provided to the backend client.
+ */
+ txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
+ if (!txn) {
+ __log_err(
+ "Failed to create CONFIG Transaction for downloading CONFIGs");
+ return -1;
+ }
+
+ __dbg("Created rollback txn-id: %" PRIu64, txn->txn_id);
+
+ /*
+ * Set the changeset for transaction to commit and trigger the commit
+ * request.
+ */
+ txn_req = mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
+ txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_CANDIDATE;
+ txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
+ txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_RUNNING;
+ txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
+ txn_req->req.commit_cfg.validate_only = false;
+ txn_req->req.commit_cfg.abort = false;
+ txn_req->req.commit_cfg.rollback = true;
+ txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
+ txn_req->req.commit_cfg.cfg_chgs = cfg_chgs;
+
+ /*
+ * Trigger a COMMIT-CONFIG process.
+ */
+ mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+ return 0;
+}
diff --git a/mgmtd/mgmt_txn.h b/mgmtd/mgmt_txn.h
new file mode 100644
index 00000000..b6ca2886
--- /dev/null
+++ b/mgmtd/mgmt_txn.h
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Transactions
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_TXN_H_
+#define _FRR_MGMTD_TXN_H_
+
+#include "lib/mgmt_msg_native.h"
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_ds.h"
+
+#define MGMTD_TXN_PROC_DELAY_USEC 10
+#define MGMTD_TXN_MAX_NUM_SETCFG_PROC 128
+#define MGMTD_TXN_MAX_NUM_GETCFG_PROC 128
+#define MGMTD_TXN_MAX_NUM_GETDATA_PROC 128
+
+#define MGMTD_TXN_CFG_COMMIT_MAX_DELAY_SEC 600
+#define MGMTD_TXN_GET_TREE_MAX_DELAY_SEC 600
+#define MGMTD_TXN_RPC_MAX_DELAY_SEC 60
+
+#define MGMTD_TXN_CLEANUP_DELAY_USEC 10
+
+#define MGMTD_TXN_ID_NONE 0
+
+/*
+ * The following definition enables local validation of config
+ * on the MGMTD process by loading client-defined NB callbacks
+ * and calling them locally before sending CNFG_APPLY_REQ to
+ * backend for actual apply of configuration on internal state
+ * of the backend application.
+ *
+ * #define MGMTD_LOCAL_VALIDATIONS_ENABLED
+ *
+ * Note: this is enabled by default in configure.ac; to disable it,
+ * pass --enable-mgmtd-local-validations=no to ./configure.
+ */
+
+PREDECL_LIST(mgmt_txns);
+
+struct mgmt_master;
+struct mgmt_edit_req;
+
+enum mgmt_txn_type {
+ MGMTD_TXN_TYPE_NONE = 0,
+ MGMTD_TXN_TYPE_CONFIG,
+ MGMTD_TXN_TYPE_SHOW,
+ MGMTD_TXN_TYPE_RPC,
+};
+
+static inline const char *mgmt_txn_type2str(enum mgmt_txn_type type)
+{
+ switch (type) {
+ case MGMTD_TXN_TYPE_NONE:
+ return "None";
+ case MGMTD_TXN_TYPE_CONFIG:
+ return "CONFIG";
+ case MGMTD_TXN_TYPE_SHOW:
+ return "SHOW";
+ case MGMTD_TXN_TYPE_RPC:
+ return "RPC";
+ }
+
+ return "Unknown";
+}
+
+/* Initialise transaction module. */
+extern int mgmt_txn_init(struct mgmt_master *cm, struct event_loop *tm);
+
+/* Destroy the transaction module. */
+extern void mgmt_txn_destroy(void);
+
+/*
+ * Check if configuration transaction is in progress.
+ *
+ * Returns:
+ * true if in-progress, false otherwise.
+ */
+extern bool mgmt_config_txn_in_progress(void);
+
+/**
+ * Get the session ID associated with the given ``txn-id``.
+ *
+ */
+extern uint64_t mgmt_txn_get_session_id(uint64_t txn_id);
+
+/*
+ * Create transaction.
+ *
+ * session_id
+ * Session ID.
+ *
+ * type
+ * Transaction type (CONFIG/SHOW/RPC/NONE)
+ *
+ * Returns:
+ * transaction ID.
+ */
+extern uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type);
+
+/*
+ * Destroy transaction.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ */
+extern void mgmt_destroy_txn(uint64_t *txn_id);
+
+/*
+ * Send set-config request to be processed later in transaction.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * req_id
+ * Unique transaction request identifier.
+ *
+ * ds_id
+ * Datastore ID.
+ *
+ * ds_ctx
+ * Datastore context.
+ *
+ * cfg_req
+ * Config requests.
+ *
+ * num_req
+ * Number of config requests.
+ *
+ * implicit_commit
+ * TRUE if the commit is implicit, FALSE otherwise.
+ *
+ * dst_ds_id
+ * Destination datastore ID.
+ *
+ * dst_ds_ctx
+ * Destination datastore context.
+ *
+ * Returns:
+ * 0 on success, -1 on failures.
+ */
+extern int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ struct mgmt_ds_ctx *ds_ctx,
+ Mgmtd__YangCfgDataReq **cfg_req,
+ size_t num_req, bool implicit_commit,
+ Mgmtd__DatastoreId dst_ds_id,
+ struct mgmt_ds_ctx *dst_ds_ctx);
+
+/*
+ * Send commit-config request to be processed later in transaction.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * req_id
+ * Unique transaction request identifier.
+ *
+ * src_ds_id
+ * Source datastore ID.
+ *
+ * src_ds_ctx
+ * Source datastore context.
+ *
+ * dst_ds_id
+ * Destination datastore ID.
+ *
+ * dst_ds_ctx
+ * Destination datastore context.
+ *
+ * validate_only
+ * TRUE if commit request needs to be validated only, FALSE otherwise.
+ *
+ * abort
+ * TRUE if the source DS needs to be restored back to the contents of
+ * the destination DS, FALSE otherwise.
+ *
+ * implicit
+ * TRUE if the commit is implicit, FALSE otherwise.
+ *
+ * edit
+ * Additional info when triggered from native edit request.
+ *
+ * Returns:
+ * 0 on success, -1 on failures.
+ */
+extern int mgmt_txn_send_commit_config_req(
+ uint64_t txn_id, uint64_t req_id, Mgmtd__DatastoreId src_ds_id,
+ struct mgmt_ds_ctx *dst_ds_ctx, Mgmtd__DatastoreId dst_ds_id,
+ struct mgmt_ds_ctx *src_ds_ctx, bool validate_only, bool abort,
+ bool implicit, struct mgmt_edit_req *edit);
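+
+/*
+ * Illustrative usage sketch only (all variable names are placeholders, not
+ * part of the API): a plain candidate-to-running commit, with validate_only,
+ * abort and implicit all false and no native edit context.
+ *
+ *   mgmt_txn_send_commit_config_req(txn_id, req_id,
+ *                                   MGMTD_DS_CANDIDATE, dst_ds_ctx,
+ *                                   MGMTD_DS_RUNNING, src_ds_ctx,
+ *                                   false, false, false, NULL);
+ */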
+
+/*
+ * Send get-{cfg,data} request to be processed later in transaction.
+ *
+ * This is a get-config request if cfg_root is provided (the config is
+ * gathered locally); otherwise it is a get-data request and the data is
+ * fetched from backend clients.
+ */
+extern int mgmt_txn_send_get_req(uint64_t txn_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ struct nb_config *cfg_root,
+ Mgmtd__YangGetDataReq **data_req,
+ size_t num_reqs);
+
+
+/**
+ * Send get-tree to the backend `clients`.
+ *
+ * Args:
+ * txn_id: Transaction identifier.
+ * req_id: FE client request identifier.
+ * clients: Bitmask of clients to send get-tree to.
+ * ds_id: datastore ID.
+ * result_type: LYD_FORMAT result format.
+ * flags: option flags for the request.
+ * wd_options: LYD_PRINT_WD_* flags for the result.
+ * simple_xpath: true if xpath is simple (only key predicates).
+ * xpath: The xpath to get the tree from.
+ *
+ * Return:
+ * 0 on success.
+ */
+extern int mgmt_txn_send_get_tree_oper(uint64_t txn_id, uint64_t req_id,
+ uint64_t clients,
+ Mgmtd__DatastoreId ds_id,
+ LYD_FORMAT result_type, uint8_t flags,
+ uint32_t wd_options, bool simple_xpath,
+ const char *xpath);
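+
+/*
+ * Illustrative usage sketch only (the `clients` bitmask is assumed to come
+ * from the backend xpath registry, and the xpath is just an example): fetch
+ * operational state for a subtree as JSON from the interested backends, with
+ * no with-defaults handling (wd_options 0) and a simple xpath (true).
+ *
+ *   mgmt_txn_send_get_tree_oper(txn_id, req_id, clients,
+ *                               MGMTD_DS_OPERATIONAL, LYD_JSON,
+ *                               GET_DATA_FLAG_STATE, 0, true,
+ *                               "/frr-interface:lib/interface");
+ */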
+
+/**
+ * Send edit request.
+ *
+ * Args:
+ * txn_id: Transaction identifier.
+ * req_id: FE client request identifier.
+ * ds_id: Datastore ID.
+ * ds_ctx: Datastore context.
+ * commit_ds_id: Commit datastore ID.
+ * commit_ds_ctx: Commit datastore context.
+ * unlock: Unlock datastores after the edit.
+ * commit: Commit the candidate datastore after the edit.
+ * request_type: LYD_FORMAT request type.
+ * flags: option flags for the request.
+ * operation: The operation to perform.
+ * xpath: The xpath of data node to edit.
+ * data: The data tree.
+ */
+extern int
+mgmt_txn_send_edit(uint64_t txn_id, uint64_t req_id, Mgmtd__DatastoreId ds_id,
+ struct mgmt_ds_ctx *ds_ctx, Mgmtd__DatastoreId commit_ds_id,
+ struct mgmt_ds_ctx *commit_ds_ctx, bool unlock, bool commit,
+ LYD_FORMAT request_type, uint8_t flags, uint8_t operation,
+ const char *xpath, const char *data);
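+
+/*
+ * Illustrative usage sketch only (variable names are placeholders): merge a
+ * JSON subtree into the candidate datastore and commit it to running in the
+ * same request (unlock false, commit true, no extra flags).
+ *
+ *   mgmt_txn_send_edit(txn_id, req_id,
+ *                      MGMTD_DS_CANDIDATE, candidate_ds_ctx,
+ *                      MGMTD_DS_RUNNING, running_ds_ctx,
+ *                      false, true, LYD_JSON, 0,
+ *                      NB_OP_MODIFY, xpath, json_data);
+ */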
+
+/**
+ * Send RPC request.
+ *
+ * Args:
+ * txn_id: Transaction identifier.
+ * req_id: FE client request identifier.
+ * clients: Bitmask of clients to send RPC to.
+ * result_type: LYD_FORMAT result format.
+ * xpath: The xpath of the RPC.
+ * data: The input parameters data tree.
+ * data_len: The length of the input parameters data.
+ *
+ * Return:
+ * 0 on success.
+ */
+extern int mgmt_txn_send_rpc(uint64_t txn_id, uint64_t req_id, uint64_t clients,
+ LYD_FORMAT result_type, const char *xpath,
+ const char *data, size_t data_len);
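+
+/*
+ * Illustrative usage sketch only (the `clients` bitmask is assumed to be the
+ * set of backends that registered the RPC xpath, and the xpath is just an
+ * example): invoke an RPC with no input data and ask for the result as JSON.
+ *
+ *   mgmt_txn_send_rpc(txn_id, req_id, clients, LYD_JSON,
+ *                     "/frr-ripd:clear-rip-route", NULL, 0);
+ */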
+
+/*
+ * Notify the transaction module about backend adapter connect/disconnect.
+ */
+extern int
+mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
+ bool connect);
+
+/*
+ * Process a reply from the backend adapter about transaction create/delete.
+ */
+extern int
+mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create, bool success,
+ struct mgmt_be_client_adapter *adapter);
+
+/*
+ * Process a reply from the backend adapter to a config data create request.
+ */
+extern int
+mgmt_txn_notify_be_cfgdata_reply(uint64_t txn_id, bool success,
+ char *error_if_any,
+ struct mgmt_be_client_adapter *adapter);
+
+/*
+ * Process a reply from the backend adapter to a config data validate request.
+ */
+extern int mgmt_txn_notify_be_cfg_validate_reply(
+ uint64_t txn_id, bool success, uint64_t batch_ids[],
+ size_t num_batch_ids, char *error_if_any,
+ struct mgmt_be_client_adapter *adapter);
+
+/*
+ * Process a reply from the backend adapter to a config data apply request.
+ */
+extern int
+mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
+ char *error_if_any,
+ struct mgmt_be_client_adapter *adapter);
+
+
+/**
+ * Process an error reply from a backend client to one of our native
+ * requests (get-tree or RPC).
+ *
+ * Args:
+ * adapter: The adapter that received the result.
+ * txn_id: The transaction for this get-tree request.
+ * req_id: The request ID for this transaction.
+ * error: the integer error value (negative)
+ * errstr: the string description of the error.
+ */
+int mgmt_txn_notify_error(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, uint64_t req_id, int error,
+ const char *errstr);
+
+/**
+ * Process a reply from a backend client to our get-tree request
+ *
+ * Args:
+ * adapter: The adapter that received the result.
+ * data_msg: The message from the backend.
+ * msg_len: Total length of the message.
+ */
+extern int mgmt_txn_notify_tree_data_reply(struct mgmt_be_client_adapter *adapter,
+ struct mgmt_msg_tree_data *data_msg,
+ size_t msg_len);
+
+/**
+ * Process a reply from a backend client to our RPC request
+ *
+ * Args:
+ * adapter: The adapter that received the result.
+ * reply_msg: The message from the backend.
+ * msg_len: Total length of the message.
+ */
+extern int mgmt_txn_notify_rpc_reply(struct mgmt_be_client_adapter *adapter,
+ struct mgmt_msg_rpc_reply *reply_msg,
+ size_t msg_len);
+
+/*
+ * Dump transaction status to vty.
+ */
+extern void mgmt_txn_status_write(struct vty *vty);
+
+/*
+ * Trigger rollback config apply.
+ *
+ * Creates a new transaction and commit request for rollback.
+ */
+extern int
+mgmt_txn_rollback_trigger_cfg_apply(struct mgmt_ds_ctx *src_ds_ctx,
+ struct mgmt_ds_ctx *dst_ds_ctx);
+#endif /* _FRR_MGMTD_TXN_H_ */
diff --git a/mgmtd/mgmt_vty.c b/mgmtd/mgmt_vty.c
new file mode 100644
index 00000000..8ccb4635
--- /dev/null
+++ b/mgmtd/mgmt_vty.c
@@ -0,0 +1,738 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD VTY Interface
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+
+#include "affinitymap.h"
+#include "command.h"
+#include "filter.h"
+#include "json.h"
+#include "keychain.h"
+#include "network.h"
+#include "northbound_cli.h"
+#include "routemap.h"
+
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+
+#include "mgmtd/mgmt_vty_clippy.c"
+#include "ripd/rip_nb.h"
+#include "ripngd/ripng_nb.h"
+#include "staticd/static_vty.h"
+#include "zebra/zebra_cli.h"
+
+extern struct frr_daemon_info *mgmt_daemon_info;
+
+DEFPY(show_mgmt_be_adapter,
+ show_mgmt_be_adapter_cmd,
+ "show mgmt backend-adapter all",
+ SHOW_STR
+ MGMTD_STR
+ MGMTD_BE_ADAPTER_STR
+ "Display all Backend Adapters\n")
+{
+ mgmt_be_adapter_status_write(vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_be_xpath_reg,
+ show_mgmt_be_xpath_reg_cmd,
+ "show mgmt backend-yang-xpath-registry",
+ SHOW_STR
+ MGMTD_STR
+ "Backend Adapter YANG Xpath Registry\n")
+{
+ mgmt_be_xpath_register_write(vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_fe_adapter, show_mgmt_fe_adapter_cmd,
+ "show mgmt frontend-adapter all [detail$detail]",
+ SHOW_STR
+ MGMTD_STR
+ MGMTD_FE_ADAPTER_STR
+ "Display all Frontend Adapters\n"
+ "Display more details\n")
+{
+ mgmt_fe_adapter_status_write(vty, !!detail);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY_HIDDEN(mgmt_performance_measurement,
+ mgmt_performance_measurement_cmd,
+ "[no] mgmt performance-measurement",
+ NO_STR
+ MGMTD_STR
+ "Enable performance measurement\n")
+{
+ if (no)
+ mgmt_fe_adapter_perf_measurement(vty, false);
+ else
+ mgmt_fe_adapter_perf_measurement(vty, true);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_reset_performance_stats,
+ mgmt_reset_performance_stats_cmd,
+ "mgmt reset-statistics",
+ MGMTD_STR
+ "Reset the Performance measurement statistics\n")
+{
+ mgmt_fe_adapter_reset_perf_stats(vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_txn,
+ show_mgmt_txn_cmd,
+ "show mgmt transaction all",
+ SHOW_STR
+ MGMTD_STR
+ MGMTD_TXN_STR
+ "Display all Transactions\n")
+{
+ mgmt_txn_status_write(vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_ds,
+ show_mgmt_ds_cmd,
+ "show mgmt datastore [all|candidate|operational|running]$dsname",
+ SHOW_STR
+ MGMTD_STR
+ MGMTD_DS_STR
+ "All datastores (default)\n"
+ "Candidate datastore\n"
+ "Operational datastore\n"
+ "Running datastore\n")
+{
+ struct mgmt_ds_ctx *ds_ctx;
+
+ if (!dsname || dsname[0] == 'a') {
+ mgmt_ds_status_write(vty);
+ return CMD_SUCCESS;
+ }
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, mgmt_ds_name2id(dsname));
+ if (!ds_ctx) {
+ vty_out(vty, "ERROR: Could not access %s datastore!\n", dsname);
+ return CMD_ERR_NO_MATCH;
+ }
+ mgmt_ds_status_write_one(vty, ds_ctx);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_commit,
+ mgmt_commit_cmd,
+ "mgmt commit <check|apply|abort>$type",
+ MGMTD_STR
+ "Commit action\n"
+ "Validate the set of config commands\n"
+ "Validate and apply the set of config commands\n"
+ "Abort and drop the set of config commands recently added\n")
+{
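+ /*
+ * "check" starts with 'c' and selects validate-only; "apply" and "abort"
+ * both start with 'a', so the second character ('b') identifies abort.
+ */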
+ bool validate_only = type[0] == 'c';
+ bool abort = type[1] == 'b';
+
+ if (vty_mgmt_send_commit_config(vty, validate_only, abort) != 0)
+ return CMD_WARNING_CONFIG_FAILED;
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_create_config_data, mgmt_create_config_data_cmd,
+ "mgmt create-config WORD$path VALUE",
+ MGMTD_STR
+ "Create configuration data\n"
+ "XPath expression specifying the YANG data path\n"
+ "Value of the data to create\n")
+{
+ strlcpy(vty->cfg_changes[0].xpath, path,
+ sizeof(vty->cfg_changes[0].xpath));
+ vty->cfg_changes[0].value = value;
+ vty->cfg_changes[0].operation = NB_OP_CREATE_EXCL;
+ vty->num_cfg_changes = 1;
+
+ vty_mgmt_send_config_data(vty, NULL, false);
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_set_config_data, mgmt_set_config_data_cmd,
+ "mgmt set-config WORD$path VALUE",
+ MGMTD_STR
+ "Set configuration data\n"
+ "XPath expression specifying the YANG data path\n"
+ "Value of the data to set\n")
+{
+ strlcpy(vty->cfg_changes[0].xpath, path,
+ sizeof(vty->cfg_changes[0].xpath));
+ vty->cfg_changes[0].value = value;
+ vty->cfg_changes[0].operation = NB_OP_MODIFY;
+ vty->num_cfg_changes = 1;
+
+ vty_mgmt_send_config_data(vty, NULL, false);
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_delete_config_data, mgmt_delete_config_data_cmd,
+ "mgmt delete-config WORD$path",
+ MGMTD_STR
+ "Delete configuration data\n"
+ "XPath expression specifying the YANG data path\n")
+{
+
+ strlcpy(vty->cfg_changes[0].xpath, path,
+ sizeof(vty->cfg_changes[0].xpath));
+ vty->cfg_changes[0].value = NULL;
+ vty->cfg_changes[0].operation = NB_OP_DELETE;
+ vty->num_cfg_changes = 1;
+
+ vty_mgmt_send_config_data(vty, NULL, false);
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_remove_config_data, mgmt_remove_config_data_cmd,
+ "mgmt remove-config WORD$path",
+ MGMTD_STR
+ "Remove configuration data\n"
+ "XPath expression specifying the YANG data path\n")
+{
+
+ strlcpy(vty->cfg_changes[0].xpath, path,
+ sizeof(vty->cfg_changes[0].xpath));
+ vty->cfg_changes[0].value = NULL;
+ vty->cfg_changes[0].operation = NB_OP_DESTROY;
+ vty->num_cfg_changes = 1;
+
+ vty_mgmt_send_config_data(vty, NULL, false);
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_replace_config_data, mgmt_replace_config_data_cmd,
+ "mgmt replace-config WORD$path VALUE",
+ MGMTD_STR
+ "Replace configuration data\n"
+ "XPath expression specifying the YANG data path\n"
+ "Value of the data to set\n")
+{
+
+ strlcpy(vty->cfg_changes[0].xpath, path,
+ sizeof(vty->cfg_changes[0].xpath));
+ vty->cfg_changes[0].value = value;
+ vty->cfg_changes[0].operation = NB_OP_REPLACE;
+ vty->num_cfg_changes = 1;
+
+ vty_mgmt_send_config_data(vty, NULL, false);
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_edit, mgmt_edit_cmd,
+ "mgmt edit {create|delete|merge|replace|remove}$op XPATH [json|xml]$fmt [lock$lock] [commit$commit] [DATA]",
+ MGMTD_STR
+ "Edit configuration data\n"
+ "Create data\n"
+ "Delete data\n"
+ "Merge data\n"
+ "Replace data\n"
+ "Remove data\n"
+ "XPath expression specifying the YANG data path\n"
+ "JSON input format (default)\n"
+ "XML input format\n"
+ "Lock the datastores automatically\n"
+ "Commit the changes automatically\n"
+ "Data tree\n")
+{
+ LYD_FORMAT format = (fmt && fmt[0] == 'x') ? LYD_XML : LYD_JSON;
+ uint8_t operation;
+ uint8_t flags = 0;
+
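+ /*
+ * Dispatch on the third character of the operation keyword, which is
+ * unique: cr'e'ate, de'l'ete, me'r'ge (-> modify), re'p'lace and
+ * re'm'ove (-> destroy).
+ */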
+ switch (op[2]) {
+ case 'e':
+ operation = NB_OP_CREATE_EXCL;
+ break;
+ case 'l':
+ operation = NB_OP_DELETE;
+ break;
+ case 'r':
+ operation = NB_OP_MODIFY;
+ break;
+ case 'p':
+ operation = NB_OP_REPLACE;
+ break;
+ case 'm':
+ operation = NB_OP_DESTROY;
+ break;
+ default:
+ vty_out(vty, "Invalid operation!\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ if (!data && (operation == NB_OP_CREATE_EXCL ||
+ operation == NB_OP_MODIFY || operation == NB_OP_REPLACE)) {
+ vty_out(vty, "Data tree is missing!\n");
+ return CMD_WARNING_CONFIG_FAILED;
+ }
+
+ if (lock)
+ flags |= EDIT_FLAG_IMPLICIT_LOCK;
+
+ if (commit)
+ flags |= EDIT_FLAG_IMPLICIT_COMMIT;
+
+ vty_mgmt_send_edit_req(vty, MGMT_MSG_DATASTORE_CANDIDATE, format, flags,
+ operation, xpath, data);
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_rpc, mgmt_rpc_cmd,
+ "mgmt rpc XPATH [json|xml]$fmt [DATA]",
+ MGMTD_STR
+ "Invoke RPC\n"
+ "XPath expression specifying the YANG data path\n"
+ "JSON input format (default)\n"
+ "XML input format\n"
+ "Input data tree\n")
+{
+ LYD_FORMAT format = (fmt && fmt[0] == 'x') ? LYD_XML : LYD_JSON;
+
+ vty_mgmt_send_rpc_req(vty, format, xpath, data);
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_get_config, show_mgmt_get_config_cmd,
+ "show mgmt get-config [candidate|operational|running]$dsname WORD$path",
+ SHOW_STR MGMTD_STR
+ "Get configuration data from a specific configuration datastore\n"
+ "Candidate datastore (default)\n"
+ "Operational datastore\n"
+ "Running datastore\n"
+ "XPath expression specifying the YANG data path\n")
+{
+ const char *xpath_list[VTY_MAXCFGCHANGES] = {0};
+ Mgmtd__DatastoreId datastore = MGMTD_DS_CANDIDATE;
+
+ if (dsname)
+ datastore = mgmt_ds_name2id(dsname);
+
+ xpath_list[0] = path;
+ vty_mgmt_send_get_req(vty, true, datastore, xpath_list, 1);
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_get_data, show_mgmt_get_data_cmd,
+ "show mgmt get-data WORD$path [datastore <candidate|running|operational>$ds] [with-config|only-config]$content [exact]$exact [with-defaults <trim|all-tag|all>$wd] [json|xml]$fmt",
+ SHOW_STR
+ MGMTD_STR
+ "Get data from the operational datastore\n"
+ "XPath expression specifying the YANG data root\n"
+ "Specify datastore to get data from (operational by default)\n"
+ "Candidate datastore\n"
+ "Running datastore\n"
+ "Operational datastore\n"
+ "Include \"config true\" data\n"
+ "Get only \"config true\" data\n"
+ "Get exact node instead of the whole data tree\n"
+ "Configure 'with-defaults' mode per RFC 6243 (\"explicit\" mode by default)\n"
+ "Use \"trim\" mode\n"
+ "Use \"report-all-tagged\" mode\n"
+ "Use \"report-all\" mode\n"
+ "JSON output format\n"
+ "XML output format\n")
+{
+ LYD_FORMAT format = (fmt && fmt[0] == 'x') ? LYD_XML : LYD_JSON;
+ int plen = strlen(path);
+ char *xpath = NULL;
+ uint8_t flags = content ? GET_DATA_FLAG_CONFIG : GET_DATA_FLAG_STATE;
+ uint8_t defaults = GET_DATA_DEFAULTS_EXPLICIT;
+ uint8_t datastore = MGMT_MSG_DATASTORE_OPERATIONAL;
+
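+ /*
+ * Default is state data only; "only-config" selects config data only,
+ * while "with-config" ('w') requests both config and state.
+ */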
+ if (content && content[0] == 'w')
+ flags |= GET_DATA_FLAG_STATE;
+
+ if (exact)
+ flags |= GET_DATA_FLAG_EXACT;
+
+ if (wd) {
+ if (wd[0] == 't')
+ defaults = GET_DATA_DEFAULTS_TRIM;
+ else if (wd[3] == '-')
+ defaults = GET_DATA_DEFAULTS_ALL_ADD_TAG;
+ else
+ defaults = GET_DATA_DEFAULTS_ALL;
+ }
+
+ if (ds) {
+ if (ds[0] == 'c')
+ datastore = MGMT_MSG_DATASTORE_CANDIDATE;
+ else if (ds[0] == 'r')
+ datastore = MGMT_MSG_DATASTORE_RUNNING;
+ }
+
+ /* get rid of extraneous trailing slash-* or single '/' unless root */
+ if (plen > 2 && ((path[plen - 2] == '/' && path[plen - 1] == '*') ||
+ (path[plen - 2] != '/' && path[plen - 1] == '/'))) {
+ plen = path[plen - 1] == '/' ? plen - 1 : plen - 2;
+ xpath = XSTRDUP(MTYPE_TMP, path);
+ xpath[plen] = 0;
+ path = xpath;
+ }
+
+ vty_mgmt_send_get_data_req(vty, datastore, format, flags, defaults,
+ path);
+
+ if (xpath)
+ XFREE(MTYPE_TMP, xpath);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_dump_data,
+ show_mgmt_dump_data_cmd,
+ "show mgmt datastore-contents [candidate|operational|running]$dsname [xpath WORD$path] [file WORD$filepath] <json|xml>$fmt",
+ SHOW_STR
+ MGMTD_STR
+ "Get Datastore contents from a specific datastore\n"
+ "Candidate datastore (default)\n"
+ "Operational datastore\n"
+ "Running datastore\n"
+ "XPath expression specifying the YANG data path\n"
+ "XPath string\n"
+ "Dump the contents to a file\n"
+ "Full path of the file\n"
+ "json output\n"
+ "xml output\n")
+{
+ struct mgmt_ds_ctx *ds_ctx;
+ Mgmtd__DatastoreId datastore = MGMTD_DS_CANDIDATE;
+ LYD_FORMAT format = fmt[0] == 'j' ? LYD_JSON : LYD_XML;
+ FILE *f = NULL;
+
+ if (dsname)
+ datastore = mgmt_ds_name2id(dsname);
+
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, datastore);
+ if (!ds_ctx) {
+ vty_out(vty, "ERROR: Could not access datastore!\n");
+ return CMD_ERR_NO_MATCH;
+ }
+
+ if (filepath) {
+ f = fopen(filepath, "w");
+ if (!f) {
+ vty_out(vty,
+ "Could not open file %s\n",
+ filepath);
+ return CMD_SUCCESS;
+ }
+ }
+
+ mgmt_ds_dump_tree(vty, ds_ctx, path, f, format);
+
+ if (f)
+ fclose(f);
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_map_xpath,
+ show_mgmt_map_xpath_cmd,
+ "show mgmt yang-xpath-subscription WORD$path",
+ SHOW_STR
+ MGMTD_STR
+ "Get YANG Backend Subscription\n"
+ "XPath expression specifying the YANG data path\n")
+{
+ mgmt_be_show_xpath_registries(vty, path);
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_load_config,
+ mgmt_load_config_cmd,
+ "mgmt load-config WORD$filepath <merge|replace>$type",
+ MGMTD_STR
+ "Load configuration onto Candidate Datastore\n"
+ "Full path of the file\n"
+ "Merge configuration with contents of Candidate Datastore\n"
+ "Replace the existing contents of Candidate datastore\n")
+{
+ bool merge = type[0] == 'm';
+ struct mgmt_ds_ctx *ds_ctx;
+ int ret;
+
+ if (access(filepath, F_OK) == -1) {
+ vty_out(vty, "ERROR: File %s : %s\n", filepath,
+ strerror(errno));
+ return CMD_ERR_NO_FILE;
+ }
+
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_CANDIDATE);
+ if (!ds_ctx) {
+ vty_out(vty, "ERROR: Could not access Candidate datastore!\n");
+ return CMD_ERR_NO_MATCH;
+ }
+
+ ret = mgmt_ds_load_config_from_file(ds_ctx, filepath, merge);
+ if (ret != 0)
+ vty_out(vty, "Error parsing the file, error code %d\n",
+ ret);
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_save_config,
+ mgmt_save_config_cmd,
+ "mgmt save-config <candidate|running>$dsname WORD$filepath",
+ MGMTD_STR
+ "Save configuration from datastore\n"
+ "Candidate datastore\n"
+ "Running datastore\n"
+ "Full path of the file\n")
+{
+ Mgmtd__DatastoreId datastore = mgmt_ds_name2id(dsname);
+ struct mgmt_ds_ctx *ds_ctx;
+ FILE *f;
+
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, datastore);
+ if (!ds_ctx) {
+ vty_out(vty, "ERROR: Could not access the '%s' datastore!\n",
+ dsname);
+ return CMD_ERR_NO_MATCH;
+ }
+
+ if (!filepath) {
+ vty_out(vty, "ERROR: No file path mentioned!\n");
+ return CMD_ERR_NO_MATCH;
+ }
+
+ f = fopen(filepath, "w");
+ if (!f) {
+ vty_out(vty, "Could not open file %s\n",
+ filepath);
+ return CMD_SUCCESS;
+ }
+
+ mgmt_ds_dump_tree(vty, ds_ctx, "/", f, LYD_JSON);
+
+ fclose(f);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(show_mgmt_cmt_hist,
+ show_mgmt_cmt_hist_cmd,
+ "show mgmt commit-history",
+ SHOW_STR
+ MGMTD_STR
+ "Show commit history\n")
+{
+ show_mgmt_cmt_history(vty);
+ return CMD_SUCCESS;
+}
+
+DEFPY(mgmt_rollback,
+ mgmt_rollback_cmd,
+ "mgmt rollback <commit-id WORD$commit | last [(1-10)]$last>",
+ MGMTD_STR
+ "Rollback commits\n"
+ "Rollback to commit ID\n"
+ "Commit-ID\n"
+ "Rollback n commits\n"
+ "Number of commits\n")
+{
+ if (commit)
+ mgmt_history_rollback_by_id(vty, commit);
+ else
+ mgmt_history_rollback_n(vty, last);
+
+ return CMD_SUCCESS;
+}
+
+int config_write_mgmt_debug(struct vty *vty);
+static struct cmd_node debug_node = {
+ .name = "mgmt debug",
+ .node = DEBUG_NODE,
+ .prompt = "",
+ .config_write = config_write_mgmt_debug,
+};
+
+static int write_mgmt_debug_helper(struct vty *vty, bool config)
+{
+ uint32_t mode = config ? DEBUG_MODE_CONF : DEBUG_MODE_ALL;
+ bool be = DEBUG_MODE_CHECK(&mgmt_debug_be, mode);
+ bool ds = DEBUG_MODE_CHECK(&mgmt_debug_ds, mode);
+ bool fe = DEBUG_MODE_CHECK(&mgmt_debug_fe, mode);
+ bool txn = DEBUG_MODE_CHECK(&mgmt_debug_txn, mode);
+
+ if (!(be || ds || fe || txn))
+ return 0;
+
+ vty_out(vty, "debug mgmt");
+ if (be)
+ vty_out(vty, " backend");
+ if (ds)
+ vty_out(vty, " datastore");
+ if (fe)
+ vty_out(vty, " frontend");
+ if (txn)
+ vty_out(vty, " transaction");
+
+ vty_out(vty, "\n");
+
+ return 0;
+}
+
+int config_write_mgmt_debug(struct vty *vty)
+{
+ return write_mgmt_debug_helper(vty, true);
+}
+
+DEFPY_NOSH(show_debugging_mgmt, show_debugging_mgmt_cmd,
+ "show debugging [mgmt]", SHOW_STR DEBUG_STR "MGMT Information\n")
+{
+ vty_out(vty, "MGMT debugging status:\n");
+
+ write_mgmt_debug_helper(vty, false);
+
+ cmd_show_lib_debugs(vty);
+
+ return CMD_SUCCESS;
+}
+
+DEFPY(debug_mgmt, debug_mgmt_cmd,
+ "[no$no] debug mgmt {backend$be|datastore$ds|frontend$fe|transaction$txn}",
+ NO_STR DEBUG_STR MGMTD_STR
+ "Backend debug\n"
+ "Datastore debug\n"
+ "Frontend debug\n"
+ "Transaction debug\n")
+{
+ uint32_t mode = DEBUG_NODE2MODE(vty->node);
+
+ if (be) {
+ DEBUG_MODE_SET(&mgmt_debug_be, mode, !no);
+ mgmt_be_adapter_toggle_client_debug(
+ DEBUG_MODE_CHECK(&mgmt_debug_be, DEBUG_MODE_ALL));
+ }
+ if (ds)
+ DEBUG_MODE_SET(&mgmt_debug_ds, mode, !no);
+ if (fe) {
+ DEBUG_MODE_SET(&mgmt_debug_fe, mode, !no);
+ mgmt_fe_adapter_toggle_client_debug(
+ DEBUG_MODE_CHECK(&mgmt_debug_fe, DEBUG_MODE_ALL));
+ }
+ if (txn)
+ DEBUG_MODE_SET(&mgmt_debug_txn, mode, !no);
+
+ return CMD_SUCCESS;
+}
+
+static void mgmt_config_read_in(struct event *event)
+{
+ if (vty_mgmt_fe_enabled())
+ mgmt_vty_read_configs();
+ else {
+ zlog_warn("%s: no connection to front-end server, retry in 1s",
+ __func__);
+ event_add_timer(mm->master, mgmt_config_read_in, NULL, 1,
+ &mgmt_daemon_info->read_in);
+ }
+}
+
+static int mgmtd_config_write(struct vty *vty)
+{
+ struct lyd_node *root;
+
+ LY_LIST_FOR (running_config->dnode, root) {
+ nb_cli_show_dnode_cmds(vty, root, false);
+ }
+
+ return 1;
+}
+
+static struct cmd_node mgmtd_node = {
+ .name = "mgmtd",
+ .node = MGMTD_NODE,
+ .prompt = "",
+ .config_write = mgmtd_config_write,
+};
+
+void mgmt_vty_init(void)
+{
+ /*
+ * Library based CLI handlers
+ */
+ filter_cli_init();
+ route_map_cli_init();
+ affinity_map_init();
+ keychain_cli_init();
+
+ /*
+ * Initialize command handling from VTYSH connection.
+ * Call command initialization routines defined by
+ * backend components that are moved to new MGMTD infra
+ * here one by one.
+ */
+ zebra_cli_init();
+#ifdef HAVE_RIPD
+ rip_cli_init();
+#endif
+#ifdef HAVE_RIPNGD
+ ripng_cli_init();
+#endif
+#ifdef HAVE_STATICD
+ static_vty_init();
+#endif
+
+ event_add_event(mm->master, mgmt_config_read_in, NULL, 0,
+ &mgmt_daemon_info->read_in);
+
+ install_node(&debug_node);
+ install_node(&mgmtd_node);
+
+ install_element(VIEW_NODE, &show_mgmt_be_adapter_cmd);
+ install_element(VIEW_NODE, &show_mgmt_be_xpath_reg_cmd);
+ install_element(VIEW_NODE, &show_mgmt_fe_adapter_cmd);
+ install_element(VIEW_NODE, &show_mgmt_txn_cmd);
+ install_element(VIEW_NODE, &show_mgmt_ds_cmd);
+ install_element(VIEW_NODE, &show_mgmt_get_config_cmd);
+ install_element(VIEW_NODE, &show_mgmt_get_data_cmd);
+ install_element(VIEW_NODE, &show_mgmt_dump_data_cmd);
+ install_element(VIEW_NODE, &show_mgmt_map_xpath_cmd);
+ install_element(VIEW_NODE, &show_mgmt_cmt_hist_cmd);
+
+ install_element(CONFIG_NODE, &mgmt_commit_cmd);
+ install_element(CONFIG_NODE, &mgmt_create_config_data_cmd);
+ install_element(CONFIG_NODE, &mgmt_set_config_data_cmd);
+ install_element(CONFIG_NODE, &mgmt_delete_config_data_cmd);
+ install_element(CONFIG_NODE, &mgmt_remove_config_data_cmd);
+ install_element(CONFIG_NODE, &mgmt_replace_config_data_cmd);
+ install_element(CONFIG_NODE, &mgmt_edit_cmd);
+ install_element(CONFIG_NODE, &mgmt_rpc_cmd);
+ install_element(CONFIG_NODE, &mgmt_load_config_cmd);
+ install_element(CONFIG_NODE, &mgmt_save_config_cmd);
+ install_element(CONFIG_NODE, &mgmt_rollback_cmd);
+
+ install_element(VIEW_NODE, &debug_mgmt_cmd);
+ install_element(CONFIG_NODE, &debug_mgmt_cmd);
+
+ /* Enable view */
+ install_element(ENABLE_NODE, &mgmt_performance_measurement_cmd);
+ install_element(ENABLE_NODE, &mgmt_reset_performance_stats_cmd);
+
+ install_element(ENABLE_NODE, &show_debugging_mgmt_cmd);
+
+ mgmt_fe_client_lib_vty_init();
+ /*
+ * TODO: Register and handlers for auto-completion here.
+ */
+}
diff --git a/mgmtd/subdir.am b/mgmtd/subdir.am
new file mode 100644
index 00000000..14544c4f
--- /dev/null
+++ b/mgmtd/subdir.am
@@ -0,0 +1,107 @@
+#
+# mgmtd -- Management Daemon
+#
+
+# dist_examples_DATA += \
+ # end
+
+vtysh_daemons += mgmtd
+
+# man8 += $(MANBUILD)/frr-mgmtd.8
+# endif
+
+clippy_scan += \
+ mgmtd/mgmt_vty.c \
+ # end
+
+lib_LTLIBRARIES += mgmtd/libmgmt_be_nb.la
+mgmtd_libmgmt_be_nb_la_SOURCES = \
+ mgmtd/mgmt_be_nb.c \
+ zebra/zebra_cli.c \
+ # end
+nodist_mgmtd_libmgmt_be_nb_la_SOURCES = \
+ # end
+mgmtd_libmgmt_be_nb_la_CFLAGS = $(AM_CFLAGS) -DINCLUDE_MGMTD_CMDDEFS_ONLY
+mgmtd_libmgmt_be_nb_la_CPPFLAGS = $(AM_CPPFLAGS) -DINCLUDE_MGMTD_CMDDEFS_ONLY
+mgmtd_libmgmt_be_nb_la_LDFLAGS = -version-info 0:0:0
+
+noinst_LIBRARIES += mgmtd/libmgmtd.a
+mgmtd_libmgmtd_a_SOURCES = \
+ mgmtd/mgmt.c \
+ mgmtd/mgmt_ds.c \
+ mgmtd/mgmt_be_adapter.c \
+ mgmtd/mgmt_fe_adapter.c \
+ mgmtd/mgmt_history.c \
+ mgmtd/mgmt_memory.c \
+ mgmtd/mgmt_txn.c \
+ mgmtd/mgmt_vty.c \
+ # end
+
+noinst_HEADERS += \
+ mgmtd/mgmt.h \
+ mgmtd/mgmt_be_adapter.h \
+ mgmtd/mgmt_ds.h \
+ mgmtd/mgmt_fe_adapter.h \
+ mgmtd/mgmt_history.h \
+ mgmtd/mgmt_memory.h \
+ mgmtd/mgmt_txn.h \
+ zebra/zebra_cli.h \
+ # end
+
+sbin_PROGRAMS += mgmtd/mgmtd
+
+if MGMTD_TESTC
+sbin_PROGRAMS += mgmtd/mgmtd_testc
+mgmtd_mgmtd_testc_SOURCES = mgmtd/mgmt_testc.c
+mgmtd_mgmtd_testc_LDADD = lib/libfrr.la
+endif
+
+mgmtd_mgmtd_SOURCES = \
+ mgmtd/mgmt_main.c \
+ # end
+nodist_mgmtd_mgmtd_SOURCES = \
+ yang/frr-zebra.yang.c \
+ yang/frr-zebra-route-map.yang.c \
+ yang/ietf/ietf-netconf.yang.c \
+ yang/ietf/ietf-netconf-with-defaults.yang.c \
+ # nothing
+mgmtd_mgmtd_CFLAGS = $(AM_CFLAGS) -I ./
+mgmtd_mgmtd_LDADD = mgmtd/libmgmtd.a lib/libfrr.la $(LIBCAP) $(LIBM) $(LIBYANG_LIBS) $(UST_LIBS)
+mgmtd_mgmtd_LDADD += mgmtd/libmgmt_be_nb.la
+
+
+if STATICD
+nodist_mgmtd_mgmtd_SOURCES += yang/frr-bfdd.yang.c
+else
+if RIPD
+nodist_mgmtd_mgmtd_SOURCES += yang/frr-bfdd.yang.c
+endif
+endif
+
+if RIPD
+nodist_mgmtd_mgmtd_SOURCES += \
+ yang/frr-ripd.yang.c \
+ # end
+mgmtd_libmgmt_be_nb_la_SOURCES += \
+ ripd/rip_cli.c \
+ # end
+endif
+
+if RIPNGD
+nodist_mgmtd_mgmtd_SOURCES += \
+ yang/frr-ripngd.yang.c \
+ # end
+mgmtd_libmgmt_be_nb_la_SOURCES += \
+ ripngd/ripng_cli.c \
+ # end
+endif
+
+if STATICD
+nodist_mgmtd_mgmtd_SOURCES += \
+ yang/frr-staticd.yang.c \
+ # end
+nodist_mgmtd_libmgmt_be_nb_la_SOURCES += \
+ staticd/static_vty.c \
+ # end
+endif
+