diff --git a/python/vyos/config.py b/python/vyos/config.py
index b7ee606a9..1fab46761 100644
--- a/python/vyos/config.py
+++ b/python/vyos/config.py
@@ -1,619 +1,622 @@
 # Copyright 2017-2024 VyOS maintainers and contributors <maintainers@vyos.io>
 #
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU Lesser General Public
 # License as published by the Free Software Foundation; either
 # version 2.1 of the License, or (at your option) any later version.
 #
 # This library is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 # Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public
 # License along with this library.  If not, see <http://www.gnu.org/licenses/>.
 
 """
 A library for reading VyOS running config data.
 
 This library is used internally by all config scripts of VyOS,
 but its API should be considered stable and safe to use
 in user scripts.
 
 Note that this module will not work outside VyOS.
 
 Node taxonomy
 #############
 
 There are multiple types of config tree nodes in VyOS, each of which
 requires its own set of operations.
 
 *Leaf nodes* (such as "address" in interfaces) can have values, but cannot
 have children.
 Leaf nodes can have one value, multiple values, or no values at all.
 
 For example, "system host-name" is a single-value leaf node,
 "system name-server" is a multi-value leaf node (commonly abbreviated "multi node"),
 and "system ip disable-forwarding" is a valueless leaf node.
 
 Non-leaf nodes cannot have values, but they can have child nodes. They are divided into
 two classes depending on whether the names of their children are fixed or not.
 For example, under "system", the names of all valid child nodes are predefined
 ("login", "name-server" etc.).
 
 In contrast, children of the "system task-scheduler task" node can have arbitrary names.
 Such nodes are called *tag nodes*. This terminology is confusing but we keep using it for lack
 of a better word. No one remembers if the "tag" in "task Foo" is "task" or "Foo",
 but the distinction is irrelevant in practice.
 
 Configuration modes
 ###################
 
 VyOS has two distinct modes: operational mode and configuration mode. When a user logs in,
 the CLI is in operational mode. In this mode, only the running (effective) config is accessible for reading.
 
 When a user enters the "configure" command, a configuration session is set up. Every config session
 has its *proposed* (or *session*) config built on top of the current running config. When changes are committed
 and the commit succeeds, the proposed config is merged into the running config.
 
 In configuration mode, "base" functions like `exists`, `return_value` return values from the session config,
 while functions prefixed "effective" return values from the running config.
 
 In operational mode, all functions return values from the running config.
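 
 A minimal usage sketch (assuming it runs inside a VyOS configuration session;
 the config paths are illustrative only):
 
     from vyos.config import Config
 
     conf = Config()
     if conf.exists(['system', 'name-server']):
         # read from the proposed (session) config
         servers = conf.return_values(['system', 'name-server'])
     # read from the running (effective) config
     running = conf.return_effective_values(['system', 'name-server'])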
 """
 
 import re
 import json
 from typing import Union
 
 import vyos.configtree
 from vyos.xml_ref import multi_to_list
 from vyos.xml_ref import from_source
 from vyos.xml_ref import ext_dict_merge
 from vyos.xml_ref import relative_defaults
 from vyos.utils.dict import get_sub_dict
 from vyos.utils.dict import mangle_dict_keys
 from vyos.configsource import ConfigSource
 from vyos.configsource import ConfigSourceSession
 
 class ConfigDict(dict):
     _from_defaults = {}
     _dict_kwargs = {}
     def from_defaults(self, path: list[str]) -> bool:
         return from_source(self._from_defaults, path)
     @property
     def kwargs(self) -> dict:
         return self._dict_kwargs
 
 def config_dict_merge(src: dict, dest: Union[dict, ConfigDict]) -> ConfigDict:
     if not isinstance(dest, ConfigDict):
         dest = ConfigDict(dest)
     return ext_dict_merge(src, dest)
 
 def config_dict_mangle_acme(name, cli_dict):
     """
     Load the CLI PKI dictionary and, if an ACME certificate is used, load its
     content and place it into the CLI dictionary as if it were a "regular" CLI
     PKI based certificate with a private key.
     """
     from vyos.base import ConfigError
     from vyos.defaults import directories
     from vyos.utils.file import read_file
     from vyos.pki import encode_certificate
     from vyos.pki import encode_private_key
     from vyos.pki import load_certificate
     from vyos.pki import load_private_key
 
     try:
         vyos_certbot_dir = directories['certbot']
 
         if 'acme' in cli_dict:
             tmp = read_file(f'{vyos_certbot_dir}/live/{name}/cert.pem')
             tmp = load_certificate(tmp, wrap_tags=False)
             cert_base64 = "".join(encode_certificate(tmp).strip().split("\n")[1:-1])
 
             tmp = read_file(f'{vyos_certbot_dir}/live/{name}/privkey.pem')
             tmp = load_private_key(tmp, wrap_tags=False)
             key_base64 = "".join(encode_private_key(tmp).strip().split("\n")[1:-1])
             # install ACME based PEM keys into "regular" CLI config keys
             cli_dict.update({'certificate' : cert_base64, 'private' : {'key' : key_base64}})
     except:
         raise ConfigError(f'Unable to load ACME certificates for "{name}"!')
 
     return cli_dict
 
 class Config(object):
     """
     The class of config access objects.
 
     Internally, in the current implementation, this object is *almost* stateless;
     the only state it keeps is the relative *config path*, for convenient access
     to config subtrees.
     """
     def __init__(self, session_env=None, config_source=None):
         if config_source is None:
             self._config_source = ConfigSourceSession(session_env)
         else:
             if not isinstance(config_source, ConfigSource):
                 raise TypeError("config_source not of type ConfigSource")
             self._config_source = config_source
 
         self._level = []
         self._dict_cache = {}
         self.dependency_list = []
         (self._running_config,
          self._session_config) = self._config_source.get_configtree_tuple()
 
     def get_config_tree(self, effective=False):
         if effective:
             return self._running_config
         return self._session_config
 
     def _make_path(self, path):
         # Backwards-compatibility stuff: original implementation used string paths
         # libvyosconfig paths are lists, but since node names cannot contain whitespace,
         # splitting at whitespace is reasonably safe.
         # It may cause problems with exists() when it's used for checking values,
         # since values may contain whitespace.
         if isinstance(path, str):
             path = re.split(r'\s+', path)
         elif isinstance(path, list):
             pass
         else:
             raise TypeError("Path must be a whitespace-separated string or a list")
         return (self._level + path)
 
     def set_level(self, path):
         """
         Set the *edit level*, that is, a relative config tree path.
         Once set, all operations will be relative to this path,
         for example, after ``set_level("system")``, calling
         ``exists("name-server")`` is equivalent to calling
         ``exists("system name-server")`` without ``set_level``.
 
         Args:
             path (str|list): relative config path
         """
         # Normalize the level to a list of node names. String paths are split
         # at whitespace for backwards compatibility with the old string API.
         if isinstance(path, str):
             if path:
                 self._level = re.split(r'\s+', path)
             else:
                 self._level = []
         elif isinstance(path, list):
             self._level = path.copy()
         else:
             raise TypeError("Level path must be either a whitespace-separated string or a list")
 
     def get_level(self):
         """
         Gets the current edit level.
 
         Returns:
             list: current edit level path
         """
         return(self._level.copy())
 
     def exists(self, path):
         """
         Checks if a node or value with given path exists in the proposed config.
 
         Args:
             path (str): Configuration tree path
 
         Returns:
             True if node or value exists in the proposed config, False otherwise
 
         Note:
             This function should not be used outside of configuration sessions.
             In operational mode scripts, use ``exists_effective``.
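 
             Both node and value checks work, for example (illustrative paths):
 
                 conf.exists('interfaces ethernet eth0')           # node
                 conf.exists('interfaces ethernet eth0 vrf mgmt')  # value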
         """
         if self._session_config is None:
             return False
 
         # Assume the path is a node path first
         if self._session_config.exists(self._make_path(path)):
             return True
         else:
             # If that check fails, it may mean the path has a value at the end.
             # libvyosconfig exists() works only for _nodes_, not _values_
             # libvyattacfg also worked for values, so we emulate that case here
             if isinstance(path, str):
                 path = re.split(r'\s+', path)
             path_without_value = path[:-1]
             try:
                 # return_values() is safe to use with single-value nodes,
                 # it simply returns a single-item list in that case.
                 values = self._session_config.return_values(self._make_path(path_without_value))
 
                 # If we got this far, the node does exist and has values,
                 # so we need to check if it has the value in question among its values.
                 return (path[-1] in values)
             except vyos.configtree.ConfigTreeError:
                 # Even the parent node doesn't exist at all
                 return False
 
     def session_changed(self):
         """
         Returns:
             True if the config session has uncommitted changes, False otherwise.
         """
         return self._config_source.session_changed()
 
     def in_session(self):
         """
         Returns:
             True if called from a configuration session, False otherwise.
         """
         return self._config_source.in_session()
 
     def show_config(self, path=[], default=None, effective=False):
         """
         Args:
             path (str list): Configuration tree path, or empty
             default (str): Default value to return
 
         Returns:
             str: working configuration
         """
         return self._config_source.show_config(path, default, effective)
 
     def get_cached_root_dict(self, effective=False):
         cached = self._dict_cache.get(effective, {})
         if cached:
             return cached
 
         if effective:
             config = self._running_config
         else:
             config = self._session_config
 
         if config:
             config_dict = json.loads(config.to_json())
         else:
             config_dict = {}
 
         self._dict_cache[effective] = config_dict
 
         return config_dict
 
     def verify_mangling(self, key_mangling):
         if not (isinstance(key_mangling, tuple) and \
                 (len(key_mangling) == 2) and \
                 isinstance(key_mangling[0], str) and \
                 isinstance(key_mangling[1], str)):
             raise ValueError("key_mangling must be a tuple of two strings")
 
     def get_config_dict(self, path=[], effective=False, key_mangling=None,
                         get_first_key=False, no_multi_convert=False,
                         no_tag_node_value_mangle=False,
                         with_defaults=False,
                         with_recursive_defaults=False,
                         with_pki=False):
         """
         Args:
             path (str list): Configuration tree path, can be empty
             effective=False: effective or session config
             key_mangling=None: mangle dict keys according to regex and replacement
             get_first_key=False: if k = path[-1], return the sub-dict d[k] instead of {k: d[k]}
             no_multi_convert=False: if False (default), single values of multi-value
                                     nodes are returned as single-item lists
             no_tag_node_value_mangle=False: do not apply key mangling to tag node values
             with_defaults=False: merge default values into the returned dict
             with_recursive_defaults=False: recursively merge default values
             with_pki=False: additionally load the 'pki' subtree, resolving ACME
                             certificates into regular certificate/key entries
 
         Returns: a dict representation of the config under path
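 
         Example (illustrative; mirrors typical conf-mode usage):
 
             firewall = conf.get_config_dict(['firewall'], key_mangling=('-', '_'),
                                             no_tag_node_value_mangle=True,
                                             get_first_key=True,
                                             with_recursive_defaults=True)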
         """
         kwargs = locals().copy()
         del kwargs['self']
         del kwargs['no_multi_convert']
         del kwargs['with_defaults']
         del kwargs['with_recursive_defaults']
         del kwargs['with_pki']
 
         lpath = self._make_path(path)
         root_dict = self.get_cached_root_dict(effective)
         conf_dict = get_sub_dict(root_dict, lpath, get_first_key=get_first_key)
 
         rpath = lpath if get_first_key else lpath[:-1]
 
         if not no_multi_convert:
             conf_dict = multi_to_list(rpath, conf_dict)
 
         if key_mangling is not None:
             self.verify_mangling(key_mangling)
             conf_dict = mangle_dict_keys(conf_dict,
                                          key_mangling[0], key_mangling[1],
                                          abs_path=rpath,
                                          no_tag_node_value_mangle=no_tag_node_value_mangle)
 
         if with_defaults or with_recursive_defaults:
             defaults = self.get_config_defaults(**kwargs,
                                                 recursive=with_recursive_defaults)
             conf_dict = config_dict_merge(defaults, conf_dict)
         else:
             conf_dict = ConfigDict(conf_dict)
 
         if with_pki and conf_dict:
             pki_dict = self.get_config_dict(['pki'], key_mangling=('-', '_'),
                                             no_tag_node_value_mangle=True,
                                             get_first_key=True)
             if pki_dict:
                 if 'certificate' in pki_dict:
                     for certificate in pki_dict['certificate']:
                         pki_dict['certificate'][certificate] = config_dict_mangle_acme(
                             certificate, pki_dict['certificate'][certificate])
 
             conf_dict['pki'] = pki_dict
 
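+        # expose the top-level 'interfaces' subtree so that later verification
+        # helpers (e.g. configverify.verify_interface_exists) can look up
+        # interface names without re-querying the config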
+        interfaces_root = root_dict.get('interfaces', {})
+        setattr(conf_dict, 'interfaces_root', interfaces_root)
+
         # save optional args for a call to get_config_defaults
         setattr(conf_dict, '_dict_kwargs', kwargs)
 
         return conf_dict
 
     def get_config_defaults(self, path=[], effective=False, key_mangling=None,
                             no_tag_node_value_mangle=False, get_first_key=False,
                             recursive=False) -> dict:
         lpath = self._make_path(path)
         root_dict = self.get_cached_root_dict(effective)
         conf_dict = get_sub_dict(root_dict, lpath, get_first_key)
 
         defaults = relative_defaults(lpath, conf_dict,
                                      get_first_key=get_first_key,
                                      recursive=recursive)
 
         rpath = lpath if get_first_key else lpath[:-1]
 
         if key_mangling is not None:
             self.verify_mangling(key_mangling)
             defaults = mangle_dict_keys(defaults,
                                         key_mangling[0], key_mangling[1],
                                         abs_path=rpath,
                                         no_tag_node_value_mangle=no_tag_node_value_mangle)
 
         return defaults
 
     def merge_defaults(self, config_dict: ConfigDict, recursive=False):
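         """
         Merge default values into a ConfigDict previously returned by
         get_config_dict(), which records its call arguments for this purpose.
 
         Illustrative usage (the path is an example only):
 
             d = conf.get_config_dict(['interfaces', 'dummy'], get_first_key=True)
             d = conf.merge_defaults(d, recursive=True)
         """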
         if not isinstance(config_dict, ConfigDict):
             raise TypeError('argument is not of type ConfigDict')
         if not config_dict.kwargs:
             raise ValueError('argument missing metadata')
 
         args = config_dict.kwargs
         d = self.get_config_defaults(**args, recursive=recursive)
         config_dict = config_dict_merge(d, config_dict)
         return config_dict
 
     def is_multi(self, path):
         """
         Args:
             path (str): Configuration tree path
 
         Returns:
             True if a node can have multiple values, False otherwise.
 
         Note:
             It also returns False if node doesn't exist.
         """
         self._config_source.set_level(self.get_level())
         return self._config_source.is_multi(path)
 
     def is_tag(self, path):
         """
         Args:
             path (str): Configuration tree path
 
         Returns:
             True if a node is a tag node, False otherwise.
 
         Note:
             It also returns False if node doesn't exist.
         """
         self._config_source.set_level(self.get_level())
         return self._config_source.is_tag(path)
 
     def is_leaf(self, path):
         """
         Args:
             path (str): Configuration tree path
 
         Returns:
             True if a node is a leaf node, False otherwise.
 
         Note:
             It also returns False if node doesn't exist.
         """
         self._config_source.set_level(self.get_level())
         return self._config_source.is_leaf(path)
 
     def return_value(self, path, default=None):
         """
         Retrieve the value of a single-value leaf node in the running or proposed config
 
         Args:
             path (str): Configuration tree path
             default (str): Default value to return if node does not exist
 
         Returns:
             str: Node value, if it has any
             None: if node is valueless *or* if it doesn't exist
 
         Note:
             Due to the issue with treatment of valueless nodes by this function,
             valueless nodes should be checked with ``exists`` instead.
 
             This function cannot be used outside a configuration session.
             In operational mode scripts, use ``return_effective_value``.
         """
         if self._session_config:
             try:
                 value = self._session_config.return_value(self._make_path(path))
             except vyos.configtree.ConfigTreeError:
                 value = None
         else:
             value = None
 
         if not value:
             return(default)
         else:
             return(value)
 
     def return_values(self, path, default=[]):
         """
         Retrieve all values of a multi-value leaf node in the running or proposed config
 
         Args:
             path (str): Configuration tree path
 
         Returns:
             str list: Node values, if it has any
             []: if node does not exist
 
         Note:
             This function cannot be used outside a configuration session.
             In operational mode scripts, use ``return_effective_values``.
         """
         if self._session_config:
             try:
                 values = self._session_config.return_values(self._make_path(path))
             except vyos.configtree.ConfigTreeError:
                 values = []
         else:
             values = []
 
         if not values:
             return(default.copy())
         else:
             return(values)
 
     def list_nodes(self, path, default=[]):
         """
         Retrieve names of all children of a tag node in the running or proposed config
 
         Args:
             path (str): Configuration tree path
 
         Returns:
             string list: child node names
 
         """
         if self._session_config:
             try:
                 nodes = self._session_config.list_nodes(self._make_path(path))
             except vyos.configtree.ConfigTreeError:
                 nodes = []
         else:
             nodes = []
 
         if not nodes:
             return(default.copy())
         else:
             return(nodes)
 
     def exists_effective(self, path):
         """
         Checks if a node or value exists in the running (effective) config.
 
         Args:
             path (str): Configuration tree path
 
         Returns:
             True if node exists in the running config, False otherwise
 
         Note:
             This function is safe to use in operational mode. In configuration mode,
             it ignores uncommited changes.
         """
         if self._running_config is None:
             return False
 
         # Assume the path is a node path first
         if self._running_config.exists(self._make_path(path)):
             return True
         else:
             # If that check fails, it may mean the path has a value at the end.
             # libvyosconfig exists() works only for _nodes_, not _values_
             # libvyattacfg also worked for values, so we emulate that case here
             if isinstance(path, str):
                 path = re.split(r'\s+', path)
             path_without_value = path[:-1]
             try:
                 # return_values() is safe to use with single-value nodes,
                 # it simply returns a single-item list in that case.
                 values = self._running_config.return_values(self._make_path(path_without_value))
 
                 # If we got this far, the node does exist and has values,
                 # so we need to check if it has the value in question among its values.
                 return (path[-1] in values)
             except vyos.configtree.ConfigTreeError:
                 # Even the parent node doesn't exist at all
                 return False
 
 
     def return_effective_value(self, path, default=None):
         """
         Retrieve the value of a single-value leaf node in the running (effective) config
 
         Args:
             path (str): Configuration tree path
             default (str): Default value to return if node does not exist
 
         Returns:
             str: Node value
         """
         if self._running_config:
             try:
                 value = self._running_config.return_value(self._make_path(path))
             except vyos.configtree.ConfigTreeError:
                 value = None
         else:
             value = None
 
         if not value:
             return(default)
         else:
             return(value)
 
     def return_effective_values(self, path, default=[]):
         """
         Retrieve all values of a multi-value node in a running (effective) config
 
         Args:
             path (str): Configuration tree path
 
         Returns:
             str list: A list of values
         """
         if self._running_config:
             try:
                 values = self._running_config.return_values(self._make_path(path))
             except vyos.configtree.ConfigTreeError:
                 values = []
         else:
             values = []
 
         if not values:
             return(default.copy())
         else:
             return(values)
 
     def list_effective_nodes(self, path, default=[]):
         """
         Retrieve names of all children of a tag node in the running config
 
         Args:
             path (str): Configuration tree path
 
         Returns:
             str list: child node names
         """
         if self._running_config:
             try:
                 nodes = self._running_config.list_nodes(self._make_path(path))
             except vyos.configtree.ConfigTreeError:
                 nodes = []
         else:
             nodes = []
 
         if not nodes:
             return(default.copy())
         else:
             return(nodes)
diff --git a/python/vyos/configverify.py b/python/vyos/configverify.py
index b49d66c36..59b67300d 100644
--- a/python/vyos/configverify.py
+++ b/python/vyos/configverify.py
@@ -1,524 +1,522 @@
 # Copyright 2020-2024 VyOS maintainers and contributors <maintainers@vyos.io>
 #
 # This library is free software; you can redistribute it and/or
 # modify it under the terms of the GNU Lesser General Public
 # License as published by the Free Software Foundation; either
 # version 2.1 of the License, or (at your option) any later version.
 #
 # This library is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 # Lesser General Public License for more details.
 #
 # You should have received a copy of the GNU Lesser General Public
 # License along with this library.  If not, see <http://www.gnu.org/licenses/>.
 
 # The sole purpose of this module is to hold common functions used in
 # all kinds of implementations to verify the CLI configuration.
 # It started with the migration of the interfaces to the new get_config_dict()
 # approach, which leads to a lot of code that can be reused.
 
 # NOTE: imports should be as local as possible to the function which
 # makes use of it!
 
 from vyos import ConfigError
 from vyos.utils.dict import dict_search
 # pattern re-used in ipsec migration script
 dynamic_interface_pattern = r'(ppp|pppoe|sstpc|l2tp|ipoe)[0-9]+'
 
 def verify_mtu(config):
     """
     Common helper function used by interface implementations to perform
     recurring validation of whether the specified MTU can be used by the
     underlying hardware.
     """
     from vyos.ifconfig import Interface
     if 'mtu' in config:
         mtu = int(config['mtu'])
 
         tmp = Interface(config['ifname'])
         # Not all interfaces support min/max MTU
         # https://vyos.dev/T5011
         try:
             min_mtu = tmp.get_min_mtu()
             max_mtu = tmp.get_max_mtu()
         except: # Fallback to defaults
             min_mtu = 68
             max_mtu = 9000
 
         if mtu < min_mtu:
             raise ConfigError(f'Interface MTU too low, ' \
                               f'minimum supported MTU is {min_mtu}!')
         if mtu > max_mtu:
             raise ConfigError(f'Interface MTU too high, ' \
                               f'maximum supported MTU is {max_mtu}!')
 
 def verify_mtu_parent(config, parent):
     if 'mtu' not in config or 'mtu' not in parent:
         return
 
     mtu = int(config['mtu'])
     parent_mtu = int(parent['mtu'])
     if mtu > parent_mtu:
         raise ConfigError(f'Interface MTU "{mtu}" too high, ' \
                           f'parent interface MTU is "{parent_mtu}"!')
 
 def verify_mtu_ipv6(config):
     """
     Common helper function used by interface implementations to perform
     recurring validation of whether the specified MTU can be used when IPv6 is
     configured on the interface. IPv6 requires a minimum MTU of 1280 bytes.
     """
     from vyos.template import is_ipv6
     if 'mtu' in config:
         # IPv6 minimum required link mtu
         min_mtu = 1280
         if int(config['mtu']) < min_mtu:
             interface = config['ifname']
             error_msg = f'IPv6 address will be configured on interface "{interface}",\n' \
                         f'the required minimum MTU is "{min_mtu}"!'
 
             if 'address' in config:
                 for address in config['address']:
                     if address in ['dhcpv6'] or is_ipv6(address):
                         raise ConfigError(error_msg)
 
             tmp = dict_search('ipv6.address.no_default_link_local', config)
             if tmp == None: raise ConfigError('link-local ' + error_msg)
 
             tmp = dict_search('ipv6.address.autoconf', config)
             if tmp != None: raise ConfigError(error_msg)
 
             tmp = dict_search('ipv6.address.eui64', config)
             if tmp != None: raise ConfigError(error_msg)
 
 def verify_vrf(config):
     """
     Common helper function used by interface implementations to perform
     recurring validation of VRF configuration.
     """
     from vyos.utils.network import interface_exists
     if 'vrf' in config:
         vrfs = config['vrf']
         if isinstance(vrfs, str):
             vrfs = [vrfs]
 
         for vrf in vrfs:
             if vrf == 'default':
                 continue
             if not interface_exists(vrf):
                 raise ConfigError(f'VRF "{vrf}" does not exist!')
 
         if 'is_bridge_member' in config:
             raise ConfigError(
                 'Interface "{ifname}" cannot be both a member of VRF "{vrf}" '
                 'and bridge "{is_bridge_member}"!'.format(**config))
 
 def verify_bond_bridge_member(config):
     """
     Checks if interface has a VRF configured and is also part of a bond or
     bridge, which is not allowed!
     """
     if 'vrf' in config:
         ifname = config['ifname']
         if 'is_bond_member' in config:
             raise ConfigError(f'Can not add interface "{ifname}" to bond, it has a VRF assigned!')
         if 'is_bridge_member' in config:
             raise ConfigError(f'Can not add interface "{ifname}" to bridge, it has a VRF assigned!')
 
 def verify_tunnel(config):
     """
     This helper is used to verify the common part of the tunnel
     """
     from vyos.template import is_ipv4
     from vyos.template import is_ipv6
 
     if 'encapsulation' not in config:
         raise ConfigError('Must configure the tunnel encapsulation for '\
                           '{ifname}!'.format(**config))
 
     if 'source_address' not in config and 'source_interface' not in config:
         raise ConfigError('source-address or source-interface required for tunnel!')
 
     if 'remote' not in config and config['encapsulation'] != 'gre':
         raise ConfigError('remote ip address is mandatory for tunnel')
 
     if config['encapsulation'] in ['ipip6', 'ip6ip6', 'ip6gre', 'ip6gretap', 'ip6erspan']:
         error_ipv6 = 'Encapsulation mode requires IPv6'
         if 'source_address' in config and not is_ipv6(config['source_address']):
             raise ConfigError(f'{error_ipv6} source-address')
 
         if 'remote' in config and not is_ipv6(config['remote']):
             raise ConfigError(f'{error_ipv6} remote')
     else:
         error_ipv4 = 'Encapsulation mode requires IPv4'
         if 'source_address' in config and not is_ipv4(config['source_address']):
             raise ConfigError(f'{error_ipv4} source-address')
 
         if 'remote' in config and not is_ipv4(config['remote']):
             raise ConfigError(f'{error_ipv4} remote address')
 
     if config['encapsulation'] in ['sit', 'gretap', 'ip6gretap']:
         if 'source_interface' in config:
             encapsulation = config['encapsulation']
             raise ConfigError(f'Option source-interface can not be used with ' \
                               f'encapsulation "{encapsulation}"!')
     elif config['encapsulation'] == 'gre':
         if 'source_address' in config and is_ipv6(config['source_address']):
             raise ConfigError('Can not use local IPv6 address for mGRE tunnels')
 
 def verify_mirror_redirect(config):
     """
     Common helper function used by interface implementations to perform
     recurring validation of mirror and redirect interface configuration via tc(8)
 
     It makes no sense to mirror traffic back at yourself!
     """
     from vyos.utils.network import interface_exists
     if {'mirror', 'redirect'} <= set(config):
         raise ConfigError('Mirror and redirect can not be enabled at the same time!')
 
     if 'mirror' in config:
         for direction, mirror_interface in config['mirror'].items():
             if not interface_exists(mirror_interface):
                 raise ConfigError(f'Requested mirror interface "{mirror_interface}" '\
                                    'does not exist!')
 
             if mirror_interface == config['ifname']:
                 raise ConfigError(f'Can not mirror "{direction}" traffic back '\
                                    'to the originating interface!')
 
     if 'redirect' in config:
         redirect_ifname = config['redirect']
         if not interface_exists(redirect_ifname):
             raise ConfigError(f'Requested redirect interface "{redirect_ifname}" '\
                                'does not exist!')
 
     if ('mirror' in config or 'redirect' in config) and dict_search('traffic_policy.in', config) is not None:
         # XXX: support combination of limiting and redirect/mirror - this is an
         # artificial limitation
         raise ConfigError('Can not use ingress policy together with mirror or redirect!')
 
 def verify_authentication(config):
     """
     Common helper function used by interface implementations to perform
     recurring validation of authentication for either PPPoE or WWAN interfaces.
 
     If authentication CLI option is defined, both username and password must
     be set!
     """
     if 'authentication' not in config:
         return
     if not {'username', 'password'} <= set(config['authentication']):
         raise ConfigError('Authentication requires both username and ' \
                           'password to be set!')
 
 def verify_address(config):
     """
     Common helper function used by interface implementations to perform
     recurring validation of IP address assignment when interface is part
     of a bridge or bond.
     """
     if {'is_bridge_member', 'address'} <= set(config):
         interface = config['ifname']
         bridge_name = next(iter(config['is_bridge_member']))
         raise ConfigError(f'Cannot assign address to interface "{interface}" '
                           f'as it is a member of bridge "{bridge_name}"!')
 
 def verify_bridge_delete(config):
     """
     Common helper function used by interface implementations to
     perform recurring validation that an interface is not deleted
     while it is still a member of a bridge.
     """
     if 'is_bridge_member' in config:
         interface = config['ifname']
         bridge_name = next(iter(config['is_bridge_member']))
         raise ConfigError(f'Interface "{interface}" cannot be deleted as it '
                           f'is a member of bridge "{bridge_name}"!')
 
-def verify_interface_exists(ifname, state_required=False, warning_only=False):
+def verify_interface_exists(config, ifname, state_required=False, warning_only=False):
     """
     Common helper function used by interface implementations to perform
     recurring validation that an interface actually exists. We first check
     whether the interface is defined in the CLI config; if it is not found
     there, we check whether it exists at the OS level.
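+
+    Note: 'config' is typically the dict returned by Config.get_config_dict(),
+    which (with this change) carries an 'interfaces_root' attribute used for
+    the CLI-level lookup; if the attribute is absent, only the OS-level check
+    applies. Illustrative call:
+
+        verify_interface_exists(foo_config, 'eth0', warning_only=True)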
     """
     from vyos.base import Warning
-    from vyos.configquery import ConfigTreeQuery
     from vyos.utils.dict import dict_search_recursive
     from vyos.utils.network import interface_exists
 
     if not state_required:
         # Check if interface is present in CLI config
-        config = ConfigTreeQuery()
-        tmp = config.get_config_dict(['interfaces'], get_first_key=True)
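+        # 'interfaces_root' is attached to the dict by Config.get_config_dict()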
+        tmp = getattr(config, 'interfaces_root', {})
         if bool(list(dict_search_recursive(tmp, ifname))):
             return True
 
     # Interface not found on CLI, try Linux Kernel
     if interface_exists(ifname):
         return True
 
     message = f'Interface "{ifname}" does not exist!'
     if warning_only:
         Warning(message)
         return False
     raise ConfigError(message)
 
 def verify_source_interface(config):
     """
     Common helper function used by interface implementations to
     perform recurring validation of the existence of a source-interface
     required by e.g. peth/MACvlan, MACsec ...
     """
     import re
     from vyos.utils.network import interface_exists
 
     ifname = config['ifname']
     if 'source_interface' not in config:
         raise ConfigError(f'Physical source-interface required for "{ifname}"!')
 
     src_ifname = config['source_interface']
     # We do not allow sourcing other interfaces (e.g. tunnel) from dynamic interfaces
     tmp = re.compile(dynamic_interface_pattern)
     if tmp.match(src_ifname):
         raise ConfigError(f'Can not source "{ifname}" from dynamic interface "{src_ifname}"!')
 
     if not interface_exists(src_ifname):
         raise ConfigError(f'Specified source-interface {src_ifname} does not exist')
 
     if 'source_interface_is_bridge_member' in config:
         bridge_name = next(iter(config['source_interface_is_bridge_member']))
         raise ConfigError(f'Invalid source-interface "{src_ifname}". Interface '
                           f'is already a member of bridge "{bridge_name}"!')
 
     if 'source_interface_is_bond_member' in config:
         bond_name = next(iter(config['source_interface_is_bond_member']))
         raise ConfigError(f'Invalid source-interface "{src_ifname}". Interface '
                           f'is already a member of bond "{bond_name}"!')
 
     if 'is_source_interface' in config:
         tmp = config['is_source_interface']
         raise ConfigError(f'Can not use source-interface "{src_ifname}", it already ' \
                           f'belongs to interface "{tmp}"!')
 
 def verify_dhcpv6(config):
     """
     Common helper function used by interface implementations to perform
     recurring validation of DHCPv6 options which are mutually exclusive.
     """
     if 'dhcpv6_options' in config:
         if {'parameters_only', 'temporary'} <= set(config['dhcpv6_options']):
             raise ConfigError('DHCPv6 temporary and parameters-only options '
                               'are mutually exclusive!')
 
         # It is not allowed to have duplicate SLA-IDs as those identify an
         # assigned IPv6 subnet from a delegated prefix
         for pd in (dict_search('dhcpv6_options.pd', config) or []):
             sla_ids = []
             interfaces = dict_search(f'dhcpv6_options.pd.{pd}.interface', config)
 
             if not interfaces:
                 raise ConfigError('DHCPv6-PD requires an interface where to assign '
                                   'the delegated prefix!')
 
             for count, interface in enumerate(interfaces):
                 if 'sla_id' in interfaces[interface]:
                     sla_ids.append(interfaces[interface]['sla_id'])
                 else:
                     sla_ids.append(str(count))
 
             # Check for duplicates
             duplicates = [x for n, x in enumerate(sla_ids) if x in sla_ids[:n]]
             if duplicates:
                 raise ConfigError('Site-Level Aggregation Identifier (SLA-ID) '
                                   'must be unique per prefix-delegation!')
 
 def verify_vlan_config(config):
     """
     Common helper function used by interface implementations to perform
     recurring validation of interface VLANs
     """
 
     # VLAN and Q-in-Q IDs are not allowed to overlap
     if 'vif' in config and 'vif_s' in config:
         duplicate = list(set(config['vif']) & set(config['vif_s']))
         if duplicate:
             raise ConfigError(f'Duplicate VLAN id "{duplicate[0]}" used for vif and vif-s interfaces!')
 
     parent_ifname = config['ifname']
     # 802.1q VLANs
     for vlan_id in config.get('vif', {}):
         vlan = config['vif'][vlan_id]
         vlan['ifname'] = f'{parent_ifname}.{vlan_id}'
 
         verify_dhcpv6(vlan)
         verify_address(vlan)
         verify_vrf(vlan)
         verify_mirror_redirect(vlan)
         verify_mtu_parent(vlan, config)
 
     # 802.1ad (Q-in-Q) VLANs
     for s_vlan_id in config.get('vif_s', {}):
         s_vlan = config['vif_s'][s_vlan_id]
         s_vlan['ifname'] = f'{parent_ifname}.{s_vlan_id}'
 
         verify_dhcpv6(s_vlan)
         verify_address(s_vlan)
         verify_vrf(s_vlan)
         verify_mirror_redirect(s_vlan)
         verify_mtu_parent(s_vlan, config)
 
         for c_vlan_id in s_vlan.get('vif_c', {}):
             c_vlan = s_vlan['vif_c'][c_vlan_id]
             c_vlan['ifname'] = f'{parent_ifname}.{s_vlan_id}.{c_vlan_id}'
 
             verify_dhcpv6(c_vlan)
             verify_address(c_vlan)
             verify_vrf(c_vlan)
             verify_mirror_redirect(c_vlan)
             verify_mtu_parent(c_vlan, config)
             verify_mtu_parent(c_vlan, s_vlan)
 
 
 def verify_diffie_hellman_length(file, min_keysize):
     """ Verify Diffie-Hellamn keypair length given via file. It must be greater
     then or equal to min_keysize """
     import os
     import re
     from vyos.utils.process import cmd
 
     try:
         keysize = str(min_keysize)
     except:
         return False
 
     if os.path.exists(file):
         out = cmd(f'openssl dhparam -inform PEM -in {file} -text')
         prog = re.compile(r'\d+\s+bit')
         if prog.search(out):
             bits = prog.search(out)[0].split()[0]
             if int(bits) >= int(min_keysize):
                 return True
 
     return False
 
 def verify_common_route_maps(config):
     """
     Common helper function used by routing protocol implementations to perform
     recurring validation that a referenced route-map exists: either the top-level
     route_map key (used for zebra-to-kernel route installation) or a route-map
     referenced when redistributing routes.
     """
     # XXX: This function is called in combination with a previous call to:
     # tmp = conf.get_config_dict(['policy']) - see protocols_ospf.py as example.
     # We should NOT call this with the key_mangling option as this would rename
     # route-map hyphens '-' to underscores '_' and one could no longer distinguish
     # what should have been the "proper" route-map name, as foo-bar and foo_bar
     # are two entirely different route-map instances!
     for route_map in ['route-map', 'route_map']:
         if route_map not in config:
             continue
         tmp = config[route_map]
         # Check if the specified route-map exists, if not error out
         if dict_search(f'policy.route-map.{tmp}', config) == None:
             raise ConfigError(f'Specified route-map "{tmp}" does not exist!')
 
     if 'redistribute' in config:
         for protocol, protocol_config in config['redistribute'].items():
             if 'route_map' in protocol_config:
                 verify_route_map(protocol_config['route_map'], config)
 
 def verify_route_map(route_map_name, config):
     """
     Common helper function used by routing protocol implementations to perform
     recurring validation if a specified route-map exists!
     """
     # Check if the specified route-map exists, if not error out
     if dict_search(f'policy.route-map.{route_map_name}', config) == None:
         raise ConfigError(f'Specified route-map "{route_map_name}" does not exist!')
 
 def verify_prefix_list(prefix_list, config, version=''):
     """
     Common helper function used by routing protocol implementations to perform
     recurring validation if a specified prefix-list exists!
     """
     # Check if the specified prefix-list exists, if not error out
     if dict_search(f'policy.prefix-list{version}.{prefix_list}', config) == None:
         raise ConfigError(f'Specified prefix-list{version} "{prefix_list}" does not exist!')
 
 def verify_access_list(access_list, config, version=''):
     """
     Common helper function used by routing protocol implementations to perform
     recurring validation if a specified access-list exists!
     """
     # Check if the specified ACL exists, if not error out
     if dict_search(f'policy.access-list{version}.{access_list}', config) == None:
         raise ConfigError(f'Specified access-list{version} "{access_list}" does not exist!')
 
 def verify_pki_certificate(config: dict, cert_name: str, no_password_protected: bool=False):
     """
     Common helper function used by PKI consumers to perform recurring
     validation functions for PEM based certificates
     """
     if 'pki' not in config:
         raise ConfigError('PKI is not configured!')
 
     if 'certificate' not in config['pki']:
         raise ConfigError('PKI does not contain any certificates!')
 
     if cert_name not in config['pki']['certificate']:
         raise ConfigError(f'Certificate "{cert_name}" not found in configuration!')
 
     pki_cert = config['pki']['certificate'][cert_name]
     if 'certificate' not in pki_cert:
         raise ConfigError(f'PEM certificate for "{cert_name}" missing in configuration!')
 
     if 'private' not in pki_cert or 'key' not in pki_cert['private']:
         raise ConfigError(f'PEM private key for "{cert_name}" missing in configuration!')
 
     if no_password_protected and 'password_protected' in pki_cert['private']:
         raise ConfigError('Password protected PEM private key is not supported!')
 
 def verify_pki_ca_certificate(config: dict, ca_name: str):
     """
     Common helper function used by PKI consumers to perform recurring
     validation functions for PEM based CA certificates
     """
     if 'pki' not in config:
         raise ConfigError('PKI is not configured!')
 
     if 'ca' not in config['pki']:
         raise ConfigError('PKI does not contain any CA certificates!')
 
     if ca_name not in config['pki']['ca']:
         raise ConfigError(f'CA Certificate "{ca_name}" not found in configuration!')
 
     pki_cert = config['pki']['ca'][ca_name]
     if 'certificate' not in pki_cert:
         raise ConfigError(f'PEM CA certificate for "{ca_name}" missing in configuration!')
 
 def verify_pki_dh_parameters(config: dict, dh_name: str, min_key_size: int=0):
     """
     Common helper function used by PKI consumers to perform recurring
     validation functions on DH parameters
     """
     from vyos.pki import load_dh_parameters
 
     if 'pki' not in config:
         raise ConfigError('PKI is not configured!')
 
     if 'dh' not in config['pki']:
         raise ConfigError('PKI does not contain any DH parameters!')
 
     if dh_name not in config['pki']['dh']:
         raise ConfigError(f'DH parameter "{dh_name}" not found in configuration!')
 
     if min_key_size:
         pki_dh = config['pki']['dh'][dh_name]
         dh_params = load_dh_parameters(pki_dh['parameters'])
         dh_numbers = dh_params.parameter_numbers()
         dh_bits = dh_numbers.p.bit_length()
         if dh_bits < min_key_size:
             raise ConfigError(f'Minimum DH key-size is {min_key_size} bits!')
diff --git a/src/conf_mode/firewall.py b/src/conf_mode/firewall.py
index e96e57154..9e95d7794 100755
--- a/src/conf_mode/firewall.py
+++ b/src/conf_mode/firewall.py
@@ -1,524 +1,524 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2021-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 import re
 
 from glob import glob
 from sys import exit
 
 from vyos.base import Warning
 from vyos.config import Config
 from vyos.configdict import is_node_changed
 from vyos.configdiff import get_config_diff, Diff
 from vyos.configdep import set_dependents, call_dependents
 from vyos.configverify import verify_interface_exists
 from vyos.ethtool import Ethtool
 from vyos.firewall import fqdn_config_parse
 from vyos.firewall import geoip_update
 from vyos.template import render
 from vyos.utils.dict import dict_search_args
 from vyos.utils.dict import dict_search_recursive
 from vyos.utils.process import call
 from vyos.utils.process import rc_cmd
 from vyos import ConfigError
 from vyos import airbag
 
 airbag.enable()
 
 nftables_conf = '/run/nftables.conf'
 
 sysfs_config = {
     'all_ping': {'sysfs': '/proc/sys/net/ipv4/icmp_echo_ignore_all', 'enable': '0', 'disable': '1'},
     'broadcast_ping': {'sysfs': '/proc/sys/net/ipv4/icmp_echo_ignore_broadcasts', 'enable': '0', 'disable': '1'},
     'directed_broadcast' : {'sysfs': '/proc/sys/net/ipv4/conf/all/bc_forwarding', 'enable': '1', 'disable': '0'},
     'ip_src_route': {'sysfs': '/proc/sys/net/ipv4/conf/*/accept_source_route'},
     'ipv6_receive_redirects': {'sysfs': '/proc/sys/net/ipv6/conf/*/accept_redirects'},
     'ipv6_src_route': {'sysfs': '/proc/sys/net/ipv6/conf/*/accept_source_route', 'enable': '0', 'disable': '-1'},
     'log_martians': {'sysfs': '/proc/sys/net/ipv4/conf/all/log_martians'},
     'receive_redirects': {'sysfs': '/proc/sys/net/ipv4/conf/*/accept_redirects'},
     'send_redirects': {'sysfs': '/proc/sys/net/ipv4/conf/*/send_redirects'},
     'syn_cookies': {'sysfs': '/proc/sys/net/ipv4/tcp_syncookies'},
     'twa_hazards_protection': {'sysfs': '/proc/sys/net/ipv4/tcp_rfc1337'}
 }
 
 valid_groups = [
     'address_group',
     'domain_group',
     'network_group',
     'port_group',
     'interface_group'
 ]
 
 nested_group_types = [
     'address_group', 'network_group', 'mac_group',
     'port_group', 'ipv6_address_group', 'ipv6_network_group'
 ]
 
 snmp_change_type = {
     'unknown': 0,
     'add': 1,
     'delete': 2,
     'change': 3
 }
 snmp_event_source = 1
 snmp_trap_mib = 'VYATTA-TRAP-MIB'
 snmp_trap_name = 'mgmtEventTrap'
 
 def geoip_updated(conf, firewall):
     diff = get_config_diff(conf)
     node_diff = diff.get_child_nodes_diff(['firewall'], expand_nodes=Diff.DELETE, recursive=True)
 
     out = {
         'name': [],
         'ipv6_name': [],
         'deleted_name': [],
         'deleted_ipv6_name': []
     }
     updated = False
 
     for key, path in dict_search_recursive(firewall, 'geoip'):
         set_name = f'GEOIP_CC_{path[1]}_{path[2]}_{path[4]}'
         if (path[0] == 'ipv4'):
             out['name'].append(set_name)
         elif (path[0] == 'ipv6'):
             set_name = f'GEOIP_CC6_{path[1]}_{path[2]}_{path[4]}'
             out['ipv6_name'].append(set_name)
 
         updated = True
 
     if 'delete' in node_diff:
         for key, path in dict_search_recursive(node_diff['delete'], 'geoip'):
             set_name = f'GEOIP_CC_{path[1]}_{path[2]}_{path[4]}'
             if (path[0] == 'ipv4'):
                 out['deleted_name'].append(set_name)
             elif (path[0] == 'ipv6'):
                 set_name = f'GEOIP_CC6_{path[1]}_{path[2]}_{path[4]}'
                 out['deleted_ipv6_name'].append(set_name)
             updated = True
 
     if updated:
         return out
 
     return False
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
     base = ['firewall']
 
     firewall = conf.get_config_dict(base, key_mangling=('-', '_'),
                                     no_tag_node_value_mangle=True,
                                     get_first_key=True,
                                     with_recursive_defaults=True)
 
 
     firewall['group_resync'] = bool('group' in firewall or is_node_changed(conf, base + ['group']))
     if firewall['group_resync']:
         # Update nat and policy-route as firewall groups were updated
         set_dependents('group_resync', conf)
 
     firewall['geoip_updated'] = geoip_updated(conf, firewall)
 
     fqdn_config_parse(firewall)
 
     set_dependents('conntrack', conf)
 
     return firewall
 
 def verify_rule(firewall, rule_conf, ipv6):
     if 'action' not in rule_conf:
         raise ConfigError('Rule action must be defined')
 
     if 'jump' in rule_conf['action'] and 'jump_target' not in rule_conf:
         raise ConfigError('Action set to jump, but no jump-target specified')
 
     if 'jump_target' in rule_conf:
         if 'jump' not in rule_conf['action']:
             raise ConfigError('jump-target defined, but action is not set to jump')
         target = rule_conf['jump_target']
         if not ipv6:
             if target not in dict_search_args(firewall, 'ipv4', 'name'):
                 raise ConfigError(f'Invalid jump-target. Firewall name {target} does not exist on the system')
         else:
             if target not in dict_search_args(firewall, 'ipv6', 'name'):
                 raise ConfigError(f'Invalid jump-target. Firewall ipv6 name {target} does not exist on the system')
 
     if rule_conf['action'] == 'offload':
         if 'offload_target' not in rule_conf:
             raise ConfigError('Action set to offload, but no offload-target specified')
 
         offload_target = rule_conf['offload_target']
 
         if not dict_search_args(firewall, 'flowtable', offload_target):
             raise ConfigError(f'Invalid offload-target. Flowtable "{offload_target}" does not exist on the system')
 
     if rule_conf['action'] != 'synproxy' and 'synproxy' in rule_conf:
         raise ConfigError('"synproxy" option allowed only for action synproxy')
     if rule_conf['action'] == 'synproxy':
         if 'state' in rule_conf:
             raise ConfigError('For action "synproxy" state cannot be defined')
         if not rule_conf.get('synproxy', {}).get('tcp'):
             raise ConfigError('synproxy TCP MSS is not defined')
         if rule_conf.get('protocol', {}) != 'tcp':
             raise ConfigError('For action "synproxy" the protocol must be set to TCP')
 
     if 'queue_options' in rule_conf:
         if 'queue' not in rule_conf['action']:
             raise ConfigError('queue-options defined, but action is not set to queue')
         if 'fanout' in rule_conf['queue_options'] and ('queue' not in rule_conf or '-' not in rule_conf['queue']):
             raise ConfigError('queue-options fanout requires queue to be defined as a range')
 
     if 'queue' in rule_conf and 'queue' not in rule_conf['action']:
         raise ConfigError('queue defined, but action is not set to queue')
 
     if 'fragment' in rule_conf:
         if {'match_frag', 'match_non_frag'} <= set(rule_conf['fragment']):
             raise ConfigError('Cannot specify both "match-frag" and "match-non-frag"')
 
     if 'limit' in rule_conf:
         if 'rate' in rule_conf['limit']:
             rate_int = re.sub(r'\D', '', rule_conf['limit']['rate'])
             if int(rate_int) < 1:
                 raise ConfigError('Limit rate integer cannot be less than 1')
 
     if 'ipsec' in rule_conf:
         if {'match_ipsec', 'match_non_ipsec'} <= set(rule_conf['ipsec']):
             raise ConfigError('Cannot specify both "match-ipsec" and "match-non-ipsec"')
 
     if 'recent' in rule_conf:
         if not {'count', 'time'} <= set(rule_conf['recent']):
             raise ConfigError('Recent "count" and "time" values must be defined')
 
     tcp_flags = dict_search_args(rule_conf, 'tcp', 'flags')
     if tcp_flags:
         if dict_search_args(rule_conf, 'protocol') != 'tcp':
             raise ConfigError('Protocol must be tcp when specifying tcp flags')
 
         not_flags = dict_search_args(rule_conf, 'tcp', 'flags', 'not')
         if not_flags:
             duplicates = [flag for flag in tcp_flags if flag in not_flags]
             if duplicates:
                 raise ConfigError(f'Cannot match a tcp flag as set and not set')
 
     if 'protocol' in rule_conf:
         if rule_conf['protocol'] == 'icmp' and ipv6:
             raise ConfigError(f'Cannot match IPv4 ICMP protocol on IPv6, use ipv6-icmp')
         if rule_conf['protocol'] == 'ipv6-icmp' and not ipv6:
             raise ConfigError(f'Cannot match IPv6 ICMP protocol on IPv4, use icmp')
 
     for side in ['destination', 'source']:
         if side in rule_conf:
             side_conf = rule_conf[side]
 
             if len({'address', 'fqdn', 'geoip'} & set(side_conf)) > 1:
                 raise ConfigError('Only one of address, fqdn or geoip can be specified')
 
             if 'group' in side_conf:
                 if len({'address_group', 'network_group', 'domain_group'} & set(side_conf['group'])) > 1:
                     raise ConfigError('Only one address-group, network-group or domain-group can be specified')
 
                 for group in valid_groups:
                     if group in side_conf['group']:
                         group_name = side_conf['group'][group]
 
                         fw_group = f'ipv6_{group}' if ipv6 and group in ['address_group', 'network_group'] else group
                         error_group = fw_group.replace("_", "-")
 
                         if group in ['address_group', 'network_group', 'domain_group']:
                             types = [t for t in ['address', 'fqdn', 'geoip'] if t in side_conf]
                             if types:
                                 raise ConfigError(f'{error_group} and {types[0]} cannot both be defined')
 
                         if group_name and group_name[0] == '!':
                             group_name = group_name[1:]
 
                         group_obj = dict_search_args(firewall, 'group', fw_group, group_name)
 
                         if group_obj is None:
                             raise ConfigError(f'Invalid {error_group} "{group_name}" on firewall rule')
 
                         if not group_obj:
                             Warning(f'{error_group} "{group_name}" has no members!')
 
             if 'port' in side_conf or dict_search_args(side_conf, 'group', 'port_group'):
                 if 'protocol' not in rule_conf:
                     raise ConfigError('Protocol must be defined if specifying a port or port-group')
 
                 if rule_conf['protocol'] not in ['tcp', 'udp', 'tcp_udp']:
                     raise ConfigError('Protocol must be tcp, udp, or tcp_udp when specifying a port or port-group')
 
             if 'port' in side_conf and dict_search_args(side_conf, 'group', 'port_group'):
                 raise ConfigError(f'{side} port-group and port cannot both be defined')
 
     if 'add_address_to_group' in rule_conf:
         for type in ['destination_address', 'source_address']:
             if type in rule_conf['add_address_to_group']:
                 if 'address_group' not in rule_conf['add_address_to_group'][type]:
                     raise ConfigError(f'Dynamic address group must be defined.')
                 else:
                     target = rule_conf['add_address_to_group'][type]['address_group']
                     fwall_group = 'ipv6_address_group' if ipv6 else 'address_group'
                     group_obj = dict_search_args(firewall, 'group', 'dynamic_group', fwall_group, target)
                     if group_obj is None:
                         raise ConfigError('Invalid dynamic address group on firewall rule')
 
     if 'log_options' in rule_conf:
         if 'log' not in rule_conf:
             raise ConfigError('log-options defined, but log is not enabled')
 
         if 'snapshot_length' in rule_conf['log_options'] and 'group' not in rule_conf['log_options']:
             raise ConfigError('log-options snapshot-length defined, but log group is not defined')
 
         if 'queue_threshold' in rule_conf['log_options'] and 'group' not in rule_conf['log_options']:
             raise ConfigError('log-options queue-threshold defined, but log group is not defined')
 
     for direction in ['inbound_interface','outbound_interface']:
         if direction in rule_conf:
             if 'name' in rule_conf[direction] and 'group' in rule_conf[direction]:
                 raise ConfigError(f'Cannot specify both interface group and interface name for {direction}')
             if 'group' in rule_conf[direction]:
                 group_name = rule_conf[direction]['group']
                 if group_name[0] == '!':
                     group_name = group_name[1:]
                 group_obj = dict_search_args(firewall, 'group', 'interface_group', group_name)
                 if group_obj is None:
                     raise ConfigError(f'Invalid interface group "{group_name}" on firewall rule')
                 if not group_obj:
                     Warning(f'interface-group "{group_name}" has no members!')
 
 def verify_nested_group(group_name, group, groups, seen):
     if 'include' not in group:
         return
 
     seen.append(group_name)
 
     for g in group['include']:
         if g not in groups:
             raise ConfigError(f'Nested group "{g}" does not exist')
 
         if g in seen:
             raise ConfigError(f'Group "{group_name}" has a circular reference')
 
         if 'include' in groups[g]:
             verify_nested_group(g, groups[g], groups, seen)
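
 # Illustrative note (not part of the config flow): verify_nested_group() walks
 # nested "include" references depth-first and rejects cycles. With a
 # hypothetical groups dict such as
 #   groups = {'A': {'include': ['B']}, 'B': {'include': ['A']}}
 # calling verify_nested_group('A', groups['A'], groups, []) recurses into 'B',
 # finds 'A' already in the seen list and raises a ConfigError reporting the
 # circular reference.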
 
 def verify_hardware_offload(ifname):
     ethtool = Ethtool(ifname)
     enabled, fixed = ethtool.get_hw_tc_offload()
 
     if not enabled and fixed:
         raise ConfigError(f'Interface "{ifname}" does not support hardware offload')
 
     if not enabled:
         raise ConfigError(f'Interface "{ifname}" requires "offload hw-tc-offload"')
 
 def verify(firewall):
     if 'flowtable' in firewall:
         for flowtable, flowtable_conf in firewall['flowtable'].items():
             if 'interface' not in flowtable_conf:
                 raise ConfigError(f'Flowtable "{flowtable}" requires at least one interface')
 
             for ifname in flowtable_conf['interface']:
-                verify_interface_exists(ifname)
+                verify_interface_exists(firewall, ifname)
 
             if dict_search_args(flowtable_conf, 'offload') == 'hardware':
                 interfaces = flowtable_conf['interface']
 
                 for ifname in interfaces:
                     verify_hardware_offload(ifname)
 
     if 'group' in firewall:
         for group_type in nested_group_types:
             if group_type in firewall['group']:
                 groups = firewall['group'][group_type]
                 for group_name, group in groups.items():
                     verify_nested_group(group_name, group, groups, [])
 
     if 'ipv4' in firewall:
         for name in ['name','forward','input','output']:
             if name in firewall['ipv4']:
                 for name_id, name_conf in firewall['ipv4'][name].items():
                     if 'jump' in name_conf['default_action'] and 'default_jump_target' not in name_conf:
                         raise ConfigError('default-action set to jump, but no default-jump-target specified')
                     if 'default_jump_target' in name_conf:
                         target = name_conf['default_jump_target']
                         if 'jump' not in name_conf['default_action']:
                             raise ConfigError('default-jump-target defined, but default-action is not set to jump')
                         if name_conf['default_jump_target'] == name_id:
                             raise ConfigError('Loop detected on default-jump-target')
                         # Check that the default-jump-target exists (another firewall chain/name)
                         if target not in dict_search_args(firewall['ipv4'], 'name'):
                             raise ConfigError(f'Invalid jump-target. Firewall name {target} does not exist on the system')
 
                     if 'rule' in name_conf:
                         for rule_id, rule_conf in name_conf['rule'].items():
                             verify_rule(firewall, rule_conf, False)
 
     if 'ipv6' in firewall:
         for name in ['name','forward','input','output']:
             if name in firewall['ipv6']:
                 for name_id, name_conf in firewall['ipv6'][name].items():
                     if 'jump' in name_conf['default_action'] and 'default_jump_target' not in name_conf:
                         raise ConfigError('default-action set to jump, but no default-jump-target specified')
                     if 'default_jump_target' in name_conf:
                         target = name_conf['default_jump_target']
                         if 'jump' not in name_conf['default_action']:
                             raise ConfigError('default-jump-target defined, but default-action is not set to jump')
                         if name_conf['default_jump_target'] == name_id:
                             raise ConfigError('Loop detected on default-jump-target')
                         # Check that the default-jump-target exists (another firewall chain/name)
                         if target not in dict_search_args(firewall['ipv6'], 'name'):
                             raise ConfigError(f'Invalid jump-target. Firewall name {target} does not exist on the system')
 
                     if 'rule' in name_conf:
                         for rule_id, rule_conf in name_conf['rule'].items():
                             verify_rule(firewall, rule_conf, True)
 
     # Zone-based firewall checks
     local_zone = False
     zone_interfaces = []
 
     if 'zone' in firewall:
         for zone, zone_conf in firewall['zone'].items():
             if 'local_zone' not in zone_conf and 'interface' not in zone_conf:
                 raise ConfigError(f'Zone "{zone}" has no interfaces and is not the local zone')
 
             if 'local_zone' in zone_conf:
                 if local_zone:
                     raise ConfigError('There cannot be multiple local zones')
                 if 'interface' in zone_conf:
                     raise ConfigError('Local zone cannot have interfaces assigned')
                 if 'intra_zone_filtering' in zone_conf:
                     raise ConfigError('Local zone cannot use intra-zone-filtering')
                 local_zone = True
 
             if 'interface' in zone_conf:
                 found_duplicates = [intf for intf in zone_conf['interface'] if intf in zone_interfaces]
 
                 if found_duplicates:
                     raise ConfigError(f'Interfaces cannot be assigned to multiple zones')
 
                 zone_interfaces += zone_conf['interface']
 
             if 'intra_zone_filtering' in zone_conf:
                 intra_zone = zone_conf['intra_zone_filtering']
 
                 if len(intra_zone) > 1:
                     raise ConfigError('Only one intra-zone-filtering action can be specified')
 
                 if 'firewall' in intra_zone:
                     v4_name = dict_search_args(intra_zone, 'firewall', 'name')
                     if v4_name and not dict_search_args(firewall, 'ipv4', 'name', v4_name):
                         raise ConfigError(f'Firewall name "{v4_name}" does not exist')
 
                     v6_name = dict_search_args(intra_zone, 'firewall', 'ipv6_name')
                     if v6_name and not dict_search_args(firewall, 'ipv6', 'name', v6_name):
                         raise ConfigError(f'Firewall ipv6-name "{v6_name}" does not exist')
 
                     if not v4_name and not v6_name:
                         raise ConfigError('No firewall names specified for intra-zone-filtering')
 
             if 'from' in zone_conf:
                 for from_zone, from_conf in zone_conf['from'].items():
                     if from_zone not in firewall['zone']:
                         raise ConfigError(f'Zone "{zone}" refers to a non-existent or deleted zone "{from_zone}"')
 
                     v4_name = dict_search_args(from_conf, 'firewall', 'name')
                     if v4_name and not dict_search_args(firewall, 'ipv4', 'name', v4_name):
                         raise ConfigError(f'Firewall name "{v4_name}" does not exist')
 
                     v6_name = dict_search_args(from_conf, 'firewall', 'ipv6_name')
                     if v6_name and not dict_search_args(firewall, 'ipv6', 'name', v6_name):
                         raise ConfigError(f'Firewall ipv6-name "{v6_name}" does not exist')
 
     return None
 
 def generate(firewall):
     if not os.path.exists(nftables_conf):
         firewall['first_install'] = True
 
     if 'zone' in firewall:
         for local_zone, local_zone_conf in firewall['zone'].items():
             if 'local_zone' not in local_zone_conf:
                 continue
 
             local_zone_conf['from_local'] = {}
 
             for zone, zone_conf in firewall['zone'].items():
                 if zone == local_zone or 'from' not in zone_conf:
                     continue
                 if local_zone in zone_conf['from']:
                     local_zone_conf['from_local'][zone] = zone_conf['from'][local_zone]
 
     render(nftables_conf, 'firewall/nftables.j2', firewall)
     return None
 
 def apply_sysfs(firewall):
     for name, conf in sysfs_config.items():
         paths = glob(conf['sysfs'])
         value = None
 
         if name in firewall['global_options']:
             conf_value = firewall['global_options'][name]
             if conf_value in conf:
                 value = conf[conf_value]
             elif conf_value == 'enable':
                 value = '1'
             elif conf_value == 'disable':
                 value = '0'
 
         if value:
             for path in paths:
                 with open(path, 'w') as f:
                     f.write(value)
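
 # Illustrative note: apply_sysfs() translates "global-options" values into
 # kernel toggles. Assuming a hypothetical sysfs_config entry (the real mapping
 # is defined outside this hunk) such as
 #   sysfs_config = {'all_ping': {'sysfs': '/proc/sys/net/ipv4/icmp_echo_ignore_all',
 #                                'enable': '0', 'disable': '1'}}
 # a configured value of 'disable' resolves to conf['disable'] == '1' and that
 # string is written to every path matched by glob(conf['sysfs']).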
 
 def apply(firewall):
     install_result, output = rc_cmd(f'nft --file {nftables_conf}')
     if install_result == 1:
         raise ConfigError(f'Failed to apply firewall: {output}')
 
     apply_sysfs(firewall)
 
     call_dependents()
 
     # T970 Enable a resolver (systemd daemon) that checks
     # domain-group/fqdn addresses and updates entries for domains on a timeout.
     # Restart it so entries are refreshed, e.g. when the router was loaded without
     # an internet connection, or to force a re-synchronization.
     domain_action = 'stop'
     if dict_search_args(firewall, 'group', 'domain_group') or firewall['ip_fqdn'] or firewall['ip6_fqdn']:
         domain_action = 'restart'
     call(f'systemctl {domain_action} vyos-domain-resolver.service')
 
     if firewall['geoip_updated']:
         # Call helper script to Update set contents
         if 'name' in firewall['geoip_updated'] or 'ipv6_name' in firewall['geoip_updated']:
             print('Updating GeoIP. Please wait...')
             geoip_update(firewall)
 
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/interfaces_ethernet.py b/src/conf_mode/interfaces_ethernet.py
index 54d0669cb..afc48ead8 100755
--- a/src/conf_mode/interfaces_ethernet.py
+++ b/src/conf_mode/interfaces_ethernet.py
@@ -1,436 +1,436 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2019-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 
 from sys import exit
 
 from vyos.base import Warning
 from vyos.config import Config
 from vyos.configdict import get_interface_dict
 from vyos.configdict import is_node_changed
 from vyos.configverify import verify_address
 from vyos.configverify import verify_dhcpv6
 from vyos.configverify import verify_interface_exists
 from vyos.configverify import verify_mirror_redirect
 from vyos.configverify import verify_mtu
 from vyos.configverify import verify_mtu_ipv6
 from vyos.configverify import verify_vlan_config
 from vyos.configverify import verify_vrf
 from vyos.configverify import verify_bond_bridge_member
 from vyos.configverify import verify_pki_certificate
 from vyos.configverify import verify_pki_ca_certificate
 from vyos.ethtool import Ethtool
 from vyos.ifconfig import EthernetIf
 from vyos.ifconfig import BondIf
 from vyos.pki import find_chain
 from vyos.pki import encode_certificate
 from vyos.pki import load_certificate
 from vyos.pki import wrap_private_key
 from vyos.template import render
 from vyos.template import render_to_string
 from vyos.utils.process import call
 from vyos.utils.dict import dict_search
 from vyos.utils.dict import dict_to_paths_values
 from vyos.utils.dict import dict_set
 from vyos.utils.dict import dict_delete
 from vyos.utils.file import write_file
 from vyos import ConfigError
 from vyos import frr
 from vyos import airbag
 airbag.enable()
 
 # XXX: wpa_supplicant works on the source interface
 cfg_dir = '/run/wpa_supplicant'
 wpa_suppl_conf = '/run/wpa_supplicant/{ifname}.conf'
 
 def update_bond_options(conf: Config, eth_conf: dict) -> None:
     """
     Store the list of blocked options in eth_conf['bond_blocked_changes']
     if the interface is a bond member
     :param conf: Config object
     :type conf: Config
     :param eth_conf: Ethernet config dictionary
     :type eth_conf: dict
     """
     blocked_list = []
     bond_name = list(eth_conf['is_bond_member'].keys())[0]
     config_without_defaults = conf.get_config_dict(
         ['interfaces', 'ethernet', eth_conf['ifname']],
         key_mangling=('-', '_'),
         get_first_key=True,
         no_tag_node_value_mangle=True,
         with_defaults=False,
         with_recursive_defaults=False)
     config_with_defaults = conf.get_config_dict(
         ['interfaces', 'ethernet', eth_conf['ifname']],
         key_mangling=('-', '_'),
         get_first_key=True,
         no_tag_node_value_mangle=True,
         with_defaults=True,
         with_recursive_defaults=True)
     bond_config_with_defaults = conf.get_config_dict(
         ['interfaces', 'bonding', bond_name],
         key_mangling=('-', '_'),
         get_first_key=True,
         no_tag_node_value_mangle=True,
         with_defaults=True,
         with_recursive_defaults=True)
     eth_dict_paths = dict_to_paths_values(config_without_defaults)
     eth_path_base = ['interfaces', 'ethernet', eth_conf['ifname']]
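
     # Illustrative example (assumed output shape): dict_to_paths_values()
     # flattens the nested config into dotted paths mapped to values, e.g.
     #   {'ring_buffer': {'rx': '256'}}  ->  {'ring_buffer.rx': '256'}
     # which is why option_path is split on '.' again further down.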
 
     # if the option is configured under the ethernet section
     for option_path, option_value in eth_dict_paths.items():
         bond_option_value = dict_search(option_path, bond_config_with_defaults)
 
         # If the option is allowed to change, continue
         if option_path in EthernetIf.get_bond_member_allowed_options():
             continue
         # if the option is inherited from the bond, take the value from the bond interface
         if option_path in BondIf.get_inherit_bond_options():
             # If option equals to bond option then do nothing
             if option_value == bond_option_value:
                 continue
             else:
                 # if ethernet has option and bond interface has
                 # then copy it from bond
                 if bond_option_value is not None:
                     if is_node_changed(conf, eth_path_base + option_path.split('.')):
                         Warning(
                             f'Cannot apply "{option_path.replace(".", " ")}" to "{option_value}".' \
                             f' Interface "{eth_conf["ifname"]}" is a bond member.' \
                             f' Option is inherited from bond "{bond_name}"')
                     dict_set(option_path, bond_option_value, eth_conf)
                     continue
                 # if ethernet has the option but the bond interface does not,
                 # then delete it from the dict and do not apply it
                 else:
                     if is_node_changed(conf, eth_path_base + option_path.split('.')):
                         Warning(
                             f'Cannot apply "{option_path.replace(".", " ")}".' \
                             f' Interface "{eth_conf["ifname"]}" is a bond member.' \
                             f' Option is inherited from bond "{bond_name}"')
                     dict_delete(option_path, eth_conf)
         blocked_list.append(option_path)
 
     # if inherited option is not configured under ethernet section but configured under bond section
     for option_path in BondIf.get_inherit_bond_options():
         bond_option_value = dict_search(option_path, bond_config_with_defaults)
         if bond_option_value is not None:
             if option_path not in eth_dict_paths:
                 if is_node_changed(conf, eth_path_base + option_path.split('.')):
                     Warning(
                         f'Cannot apply "{option_path.replace(".", " ")}" to "{dict_search(option_path, config_with_defaults)}".' \
                         f' Interface "{eth_conf["ifname"]}" is a bond member. ' \
                         f'Option is inherited from bond "{bond_name}"')
                 dict_set(option_path, bond_option_value, eth_conf)
     eth_conf['bond_blocked_changes'] = blocked_list
     return None
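
 # Illustrative note: assuming 'mtu' is among BondIf.get_inherit_bond_options(),
 # an "mtu" value set on a bond member is not applied to the member itself; the
 # value present on the bond is copied into eth_conf instead and a Warning is
 # emitted if the member node actually changed. Options that are neither allowed
 # nor inheritable end up in eth_conf['bond_blocked_changes'] and later cause
 # verify_allowedbond_changes() to fail.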
 
 def get_config(config=None):
     """
     Retrieve CLI config as dictionary. The dictionary can never be empty, as at least the
     interface name will be added or a deleted flag
     """
     if config:
         conf = config
     else:
         conf = Config()
 
     base = ['interfaces', 'ethernet']
     ifname, ethernet = get_interface_dict(conf, base, with_pki=True)
 
     # T5862 - default MTU is not acceptable in some environments
     # There are cloud environments available where the maximum supported
     # ethernet MTU is e.g. 1450 bytes, thus we clamp this to the adapter's
     # maximum MTU value or 1500 bytes - whatever is lower
     if 'mtu' not in ethernet:
         try:
             ethernet['mtu'] = '1500'
             max_mtu = EthernetIf(ifname).get_max_mtu()
             if max_mtu < int(ethernet['mtu']):
                 ethernet['mtu'] = str(max_mtu)
         except:
             pass
 
     if 'is_bond_member' in ethernet:
         update_bond_options(conf, ethernet)
 
     tmp = is_node_changed(conf, base + [ifname, 'speed'])
     if tmp: ethernet.update({'speed_duplex_changed': {}})
 
     tmp = is_node_changed(conf, base + [ifname, 'duplex'])
     if tmp: ethernet.update({'speed_duplex_changed': {}})
 
     return ethernet
 
 def verify_speed_duplex(ethernet: dict, ethtool: Ethtool):
     """
      Verify speed and duplex
     :param ethernet: dictionary which is received from get_interface_dict
     :type ethernet: dict
     :param ethtool: Ethernet object
     :type ethtool: Ethtool
     """
     if ((ethernet['speed'] == 'auto' and ethernet['duplex'] != 'auto') or
             (ethernet['speed'] != 'auto' and ethernet['duplex'] == 'auto')):
         raise ConfigError(
             'Speed/Duplex mismatch. Both must be set to auto or both configured manually')
 
     if ethernet['speed'] != 'auto' and ethernet['duplex'] != 'auto':
         # We need to verify if the requested speed and duplex setting is
         # supported by the underlying NIC.
         speed = ethernet['speed']
         duplex = ethernet['duplex']
         if not ethtool.check_speed_duplex(speed, duplex):
             raise ConfigError(
                 f'Adapter does not support changing speed ' \
                 f'and duplex settings to: {speed}/{duplex}!')
 
 
 def verify_flow_control(ethernet: dict, ethtool: Ethtool):
     """
      Verify flow control
     :param ethernet: dictionary which is received from get_interface_dict
     :type ethernet: dict
     :param ethtool: Ethernet object
     :type ethtool: Ethtool
     """
     if 'disable_flow_control' in ethernet:
         if not ethtool.check_flow_control():
             raise ConfigError(
                 'Adapter does not support changing flow-control settings!')
 
 
 def verify_ring_buffer(ethernet: dict, ethtool: Ethtool):
     """
      Verify ring buffer
     :param ethernet: dictionary which is received from get_interface_dict
     :type ethernet: dict
     :param ethtool: Ethernet object
     :type ethtool: Ethtool
     """
     if 'ring_buffer' in ethernet:
         max_rx = ethtool.get_ring_buffer_max('rx')
         if not max_rx:
             raise ConfigError(
                 'Driver does not support RX ring-buffer configuration!')
 
         max_tx = ethtool.get_ring_buffer_max('tx')
         if not max_tx:
             raise ConfigError(
                 'Driver does not support TX ring-buffer configuration!')
 
         rx = dict_search('ring_buffer.rx', ethernet)
         if rx and int(rx) > int(max_rx):
             raise ConfigError(f'Driver only supports a maximum RX ring-buffer ' \
                               f'size of "{max_rx}" bytes!')
 
         tx = dict_search('ring_buffer.tx', ethernet)
         if tx and int(tx) > int(max_tx):
             raise ConfigError(f'Driver only supports a maximum TX ring-buffer ' \
                               f'size of "{max_tx}" bytes!')
 
 
 def verify_offload(ethernet: dict, ethtool: Ethtool):
     """
      Verify offloading capabilities
     :param ethernet: dictionary which is received from get_interface_dict
     :type ethernet: dict
     :param ethtool: Ethernet object
     :type ethtool: Ethtool
     """
     if dict_search('offload.rps', ethernet) != None:
         if not os.path.exists(f'/sys/class/net/{ethernet["ifname"]}/queues/rx-0/rps_cpus'):
             raise ConfigError('Interface does not support RPS!')
     driver = ethtool.get_driver_name()
     # T3342 - Xen driver requires special treatment
     if driver == 'vif':
         if int(ethernet['mtu']) > 1500 and dict_search('offload.sg', ethernet) == None:
             raise ConfigError('Xen netback driver requires scatter-gather offloading '\
                               'for MTU sizes larger than 1500 bytes')
 
 
 def verify_allowedbond_changes(ethernet: dict):
     """
      Verify changed options if interface is in bonding
     :param ethernet: dictionary which is received from get_interface_dict
     :type ethernet: dict
     """
     if 'bond_blocked_changes' in ethernet:
         for option in ethernet['bond_blocked_changes']:
             raise ConfigError(f'Cannot configure "{option.replace(".", " ")}"' \
                               f' on interface "{ethernet["ifname"]}".' \
                               f' Interface is a bond member')
 
 def verify_eapol(ethernet: dict):
     """
     Common helper function used by interface implementations to perform
     recurring validation of EAPoL configuration.
     """
     if 'eapol' not in ethernet:
         return
 
     if 'certificate' not in ethernet['eapol']:
         raise ConfigError('Certificate must be specified when using EAPoL!')
 
     verify_pki_certificate(ethernet, ethernet['eapol']['certificate'], no_password_protected=True)
 
     if 'ca_certificate' in ethernet['eapol']:
         for ca_cert in ethernet['eapol']['ca_certificate']:
             verify_pki_ca_certificate(ethernet, ca_cert)
 
 def verify(ethernet):
     if 'deleted' in ethernet:
         return None
     if 'is_bond_member' in ethernet:
         verify_bond_member(ethernet)
     else:
         verify_ethernet(ethernet)
 
 
 def verify_bond_member(ethernet):
     """
      Verification function for ethernet interface which is in bonding
     :param ethernet: dictionary which is received from get_interface_dict
     :type ethernet: dict
     """
     ifname = ethernet['ifname']
-    verify_interface_exists(ifname)
+    verify_interface_exists(ethernet, ifname)
     verify_eapol(ethernet)
     verify_mirror_redirect(ethernet)
     ethtool = Ethtool(ifname)
     verify_speed_duplex(ethernet, ethtool)
     verify_flow_control(ethernet, ethtool)
     verify_ring_buffer(ethernet, ethtool)
     verify_offload(ethernet, ethtool)
     verify_allowedbond_changes(ethernet)
 
 def verify_ethernet(ethernet):
     """
      Verification function for simple ethernet interface
     :param ethernet: dictionary which is received from get_interface_dict
     :type ethernet: dict
     """
     ifname = ethernet['ifname']
-    verify_interface_exists(ifname)
+    verify_interface_exists(ethernet, ifname)
     verify_mtu(ethernet)
     verify_mtu_ipv6(ethernet)
     verify_dhcpv6(ethernet)
     verify_address(ethernet)
     verify_vrf(ethernet)
     verify_bond_bridge_member(ethernet)
     verify_eapol(ethernet)
     verify_mirror_redirect(ethernet)
     ethtool = Ethtool(ifname)
     # No need to check speed and duplex keys as both have default values.
     verify_speed_duplex(ethernet, ethtool)
     verify_flow_control(ethernet, ethtool)
     verify_ring_buffer(ethernet, ethtool)
     verify_offload(ethernet, ethtool)
     # use common function to verify VLAN configuration
     verify_vlan_config(ethernet)
     return None
 
 
 def generate(ethernet):
     # render real configuration file once
     wpa_supplicant_conf = wpa_suppl_conf.format(**ethernet)
 
     if 'deleted' in ethernet:
         # delete configuration on interface removal
         if os.path.isfile(wpa_supplicant_conf):
             os.unlink(wpa_supplicant_conf)
         return None
 
     if 'eapol' in ethernet:
         ifname = ethernet['ifname']
 
         render(wpa_supplicant_conf, 'ethernet/wpa_supplicant.conf.j2', ethernet)
 
         cert_file_path = os.path.join(cfg_dir, f'{ifname}_cert.pem')
         cert_key_path = os.path.join(cfg_dir, f'{ifname}_cert.key')
 
         cert_name = ethernet['eapol']['certificate']
         pki_cert = ethernet['pki']['certificate'][cert_name]
 
         loaded_pki_cert = load_certificate(pki_cert['certificate'])
         loaded_ca_certs = {load_certificate(c['certificate'])
             for c in ethernet['pki']['ca'].values()} if 'ca' in ethernet['pki'] else {}
 
         cert_full_chain = find_chain(loaded_pki_cert, loaded_ca_certs)
 
         write_file(cert_file_path,
                    '\n'.join(encode_certificate(c) for c in cert_full_chain))
         write_file(cert_key_path, wrap_private_key(pki_cert['private']['key']))
 
         if 'ca_certificate' in ethernet['eapol']:
             ca_cert_file_path = os.path.join(cfg_dir, f'{ifname}_ca.pem')
             ca_chains = []
 
             for ca_cert_name in ethernet['eapol']['ca_certificate']:
                 pki_ca_cert = ethernet['pki']['ca'][ca_cert_name]
                 loaded_ca_cert = load_certificate(pki_ca_cert['certificate'])
                 ca_full_chain = find_chain(loaded_ca_cert, loaded_ca_certs)
                 ca_chains.append(
                     '\n'.join(encode_certificate(c) for c in ca_full_chain))
 
             write_file(ca_cert_file_path, '\n'.join(ca_chains))
 
     ethernet['frr_zebra_config'] = ''
     if 'deleted' not in ethernet:
         ethernet['frr_zebra_config'] = render_to_string('frr/evpn.mh.frr.j2', ethernet)
 
     return None
 
 def apply(ethernet):
     ifname = ethernet['ifname']
     # take care about EAPoL supplicant daemon
     eapol_action='stop'
 
     e = EthernetIf(ifname)
     if 'deleted' in ethernet:
         # delete interface
         e.remove()
     else:
         e.update(ethernet)
         if 'eapol' in ethernet:
             eapol_action='reload-or-restart'
 
     call(f'systemctl {eapol_action} wpa_supplicant-wired@{ifname}')
 
     zebra_daemon = 'zebra'
     # Save original configuration prior to starting any commit actions
     frr_cfg = frr.FRRConfig()
 
     # The route-map used for the FIB (zebra) is part of the zebra daemon
     frr_cfg.load_configuration(zebra_daemon)
     frr_cfg.modify_section(f'^interface {ifname}', stop_pattern='^exit', remove_stop_mark=True)
     if 'frr_zebra_config' in ethernet:
         frr_cfg.add_before(frr.default_add_before, ethernet['frr_zebra_config'])
     frr_cfg.commit_configuration(zebra_daemon)
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
 
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/interfaces_wwan.py b/src/conf_mode/interfaces_wwan.py
index 2515dc838..230eb14d6 100755
--- a/src/conf_mode/interfaces_wwan.py
+++ b/src/conf_mode/interfaces_wwan.py
@@ -1,189 +1,189 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2020-2022 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 
 from sys import exit
 from time import sleep
 
 from vyos.config import Config
 from vyos.configdict import get_interface_dict
 from vyos.configdict import is_node_changed
 from vyos.configverify import verify_authentication
 from vyos.configverify import verify_interface_exists
 from vyos.configverify import verify_mirror_redirect
 from vyos.configverify import verify_vrf
 from vyos.ifconfig import WWANIf
 from vyos.utils.dict import dict_search
 from vyos.utils.process import cmd
 from vyos.utils.process import call
 from vyos.utils.process import DEVNULL
 from vyos.utils.process import is_systemd_service_active
 from vyos.utils.file import write_file
 from vyos import ConfigError
 from vyos import airbag
 airbag.enable()
 
 service_name = 'ModemManager.service'
 cron_script = '/etc/cron.d/vyos-wwan'
 
 def get_config(config=None):
     """
     Retrieve CLI config as dictionary. The dictionary can never be empty, as at least the
     interface name will be added or a deleted flag
     """
     if config:
         conf = config
     else:
         conf = Config()
     base = ['interfaces', 'wwan']
     ifname, wwan = get_interface_dict(conf, base)
 
     # We should only terminate the WWAN session if critical parameters change.
     # All parameters that can be changed on-the-fly (like interface description)
     # should not lead to a reconnect!
     tmp = is_node_changed(conf, base + [ifname, 'address'])
     if tmp: wwan.update({'shutdown_required': {}})
 
     tmp = is_node_changed(conf, base + [ifname, 'apn'])
     if tmp: wwan.update({'shutdown_required': {}})
 
     tmp = is_node_changed(conf, base + [ifname, 'disable'])
     if tmp: wwan.update({'shutdown_required': {}})
 
     tmp = is_node_changed(conf, base + [ifname, 'vrf'])
     if tmp: wwan.update({'shutdown_required': {}})
 
     tmp = is_node_changed(conf, base + [ifname, 'authentication'])
     if tmp: wwan.update({'shutdown_required': {}})
 
     tmp = is_node_changed(conf, base + [ifname, 'ipv6', 'address', 'autoconf'])
     if tmp: wwan.update({'shutdown_required': {}})
 
     # We need to know the number of other WWAN interfaces as ModemManager needs
     # to be started or stopped.
     wwan['other_interfaces'] = conf.get_config_dict([], key_mangling=('-', '_'),
                                                        get_first_key=True,
                                                        no_tag_node_value_mangle=True)
 
     # This if-clause is just to be sure - it will always evaluate to true
     if ifname in wwan['other_interfaces']:
         del wwan['other_interfaces'][ifname]
     if len(wwan['other_interfaces']) == 0:
         del wwan['other_interfaces']
 
     return wwan
 
 def verify(wwan):
     if 'deleted' in wwan:
         return None
 
     ifname = wwan['ifname']
     if not 'apn' in wwan:
         raise ConfigError(f'No APN configured for "{ifname}"!')
 
-    verify_interface_exists(ifname)
+    verify_interface_exists(wwan, ifname)
     verify_authentication(wwan)
     verify_vrf(wwan)
     verify_mirror_redirect(wwan)
 
     return None
 
 def generate(wwan):
     if 'deleted' in wwan:
         # We are the last WWAN interface - there are no other ones remaining
         # thus the cronjob needs to go away, too
         if 'other_interfaces' not in wwan:
             if os.path.exists(cron_script):
                 os.unlink(cron_script)
         return None
 
     # Install cron triggered helper script to re-dial WWAN interfaces on
     # disconnect - e.g. happens during RF signal loss. The script watches every
     # WWAN interface - so there is only one instance.
     if not os.path.exists(cron_script):
         write_file(cron_script, '*/5 * * * * root /usr/libexec/vyos/vyos-check-wwan.py\n')
 
     return None
 
 def apply(wwan):
     # ModemManager is required to dial WWAN connections - one instance is
     # required to serve all modems. Activate ModemManager on first invocation
     # of any WWAN interface.
     if not is_systemd_service_active(service_name):
         cmd(f'systemctl start {service_name}')
 
         counter = 100
         # Wait until a modem is detected and then we can continue
         while counter > 0:
             counter -= 1
             tmp = cmd('mmcli -L')
             if tmp != 'No modems were found':
                 break
             sleep(0.250)
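
         # Note: the wait above is bounded - 100 iterations with a 0.25 second
         # sleep gives the modem roughly 25 seconds to show up before we
         # continue regardless.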
 
     if 'shutdown_required' in wwan:
         # we only need the modem number. wwan0 -> 0, wwan1 -> 1
         modem = wwan['ifname'].lstrip('wwan')
         base_cmd = f'mmcli --modem {modem}'
         # Number of bearers is limited - always disconnect first
         cmd(f'{base_cmd} --simple-disconnect')
 
     w = WWANIf(wwan['ifname'])
     if 'deleted' in wwan or 'disable' in wwan:
         w.remove()
 
         # We are the last WWAN interface - there are no other WWAN interfaces
         # remaining, thus we can stop ModemManager and free resources.
         if 'other_interfaces' not in wwan:
             cmd(f'systemctl stop {service_name}')
             # Clean CRON helper script which is used to re-connect when
             # RF signal is lost
             if os.path.exists(cron_script):
                 os.unlink(cron_script)
 
         return None
 
     if 'shutdown_required' in wwan:
         ip_type = 'ipv4'
         slaac = dict_search('ipv6.address.autoconf', wwan) != None
         if 'address' in wwan:
             if 'dhcp' in wwan['address'] and ('dhcpv6' in wwan['address'] or slaac):
                 ip_type = 'ipv4v6'
             elif 'dhcpv6' in wwan['address'] or slaac:
                 ip_type = 'ipv6'
             elif 'dhcp' in wwan['address']:
                 ip_type = 'ipv4'
 
         options = f'ip-type={ip_type},apn=' + wwan['apn']
         if 'authentication' in wwan:
             options += ',user={username},password={password}'.format(**wwan['authentication'])
 
         command = f'{base_cmd} --simple-connect="{options}"'
         call(command, stdout=DEVNULL)
 
     w.update(wwan)
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/policy_local-route.py b/src/conf_mode/policy_local-route.py
index f458f4e82..331fd972d 100755
--- a/src/conf_mode/policy_local-route.py
+++ b/src/conf_mode/policy_local-route.py
@@ -1,310 +1,310 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2020-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from itertools import product
 from sys import exit
 
 from vyos.config import Config
 from vyos.configdict import dict_merge
 from vyos.configdict import node_changed
 from vyos.configdict import leaf_node_changed
 from vyos.configverify import verify_interface_exists
 from vyos.utils.process import call
 from vyos import ConfigError
 from vyos import airbag
 airbag.enable()
 
 def get_config(config=None):
 
     if config:
         conf = config
     else:
         conf = Config()
     base = ['policy']
 
     pbr = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
 
     for route in ['local_route', 'local_route6']:
         dict_id = 'rule_remove' if route == 'local_route' else 'rule6_remove'
         route_key = 'local-route' if route == 'local_route' else 'local-route6'
         base_rule = base + [route_key, 'rule']
 
         # delete policy local-route
         dict = {}
         tmp = node_changed(conf, base_rule, key_mangling=('-', '_'))
         if tmp:
             for rule in (tmp or []):
                 src = leaf_node_changed(conf, base_rule + [rule, 'source', 'address'])
                 src_port = leaf_node_changed(conf, base_rule + [rule, 'source', 'port'])
                 fwmk = leaf_node_changed(conf, base_rule + [rule, 'fwmark'])
                 iif = leaf_node_changed(conf, base_rule + [rule, 'inbound-interface'])
                 dst = leaf_node_changed(conf, base_rule + [rule, 'destination', 'address'])
                 dst_port = leaf_node_changed(conf, base_rule + [rule, 'destination', 'port'])
                 table = leaf_node_changed(conf, base_rule + [rule, 'set', 'table'])
                 proto = leaf_node_changed(conf, base_rule + [rule, 'protocol'])
                 rule_def = {}
                 if src:
                     rule_def = dict_merge({'source': {'address': src}}, rule_def)
                 if src_port:
                     rule_def = dict_merge({'source': {'port': src_port}}, rule_def)
                 if fwmk:
                     rule_def = dict_merge({'fwmark' : fwmk}, rule_def)
                 if iif:
                     rule_def = dict_merge({'inbound_interface' : iif}, rule_def)
                 if dst:
                     rule_def = dict_merge({'destination': {'address': dst}}, rule_def)
                 if dst_port:
                     rule_def = dict_merge({'destination': {'port': dst_port}}, rule_def)
                 if table:
                     rule_def = dict_merge({'table' : table}, rule_def)
                 if proto:
                     rule_def = dict_merge({'protocol' : proto}, rule_def)
                 dict = dict_merge({dict_id : {rule : rule_def}}, dict)
                 pbr.update(dict)
 
         if not route in pbr:
             continue
 
         # delete policy local-route rule x source x.x.x.x
         # delete policy local-route rule x fwmark x
         # delete policy local-route rule x destination x.x.x.x
         if 'rule' in pbr[route]:
             for rule, rule_config in pbr[route]['rule'].items():
                 src = leaf_node_changed(conf, base_rule + [rule, 'source', 'address'])
                 src_port = leaf_node_changed(conf, base_rule + [rule, 'source', 'port'])
                 fwmk = leaf_node_changed(conf, base_rule + [rule, 'fwmark'])
                 iif = leaf_node_changed(conf, base_rule + [rule, 'inbound-interface'])
                 dst = leaf_node_changed(conf, base_rule + [rule, 'destination', 'address'])
                 dst_port = leaf_node_changed(conf, base_rule + [rule, 'destination', 'port'])
                 table = leaf_node_changed(conf, base_rule + [rule, 'set', 'table'])
                 proto = leaf_node_changed(conf, base_rule + [rule, 'protocol'])
                 # keep track of changes in configuration
                 # otherwise we might remove an existing node although nothing else has changed
                 changed = False
 
                 rule_def = {}
                 # src is None if there are no changes to src
                 if src is None:
                     # if src hasn't changed, include its current value in the removal selector;
                     # when a new selector is added we still have to remove all previously
                     # installed rules, so the unchanged source value(s) are included here
                     if 'source' in rule_config:
                         if 'address' in rule_config['source']:
                             rule_def = dict_merge({'source': {'address': rule_config['source']['address']}}, rule_def)
                 else:
                     # if src is not None, its previous content will be returned;
                     # this can be an empty array if it is just being set, or the previous value.
                     # either way, something has changed and we only want to remove previous values
                     changed = True
                     # set the old value for removal if it's not empty
                     if len(src) > 0:
                         rule_def = dict_merge({'source': {'address': src}}, rule_def)
 
                 # source port
                 if src_port is None:
                     if 'source' in rule_config:
                         if 'port' in rule_config['source']:
                             tmp = rule_config['source']['port']
                             if isinstance(tmp, str):
                                 tmp = [tmp]
                             rule_def = dict_merge({'source': {'port': tmp}}, rule_def)
                 else:
                     changed = True
                     if len(src_port) > 0:
                         rule_def = dict_merge({'source': {'port': src_port}}, rule_def)
 
                 # fwmark
                 if fwmk is None:
                     if 'fwmark' in rule_config:
                         tmp = rule_config['fwmark']
                         if isinstance(tmp, str):
                             tmp = [tmp]
                         rule_def = dict_merge({'fwmark': tmp}, rule_def)
                 else:
                     changed = True
                     if len(fwmk) > 0:
                         rule_def = dict_merge({'fwmark' : fwmk}, rule_def)
 
                 # inbound-interface
                 if iif is None:
                     if 'inbound_interface' in rule_config:
                         rule_def = dict_merge({'inbound_interface': rule_config['inbound_interface']}, rule_def)
                 else:
                     changed = True
                     if len(iif) > 0:
                         rule_def = dict_merge({'inbound_interface' : iif}, rule_def)
 
                 # destination address
                 if dst is None:
                     if 'destination' in rule_config:
                         if 'address' in rule_config['destination']:
                             rule_def = dict_merge({'destination': {'address': rule_config['destination']['address']}}, rule_def)
                 else:
                     changed = True
                     if len(dst) > 0:
                         rule_def = dict_merge({'destination': {'address': dst}}, rule_def)
 
                 # destination port
                 if dst_port is None:
                     if 'destination' in rule_config:
                         if 'port' in rule_config['destination']:
                             tmp = rule_config['destination']['port']
                             if isinstance(tmp, str):
                                 tmp = [tmp]
                             rule_def = dict_merge({'destination': {'port': tmp}}, rule_def)
                 else:
                     changed = True
                     if len(dst_port) > 0:
                         rule_def = dict_merge({'destination': {'port': dst_port}}, rule_def)
 
                 # table
                 if table is None:
                     if 'set' in rule_config and 'table' in rule_config['set']:
                         rule_def = dict_merge({'table': [rule_config['set']['table']]}, rule_def)
                 else:
                     changed = True
                     if len(table) > 0:
                         rule_def = dict_merge({'table' : table}, rule_def)
 
                 # protocol
                 if proto is None:
                     if 'protocol' in rule_config:
                         tmp = rule_config['protocol']
                         if isinstance(tmp, str):
                             tmp = [tmp]
                         rule_def = dict_merge({'protocol': tmp}, rule_def)
                 else:
                     changed = True
                     if len(proto) > 0:
                         rule_def = dict_merge({'protocol' : proto}, rule_def)
 
                 if changed:
                     dict = dict_merge({dict_id : {rule : rule_def}}, dict)
                     pbr.update(dict)
 
     return pbr
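
 # Illustrative sketch of the removal flow: if only one selector of an existing
 # rule changes (say fwmark 23 -> 42 on rule 10), leaf_node_changed() returns
 # the previous value(s), so get_config() records something like
 #   {'rule_remove': {'10': {'fwmark': ['23'], 'table': ['100']}}}
 # with unchanged selectors carried over from the running rule. apply() then
 # builds the cartesian product of those values and issues matching
 # "ip rule del prio 10 ..." commands before installing the new rule.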
 
 def verify(pbr):
     # bail out early - looks like removal from running config
     if not pbr:
         return None
 
     for route in ['local_route', 'local_route6']:
         if not route in pbr:
             continue
 
         pbr_route = pbr[route]
         if 'rule' in pbr_route:
             for rule in pbr_route['rule']:
                 if (
                     'source' not in pbr_route['rule'][rule] and
                     'destination' not in pbr_route['rule'][rule] and
                     'fwmark' not in pbr_route['rule'][rule] and
                     'inbound_interface' not in pbr_route['rule'][rule] and
                     'protocol' not in pbr_route['rule'][rule]
                 ):
                     raise ConfigError('Source or destination address or fwmark or inbound-interface or protocol is required!')
 
                 if 'set' not in pbr_route['rule'][rule] or 'table' not in pbr_route['rule'][rule]['set']:
                     raise ConfigError('Table set is required!')
 
                 if 'inbound_interface' in pbr_route['rule'][rule]:
                     interface = pbr_route['rule'][rule]['inbound_interface']
-                    verify_interface_exists(interface)
+                    verify_interface_exists(pbr, interface)
 
     return None
 
 def generate(pbr):
     if not pbr:
         return None
 
     return None
 
 def apply(pbr):
     if not pbr:
         return None
 
     # Delete old rule if needed
     for rule_rm in ['rule_remove', 'rule6_remove']:
         if rule_rm in pbr:
             v6 = " -6" if rule_rm == 'rule6_remove' else ""
 
             for rule, rule_config in pbr[rule_rm].items():
                 source = rule_config.get('source', {}).get('address', [''])
                 source_port = rule_config.get('source', {}).get('port', [''])
                 destination = rule_config.get('destination', {}).get('address', [''])
                 destination_port = rule_config.get('destination', {}).get('port', [''])
                 fwmark = rule_config.get('fwmark', [''])
                 inbound_interface = rule_config.get('inbound_interface', [''])
                 protocol = rule_config.get('protocol', [''])
                 table = rule_config.get('table', [''])
 
                 for src, dst, src_port, dst_port, fwmk, iif, proto, table in product(
                         source, destination, source_port, destination_port,
                         fwmark, inbound_interface, protocol, table):
                     f_src = '' if src == '' else f' from {src} '
                     f_src_port = '' if src_port == '' else f' sport {src_port} '
                     f_dst = '' if dst == '' else f' to {dst} '
                     f_dst_port = '' if dst_port == '' else f' dport {dst_port} '
                     f_fwmk = '' if fwmk == '' else f' fwmark {fwmk} '
                     f_iif = '' if iif == '' else f' iif {iif} '
                     f_proto = '' if proto == '' else f' ipproto {proto} '
                     f_table = '' if table == '' else f' lookup {table} '
 
                     call(f'ip{v6} rule del prio {rule} {f_src}{f_dst}{f_proto}{f_src_port}{f_dst_port}{f_fwmk}{f_iif}{f_table}')
 
     # Generate new config
     for route in ['local_route', 'local_route6']:
         if not route in pbr:
             continue
 
         v6 = " -6" if route == 'local_route6' else ""
         pbr_route = pbr[route]
 
         if 'rule' in pbr_route:
             for rule, rule_config in pbr_route['rule'].items():
                 table = rule_config['set'].get('table', '')
                 source = rule_config.get('source', {}).get('address', ['all'])
                 source_port = rule_config.get('source', {}).get('port', '')
                 destination = rule_config.get('destination', {}).get('address', ['all'])
                 destination_port = rule_config.get('destination', {}).get('port', '')
                 fwmark = rule_config.get('fwmark', '')
                 inbound_interface = rule_config.get('inbound_interface', '')
                 protocol = rule_config.get('protocol', '')
 
                 for src in source:
                     f_src = f' from {src} ' if src else ''
                     for dst in destination:
                         f_dst = f' to {dst} ' if dst else ''
                         f_src_port = f' sport {source_port} ' if source_port else ''
                         f_dst_port = f' dport {destination_port} ' if destination_port else ''
                         f_fwmk = f' fwmark {fwmark} ' if fwmark else ''
                         f_iif = f' iif {inbound_interface} ' if inbound_interface else ''
                         f_proto = f' ipproto {protocol} ' if protocol else ''
 
                         call(f'ip{v6} rule add prio {rule}{f_src}{f_dst}{f_proto}{f_src_port}{f_dst_port}{f_fwmk}{f_iif} lookup {table}')
 
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/protocols_igmp-proxy.py b/src/conf_mode/protocols_igmp-proxy.py
index afcef0985..9a07adf05 100755
--- a/src/conf_mode/protocols_igmp-proxy.py
+++ b/src/conf_mode/protocols_igmp-proxy.py
@@ -1,112 +1,112 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2018-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 
 from sys import exit
 
 from vyos.base import Warning
 from vyos.config import Config
 from vyos.configverify import verify_interface_exists
 from vyos.template import render
 from vyos.utils.process import call
 from vyos.utils.dict import dict_search
 from vyos import ConfigError
 from vyos import airbag
 airbag.enable()
 
 config_file = r'/etc/igmpproxy.conf'
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
 
     base = ['protocols', 'igmp-proxy']
     igmp_proxy = conf.get_config_dict(base, key_mangling=('-', '_'),
                                       get_first_key=True,
                                       with_defaults=True)
 
     if conf.exists(['protocols', 'igmp']):
         igmp_proxy.update({'igmp_configured': ''})
 
     if conf.exists(['protocols', 'pim']):
         igmp_proxy.update({'pim_configured': ''})
 
     return igmp_proxy
 
 def verify(igmp_proxy):
     # bail out early - looks like removal from running config
     if not igmp_proxy or 'disable' in igmp_proxy:
         return None
 
     if 'igmp_configured' in igmp_proxy or 'pim_configured' in igmp_proxy:
         raise ConfigError('Cannot configure both IGMP proxy and PIM '\
                           'at the same time')
 
     # at least two interfaces are required, one upstream and one downstream
     if 'interface' not in igmp_proxy or len(igmp_proxy['interface']) < 2:
         raise ConfigError('Must define exactly one upstream and at least one ' \
                           'downstream interface!')
 
     upstream = 0
     for interface, config in igmp_proxy['interface'].items():
-        verify_interface_exists(interface)
+        verify_interface_exists(igmp_proxy, interface)
         if dict_search('role', config) == 'upstream':
             upstream += 1
 
     if upstream == 0:
         raise ConfigError('At least 1 upstream interface is required!')
     elif upstream > 1:
         raise ConfigError('Only 1 upstream interface allowed!')
 
     return None
 
 def generate(igmp_proxy):
     # bail out early - looks like removal from running config
     if not igmp_proxy:
         return None
 
     # bail out early - service is disabled, but inform user
     if 'disable' in igmp_proxy:
         Warning('IGMP Proxy will be deactivated because it is disabled')
         return None
 
     render(config_file, 'igmp-proxy/igmpproxy.conf.j2', igmp_proxy)
 
     return None
 
 def apply(igmp_proxy):
     if not igmp_proxy or 'disable' in igmp_proxy:
          # IGMP Proxy support is removed in the commit
          call('systemctl stop igmpproxy.service')
          if os.path.exists(config_file):
              os.unlink(config_file)
     else:
         call('systemctl restart igmpproxy.service')
 
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/protocols_isis.py b/src/conf_mode/protocols_isis.py
index 9cadfd081..ba2f3cf0d 100755
--- a/src/conf_mode/protocols_isis.py
+++ b/src/conf_mode/protocols_isis.py
@@ -1,312 +1,312 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2020-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from sys import exit
 from sys import argv
 
 from vyos.config import Config
 from vyos.configdict import dict_merge
 from vyos.configdict import node_changed
 from vyos.configverify import verify_common_route_maps
 from vyos.configverify import verify_interface_exists
 from vyos.ifconfig import Interface
 from vyos.utils.dict import dict_search
 from vyos.utils.network import get_interface_config
 from vyos.template import render_to_string
 from vyos import ConfigError
 from vyos import frr
 from vyos import airbag
 airbag.enable()
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
 
     vrf = None
     if len(argv) > 1:
         vrf = argv[1]
 
     base_path = ['protocols', 'isis']
 
     # equivalent of the C foo ? 'a' : 'b' statement
     base = vrf and ['vrf', 'name', vrf, 'protocols', 'isis'] or base_path
     isis = conf.get_config_dict(base, key_mangling=('-', '_'),
                                 get_first_key=True,
                                 no_tag_node_value_mangle=True)
 
     # Assign the name of our VRF context. This MUST be done before the return
     # statement below, else on deletion we will delete the default instance
     # instead of the VRF instance.
     if vrf: isis['vrf'] = vrf
 
     # FRR has VRF support for different routing daemons. As interfaces belong
     # to VRFs - or the global VRF, we need to check for changed interfaces so
     # that they will be properly rendered for the FRR config. Also this eases
     # removal of interfaces from the running configuration.
     interfaces_removed = node_changed(conf, base + ['interface'])
     if interfaces_removed:
         isis['interface_removed'] = list(interfaces_removed)
 
     # Bail out early if the configuration tree no longer exists. This must
     # be done after retrieving the list of interfaces to be removed.
     if not conf.exists(base):
         isis.update({'deleted' : ''})
         return isis
 
     # merge in default values
     isis = conf.merge_defaults(isis, recursive=True)
 
     # We also need some additional information from the config, prefix-lists
     # and route-maps for instance. They will be used in verify().
     #
     # XXX: one MUST always call this without the key_mangling() option! See
     # vyos.configverify.verify_common_route_maps() for more information.
     tmp = conf.get_config_dict(['policy'])
     # Merge policy dict into "regular" config dict
     isis = dict_merge(tmp, isis)
 
     return isis
 
 def verify(isis):
     # bail out early - looks like removal from running config
     if not isis or 'deleted' in isis:
         return None
 
     if 'net' not in isis:
         raise ConfigError('Network entity is mandatory!')
 
     # last byte in IS-IS area address must be 0
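     # (e.g. for NET 49.0001.1921.6800.1002.00 the trailing selector byte '00' parses to 0)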
     tmp = isis['net'].split('.')
     if int(tmp[-1]) != 0:
         raise ConfigError('Last byte of IS-IS network entity title must always be 0!')
 
     verify_common_route_maps(isis)
 
     # If interface not set
     if 'interface' not in isis:
         raise ConfigError('Interface used for routing updates is mandatory!')
 
     for interface in isis['interface']:
-        verify_interface_exists(interface)
+        verify_interface_exists(isis, interface)
         # Interface MTU must be >= configured lsp-mtu
         mtu = Interface(interface).get_mtu()
         area_mtu = isis['lsp_mtu']
         # Recommended maximum PDU size = interface MTU - 3 bytes
         recom_area_mtu = mtu - 3
         if mtu < int(area_mtu) or int(area_mtu) > recom_area_mtu:
             raise ConfigError(f'Interface {interface} has MTU {mtu}, ' \
                               f'current area MTU is {area_mtu}! \n' \
                               f'Recommended area lsp-mtu {recom_area_mtu} or less ' \
                               '(calculated on MTU size).')
 
         if 'vrf' in isis:
             # If interface specific options are set, we must ensure that the
             # interface is bound to our requesting VRF. Due to the VyOS
             # priorities the interface is bound to the VRF after creation of
             # the VRF itself, and before any routing protocol is configured.
             vrf = isis['vrf']
             tmp = get_interface_config(interface)
             if 'master' not in tmp or tmp['master'] != vrf:
                 raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!')
 
     # If md5 and plaintext-password set at the same time
     for password in ['area_password', 'domain_password']:
         if password in isis:
             if {'md5', 'plaintext_password'} <= set(isis[password]):
                 tmp = password.replace('_', '-')
                 raise ConfigError(f'Can use either md5 or plaintext-password for {tmp}!')
 
     # If one of the spf-delay-ietf timers is set, all of them must be set
     if 'spf_delay_ietf' in isis:
         required_timers = ['holddown', 'init_delay', 'long_delay', 'short_delay', 'time_to_learn']
         exist_timers = []
         for elm_timer in required_timers:
             if elm_timer in isis['spf_delay_ietf']:
                 exist_timers.append(elm_timer)
 
         exist_timers = set(required_timers).difference(set(exist_timers))
         if len(exist_timers) > 0:
             raise ConfigError('All types of spf-delay must be configured. Missing: ' + ', '.join(exist_timers).replace('_', '-'))
 
     # If redistribute is set, a level must be given and it must match the process level
     if 'redistribute' in isis:
         proc_level = isis.get('level','').replace('-','_')
         for afi in ['ipv4', 'ipv6']:
             if afi not in isis['redistribute']:
                 continue
 
             for proto, proto_config in isis['redistribute'][afi].items():
                 if 'level_1' not in proto_config and 'level_2' not in proto_config:
                     raise ConfigError(f'Redistribute level-1 or level-2 should be specified in ' \
                                       f'"protocols isis redistribute {afi} {proto}"!')
 
                 for redistr_level, redistr_config in proto_config.items():
                     if proc_level and proc_level != 'level_1_2' and proc_level != redistr_level:
                         raise ConfigError(f'"protocols isis redistribute {afi} {proto} {redistr_level}" ' \
                                           f'can not be used with \"protocols isis level {proc_level}\"!')
 
     # Segment routing checks
     if dict_search('segment_routing.global_block', isis):
         g_high_label_value = dict_search('segment_routing.global_block.high_label_value', isis)
         g_low_label_value = dict_search('segment_routing.global_block.low_label_value', isis)
 
         # If segment routing global block high or low value is blank, throw error
         if not (g_low_label_value or g_high_label_value):
             raise ConfigError('Segment routing global-block requires both low and high value!')
 
         # If segment routing global block low value is higher than the high value, throw error
         if int(g_low_label_value) > int(g_high_label_value):
             raise ConfigError('Segment routing global-block low value must be lower than high value')
 
     if dict_search('segment_routing.local_block', isis):
         if dict_search('segment_routing.global_block', isis) == None:
             raise ConfigError('Segment routing local-block requires global-block to be configured!')
 
         l_high_label_value = dict_search('segment_routing.local_block.high_label_value', isis)
         l_low_label_value = dict_search('segment_routing.local_block.low_label_value', isis)
 
         # If segment routing local-block high or low value is blank, throw error
         if not (l_low_label_value or l_high_label_value):
             raise ConfigError('Segment routing local-block requires both high and low value!')
 
         # If segment routing local-block low value is higher than the high value, throw error
         if int(l_low_label_value) > int(l_high_label_value):
             raise ConfigError('Segment routing local-block low value must be lower than high value')
 
         # local-block must live outside the global block
         global_range = range(int(g_low_label_value), int(g_high_label_value) +1)
         local_range  = range(int(l_low_label_value), int(l_high_label_value) +1)
 
         # Check for overlapping ranges
         if list(set(global_range) & set(local_range)):
             raise ConfigError(f'Segment-Routing Global Block ({g_low_label_value}/{g_high_label_value}) '\
                               f'conflicts with Local Block ({l_low_label_value}/{l_high_label_value})!')
 
     # Check for a blank or invalid value per prefix
     if dict_search('segment_routing.prefix', isis):
         for prefix, prefix_config in isis['segment_routing']['prefix'].items():
             if 'absolute' in prefix_config:
                 if prefix_config['absolute'].get('value') is None:
                     raise ConfigError(f'Segment routing prefix {prefix} absolute value cannot be blank.')
             elif 'index' in prefix_config:
                 if prefix_config['index'].get('value') is None:
                     raise ConfigError(f'Segment routing prefix {prefix} index value cannot be blank.')
 
     # Check for explicit-null and no-php-flag configured at the same time per prefix
     if dict_search('segment_routing.prefix', isis):
         for prefix, prefix_config in isis['segment_routing']['prefix'].items():
             if 'absolute' in prefix_config:
                 if ("explicit_null" in prefix_config['absolute']) and ("no_php_flag" in prefix_config['absolute']):
                     raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\
                                       f'and no-php-flag configured at the same time.')
             elif 'index' in prefix_config:
                 if ("explicit_null" in prefix_config['index']) and ("no_php_flag" in prefix_config['index']):
                     raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\
                                       f'and no-php-flag configured at the same time.')
 
     # Check for index ranges being larger than the segment routing global block
     if dict_search('segment_routing.global_block', isis):
         g_high_label_value = dict_search('segment_routing.global_block.high_label_value', isis)
         g_low_label_value = dict_search('segment_routing.global_block.low_label_value', isis)
         g_label_difference = int(g_high_label_value) - int(g_low_label_value)
         if dict_search('segment_routing.prefix', isis):
             for prefix, prefix_config in isis['segment_routing']['prefix'].items():
                 if 'index' in prefix_config:
                     index_size = isis['segment_routing']['prefix'][prefix]['index']['value']
                     if int(index_size) > int(g_label_difference):
                         raise ConfigError(f'Segment routing prefix {prefix} cannot have an '\
                                           f'index base size larger than the SRGB label base.')
 
     # Check for LFA tiebreaker index duplication
     if dict_search('fast_reroute.lfa.local.tiebreaker', isis):
         comparison_dictionary = {}
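         # Map each tiebreaker index value to the tiebreakers that use it;
         # an index may only be claimed by a single tiebreaker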
         for item, item_options in isis['fast_reroute']['lfa']['local']['tiebreaker'].items():
             for index, index_options in item_options.items():
                 for index_value, index_value_options in index_options.items():
                     if index_value not in comparison_dictionary.keys():
                         comparison_dictionary[index_value] = [item]
                     else:
                         comparison_dictionary[index_value].append(item)
         for index, index_length in comparison_dictionary.items():
             if int(len(index_length)) > 1:
                 raise ConfigError(f'LFA index {index} cannot have more than one tiebreaker configured.')
 
     # Check for LFA priority-limit configured multiple times per level
     if dict_search('fast_reroute.lfa.local.priority_limit', isis):
         comparison_dictionary = {}
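         # Map each level to the priorities configured for it;
         # only one priority-limit per level is allowed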
         for priority, priority_options in isis['fast_reroute']['lfa']['local']['priority_limit'].items():
             for level, level_options in priority_options.items():
                 if level not in comparison_dictionary.keys():
                     comparison_dictionary[level] = [priority]
                 else:
                     comparison_dictionary[level].append(priority)
             for level, level_length in comparison_dictionary.items():
                 if int(len(level_length)) > 1:
                     raise ConfigError(f'LFA priority-limit on {level.replace("_", "-")} cannot have more than one priority configured.')
 
     # Check for LFA remote prefix list configured with more than one list
     if dict_search('fast_reroute.lfa.remote.prefix_list', isis):
         if int(len(isis['fast_reroute']['lfa']['remote']['prefix_list'].items())) > 1:
             raise ConfigError('Only one LFA remote prefix-list can be configured.')
 
     return None
 
 def generate(isis):
     if not isis or 'deleted' in isis:
         return None
 
     isis['frr_isisd_config'] = render_to_string('frr/isisd.frr.j2', isis)
     return None
 
 def apply(isis):
     isis_daemon = 'isisd'
 
     # Save original configuration prior to starting any commit actions
     frr_cfg = frr.FRRConfig()
 
     # Generate an empty helper string which can be appended to FRR commands. It
     # will be either empty (default VRF) or contain the 'vrf <name>' statement.
     vrf = ''
     if 'vrf' in isis:
         vrf = ' vrf ' + isis['vrf']
 
     frr_cfg.load_configuration(isis_daemon)
     frr_cfg.modify_section(f'^router isis VyOS{vrf}', stop_pattern='^exit', remove_stop_mark=True)
 
     for key in ['interface', 'interface_removed']:
         if key not in isis:
             continue
         for interface in isis[key]:
             frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True)
 
     if 'frr_isisd_config' in isis:
         frr_cfg.add_before(frr.default_add_before, isis['frr_isisd_config'])
 
     frr_cfg.commit_configuration(isis_daemon)
 
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/protocols_mpls.py b/src/conf_mode/protocols_mpls.py
index 177a43444..ad164db9f 100755
--- a/src/conf_mode/protocols_mpls.py
+++ b/src/conf_mode/protocols_mpls.py
@@ -1,148 +1,148 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2020-2022 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 
 from sys import exit
 
 from glob import glob
 from vyos.config import Config
 from vyos.template import render_to_string
 from vyos.utils.dict import dict_search
 from vyos.utils.file import read_file
 from vyos.utils.system import sysctl_write
 from vyos.configverify import verify_interface_exists
 from vyos import ConfigError
 from vyos import frr
 from vyos import airbag
 airbag.enable()
 
 config_file = r'/tmp/ldpd.frr'
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
     base = ['protocols', 'mpls']
 
     mpls = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
     return mpls
 
 def verify(mpls):
     # If no config, then just bail out early.
     if not mpls:
         return None
 
     if 'interface' in mpls:
         for interface in mpls['interface']:
-            verify_interface_exists(interface)
+            verify_interface_exists(mpls, interface)
 
     # Checks to see if LDP is properly configured
     if 'ldp' in mpls:
         # If router ID not defined
         if 'router_id' not in mpls['ldp']:
             raise ConfigError('Router ID missing. An LDP router id is mandatory!')
 
         # If interface not set
         if 'interface' not in mpls['ldp']:
             raise ConfigError('LDP interfaces are missing. An LDP interface is mandatory!')
 
         # If transport addresses are not set
         if not dict_search('ldp.discovery.transport_ipv4_address', mpls) and \
            not dict_search('ldp.discovery.transport_ipv6_address', mpls):
                 raise ConfigError('LDP transport address missing!')
 
     return None
 
 def generate(mpls):
     # Bail out early if there is no MPLS configuration or it is being removed.
     if not mpls or 'deleted' in mpls:
         return None
 
     mpls['frr_ldpd_config'] = render_to_string('frr/ldpd.frr.j2', mpls)
     return None
 
 def apply(mpls):
     ldpd_daemon = 'ldpd'
 
     # Save original configuration prior to starting any commit actions
     frr_cfg = frr.FRRConfig()
 
     frr_cfg.load_configuration(ldpd_daemon)
     frr_cfg.modify_section(f'^mpls ldp', stop_pattern='^exit', remove_stop_mark=True)
 
     if 'frr_ldpd_config' in mpls:
         frr_cfg.add_before(frr.default_add_before, mpls['frr_ldpd_config'])
     frr_cfg.commit_configuration(ldpd_daemon)
 
     # Set number of entries in the platform label tables
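     # 1048575 is the full 20-bit MPLS label range (2^20 - 1); 0 keeps the
     # platform label table empty when no MPLS interfaces are configured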
     labels = '0'
     if 'interface' in mpls:
         labels = '1048575'
     sysctl_write('net.mpls.platform_labels', labels)
 
     # Check for changes in global MPLS options
     if 'parameters' in mpls:
         # Choose whether to copy IP TTL to MPLS header TTL
         if 'no_propagate_ttl' in mpls['parameters']:
             sysctl_write('net.mpls.ip_ttl_propagate', 0)
         # Choose whether to limit maximum MPLS header TTL
         if 'maximum_ttl' in mpls['parameters']:
             ttl = mpls['parameters']['maximum_ttl']
             sysctl_write('net.mpls.default_ttl', ttl)
     else:
         # Set default global MPLS options if not defined.
         sysctl_write('net.mpls.ip_ttl_propagate', 1)
         sysctl_write('net.mpls.default_ttl', 255)
 
     # Enable and disable MPLS processing on interfaces per configuration
     if 'interface' in mpls:
         system_interfaces = []
         # Populate system interfaces list with local MPLS capable interfaces
         for interface in glob('/proc/sys/net/mpls/conf/*'):
             system_interfaces.append(os.path.basename(interface))
         # Compare the running state against the configuration to decide whether
         # MPLS input needs to be enabled or disabled on each interface.
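         # Note: sysctl uses '.' as a separator, so dots in interface names
         # (e.g. VLAN sub-interfaces) must be written as '/'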
         for system_interface in system_interfaces:
             interface_state = read_file(f'/proc/sys/net/mpls/conf/{system_interface}/input')
             if '1' in interface_state:
                 if system_interface not in mpls['interface']:
                     system_interface = system_interface.replace('.', '/')
                     sysctl_write(f'net.mpls.conf.{system_interface}.input', 0)
             elif '0' in interface_state:
                 if system_interface in mpls['interface']:
                     system_interface = system_interface.replace('.', '/')
                     sysctl_write(f'net.mpls.conf.{system_interface}.input', 1)
     else:
         system_interfaces = []
         # If MPLS interfaces are not configured, set MPLS processing disabled
         for interface in glob('/proc/sys/net/mpls/conf/*'):
             system_interfaces.append(os.path.basename(interface))
         for system_interface in system_interfaces:
             system_interface = system_interface.replace('.', '/')
             sysctl_write(f'net.mpls.conf.{system_interface}.input', 0)
 
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/protocols_ospf.py b/src/conf_mode/protocols_ospf.py
index 6fffe7e0d..7347c4faa 100755
--- a/src/conf_mode/protocols_ospf.py
+++ b/src/conf_mode/protocols_ospf.py
@@ -1,290 +1,290 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2021-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from sys import exit
 from sys import argv
 
 from vyos.config import Config
 from vyos.config import config_dict_merge
 from vyos.configdict import dict_merge
 from vyos.configdict import node_changed
 from vyos.configverify import verify_common_route_maps
 from vyos.configverify import verify_route_map
 from vyos.configverify import verify_interface_exists
 from vyos.configverify import verify_access_list
 from vyos.template import render_to_string
 from vyos.utils.dict import dict_search
 from vyos.utils.network import get_interface_config
 from vyos import ConfigError
 from vyos import frr
 from vyos import airbag
 airbag.enable()
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
 
     vrf = None
     if len(argv) > 1:
         vrf = argv[1]
 
     base_path = ['protocols', 'ospf']
 
     # equivalent of the C foo ? 'a' : 'b' statement
     base = vrf and ['vrf', 'name', vrf, 'protocols', 'ospf'] or base_path
     ospf = conf.get_config_dict(base, key_mangling=('-', '_'),
                                 get_first_key=True)
 
     # Assign the name of our VRF context. This MUST be done before the return
     # statement below, else on deletion we will delete the default instance
     # instead of the VRF instance.
     if vrf: ospf['vrf'] = vrf
 
     # FRR has VRF support for different routing daemons. As interfaces belong
     # to VRFs - or the global VRF, we need to check for changed interfaces so
     # that they will be properly rendered for the FRR config. Also this eases
     # removal of interfaces from the running configuration.
     interfaces_removed = node_changed(conf, base + ['interface'])
     if interfaces_removed:
         ospf['interface_removed'] = list(interfaces_removed)
 
     # Bail out early if the configuration tree no longer exists. This must
     # be done after retrieving the list of interfaces to be removed.
     if not conf.exists(base):
         ospf.update({'deleted' : ''})
         return ospf
 
     # We have gathered the dict representation of the CLI, but there are default
     # options which we need to merge into the retrieved dictionary.
     default_values = conf.get_config_defaults(**ospf.kwargs, recursive=True)
 
     # We have to cleanup the default dict, as default values could enable features
     # which are not explicitly enabled on the CLI. Example: default-information
     # originate comes with a default metric-type of 2, which will enable the
     # entire default-information originate tree, even when not set via CLI so we
     # need to check this first and probably drop that key.
     if dict_search('default_information.originate', ospf) is None:
         del default_values['default_information']
     if 'mpls_te' not in ospf:
         del default_values['mpls_te']
     if 'graceful_restart' not in ospf:
         del default_values['graceful_restart']
     for area_num in default_values.get('area', []):
         if dict_search(f'area.{area_num}.area_type.nssa', ospf) is None:
             del default_values['area'][area_num]['area_type']['nssa']
 
     for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'rip', 'static']:
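         # Only keep redistribution defaults for protocols actually configured on the CLI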
         if dict_search(f'redistribute.{protocol}', ospf) is None:
             del default_values['redistribute'][protocol]
     if not bool(default_values['redistribute']):
         del default_values['redistribute']
 
     for interface in ospf.get('interface', []):
         # We need to reload the defaults on every pass b/c of
         # hello-multiplier dependency on dead-interval
         # If hello-multiplier is set, we need to remove the default from
         # dead-interval.
         if 'hello_multiplier' in ospf['interface'][interface]:
             del default_values['interface'][interface]['dead_interval']
 
     ospf = config_dict_merge(default_values, ospf)
 
     # We also need some additional information from the config, prefix-lists
     # and route-maps for instance. They will be used in verify().
     #
     # XXX: one MUST always call this without the key_mangling() option! See
     # vyos.configverify.verify_common_route_maps() for more information.
     tmp = conf.get_config_dict(['policy'])
     # Merge policy dict into "regular" config dict
     ospf = dict_merge(tmp, ospf)
 
     return ospf
 
 def verify(ospf):
     if not ospf:
         return None
 
     verify_common_route_maps(ospf)
 
     # As we can have a default-information route-map, we need to validate it!
     route_map_name = dict_search('default_information.originate.route_map', ospf)
     if route_map_name: verify_route_map(route_map_name, ospf)
 
     # Validate if configured Access-list exists
     if 'area' in ospf:
         networks = []
         for area, area_config in ospf['area'].items():
             if 'import_list' in area_config:
                 acl_import = area_config['import_list']
                 if acl_import: verify_access_list(acl_import, ospf)
             if 'export_list' in area_config:
                 acl_export = area_config['export_list']
                 if acl_export: verify_access_list(acl_export, ospf)
 
             if 'network' in area_config:
                 for network in area_config['network']:
                     if network in networks:
                         raise ConfigError(f'Network "{network}" already defined in different area!')
                     networks.append(network)
 
     if 'interface' in ospf:
         for interface, interface_config in ospf['interface'].items():
-            verify_interface_exists(interface)
+            verify_interface_exists(ospf, interface)
             # One can not use dead-interval and hello-multiplier at the same
             # time. FRR will only activate the last option set via CLI.
             if {'hello_multiplier', 'dead_interval'} <= set(interface_config):
                 raise ConfigError(f'Can not use hello-multiplier and dead-interval ' \
                                   f'concurrently for {interface}!')
 
             # One can not use the "network <prefix> area <id>" command and a
             # per-interface area assignment at the same time. FRR will error
             # out using: "Please remove all network commands first."
             if 'area' in ospf and 'area' in interface_config:
                 for area, area_config in ospf['area'].items():
                     if 'network' in area_config:
                         raise ConfigError('Can not use OSPF interface area and area ' \
                                           'network configuration at the same time!')
 
             # If interface specific options are set, we must ensure that the
             # interface is bound to our requesting VRF. Due to the VyOS
             # priorities the interface is bound to the VRF after creation of
             # the VRF itself, and before any routing protocol is configured.
             if 'vrf' in ospf:
                 vrf = ospf['vrf']
                 tmp = get_interface_config(interface)
                 if 'master' not in tmp or tmp['master'] != vrf:
                     raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!')
 
     # Segment routing checks
     if dict_search('segment_routing.global_block', ospf):
         g_high_label_value = dict_search('segment_routing.global_block.high_label_value', ospf)
         g_low_label_value = dict_search('segment_routing.global_block.low_label_value', ospf)
 
         # If segment routing global block high or low value is blank, throw error
         if not (g_low_label_value or g_high_label_value):
             raise ConfigError('Segment routing global-block requires both low and high value!')
 
         # If segment routing global block low value is higher than the high value, throw error
         if int(g_low_label_value) > int(g_high_label_value):
             raise ConfigError('Segment routing global-block low value must be lower than high value')
 
     if dict_search('segment_routing.local_block', ospf):
         if dict_search('segment_routing.global_block', ospf) == None:
             raise ConfigError('Segment routing local-block requires global-block to be configured!')
 
         l_high_label_value = dict_search('segment_routing.local_block.high_label_value', ospf)
         l_low_label_value = dict_search('segment_routing.local_block.low_label_value', ospf)
 
         # If segment routing local-block high or low value is blank, throw error
         if not (l_low_label_value or l_high_label_value):
             raise ConfigError('Segment routing local-block requires both high and low value!')
 
         # If segment routing local-block low value is higher than the high value, throw error
         if int(l_low_label_value) > int(l_high_label_value):
             raise ConfigError('Segment routing local-block low value must be lower than high value')
 
         # local-block must live outside the global block
         global_range = range(int(g_low_label_value), int(g_high_label_value) +1)
         local_range  = range(int(l_low_label_value), int(l_high_label_value) +1)
 
         # Check for overlapping ranges
         if list(set(global_range) & set(local_range)):
             raise ConfigError(f'Segment-Routing Global Block ({g_low_label_value}/{g_high_label_value}) '\
                               f'conflicts with Local Block ({l_low_label_value}/{l_high_label_value})!')
 
     # Check for a blank or invalid value per prefix
     if dict_search('segment_routing.prefix', ospf):
         for prefix, prefix_config in ospf['segment_routing']['prefix'].items():
             if 'index' in prefix_config:
                 if prefix_config['index'].get('value') is None:
                     raise ConfigError(f'Segment routing prefix {prefix} index value cannot be blank.')
 
     # Check for explicit-null and no-php-flag configured at the same time per prefix
     if dict_search('segment_routing.prefix', ospf):
         for prefix, prefix_config in ospf['segment_routing']['prefix'].items():
             if 'index' in prefix_config:
                 if ("explicit_null" in prefix_config['index']) and ("no_php_flag" in prefix_config['index']):
                     raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\
                                       f'and no-php-flag configured at the same time.')
 
     # Check for index ranges being larger than the segment routing global block
     if dict_search('segment_routing.global_block', ospf):
         g_high_label_value = dict_search('segment_routing.global_block.high_label_value', ospf)
         g_low_label_value = dict_search('segment_routing.global_block.low_label_value', ospf)
         g_label_difference = int(g_high_label_value) - int(g_low_label_value)
         if dict_search('segment_routing.prefix', ospf):
             for prefix, prefix_config in ospf['segment_routing']['prefix'].items():
                 if 'index' in prefix_config:
                     index_size = ospf['segment_routing']['prefix'][prefix]['index']['value']
                     if int(index_size) > int(g_label_difference):
                         raise ConfigError(f'Segment routing prefix {prefix} cannot have an '\
                                           f'index base size larger than the SRGB label base.')
 
     # Check route summarisation
     if 'summary_address' in ospf:
         for prefix, prefix_options in ospf['summary_address'].items():
             if {'tag', 'no_advertise'} <= set(prefix_options):
                 raise ConfigError(f'Can not set both "tag" and "no-advertise" for Type-5 '\
                                   f'and Type-7 route summarisation of "{prefix}"!')
 
     return None
 
 def generate(ospf):
     if not ospf or 'deleted' in ospf:
         return None
 
     ospf['frr_ospfd_config'] = render_to_string('frr/ospfd.frr.j2', ospf)
     return None
 
 def apply(ospf):
     ospf_daemon = 'ospfd'
 
     # Save original configuration prior to starting any commit actions
     frr_cfg = frr.FRRConfig()
 
     # Generate an empty helper string which can be appended to FRR commands. It
     # will be either empty (default VRF) or contain the 'vrf <name>' statement.
     vrf = ''
     if 'vrf' in ospf:
         vrf = ' vrf ' + ospf['vrf']
 
     frr_cfg.load_configuration(ospf_daemon)
     frr_cfg.modify_section(f'^router ospf{vrf}', stop_pattern='^exit', remove_stop_mark=True)
 
     for key in ['interface', 'interface_removed']:
         if key not in ospf:
             continue
         for interface in ospf[key]:
             frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True)
 
     if 'frr_ospfd_config' in ospf:
         frr_cfg.add_before(frr.default_add_before, ospf['frr_ospfd_config'])
 
     frr_cfg.commit_configuration(ospf_daemon)
 
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/protocols_ospfv3.py b/src/conf_mode/protocols_ospfv3.py
index 1bb172293..60c2a9b16 100755
--- a/src/conf_mode/protocols_ospfv3.py
+++ b/src/conf_mode/protocols_ospfv3.py
@@ -1,191 +1,191 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2021-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from sys import exit
 from sys import argv
 
 from vyos.config import Config
 from vyos.config import config_dict_merge
 from vyos.configdict import dict_merge
 from vyos.configdict import node_changed
 from vyos.configverify import verify_common_route_maps
 from vyos.configverify import verify_route_map
 from vyos.configverify import verify_interface_exists
 from vyos.template import render_to_string
 from vyos.ifconfig import Interface
 from vyos.utils.dict import dict_search
 from vyos.utils.network import get_interface_config
 from vyos import ConfigError
 from vyos import frr
 from vyos import airbag
 airbag.enable()
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
 
     vrf = None
     if len(argv) > 1:
         vrf = argv[1]
 
     base_path = ['protocols', 'ospfv3']
 
     # equivalent of the C foo ? 'a' : 'b' statement
     base = vrf and ['vrf', 'name', vrf, 'protocols', 'ospfv3'] or base_path
     ospfv3 = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
 
     # Assign the name of our VRF context. This MUST be done before the return
     # statement below, else on deletion we will delete the default instance
     # instead of the VRF instance.
     if vrf: ospfv3['vrf'] = vrf
 
     # FRR has VRF support for different routing daemons. As interfaces belong
     # to VRFs - or the global VRF, we need to check for changed interfaces so
     # that they will be properly rendered for the FRR config. Also this eases
     # removal of interfaces from the running configuration.
     interfaces_removed = node_changed(conf, base + ['interface'])
     if interfaces_removed:
         ospfv3['interface_removed'] = list(interfaces_removed)
 
     # Bail out early if the configuration tree no longer exists. This must
     # be done after retrieving the list of interfaces to be removed.
     if not conf.exists(base):
         ospfv3.update({'deleted' : ''})
         return ospfv3
 
     # We have gathered the dict representation of the CLI, but there are default
     # options which we need to merge into the retrieved dictionary.
     default_values = conf.get_config_defaults(**ospfv3.kwargs,
                                               recursive=True)
 
     # We have to cleanup the default dict, as default values could enable features
     # which are not explicitly enabled on the CLI. Example: default-information
     # originate comes with a default metric-type of 2, which will enable the
     # entire default-information originate tree, even when not set via CLI so we
     # need to check this first and probably drop that key.
     if dict_search('default_information.originate', ospfv3) is None:
         del default_values['default_information']
     if 'graceful_restart' not in ospfv3:
         del default_values['graceful_restart']
 
     for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'ripng', 'static']:
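         # Only keep redistribution defaults for protocols actually configured on the CLI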
         if dict_search(f'redistribute.{protocol}', ospfv3) is None:
             del default_values['redistribute'][protocol]
     if not bool(default_values['redistribute']):
         del default_values['redistribute']
 
     default_values.pop('interface', {})
 
     # merge in remaining default values
     ospfv3 = config_dict_merge(default_values, ospfv3)
 
     # We also need some additional information from the config, prefix-lists
     # and route-maps for instance. They will be used in verify().
     #
     # XXX: one MUST always call this without the key_mangling() option! See
     # vyos.configverify.verify_common_route_maps() for more information.
     tmp = conf.get_config_dict(['policy'])
     # Merge policy dict into "regular" config dict
     ospfv3 = dict_merge(tmp, ospfv3)
 
     return ospfv3
 
 def verify(ospfv3):
     if not ospfv3:
         return None
 
     verify_common_route_maps(ospfv3)
 
     # As we can have a default-information route-map, we need to validate it!
     route_map_name = dict_search('default_information.originate.route_map', ospfv3)
     if route_map_name: verify_route_map(route_map_name, ospfv3)
 
     if 'area' in ospfv3:
         for area, area_config in ospfv3['area'].items():
             if 'area_type' in area_config:
                 if len(area_config['area_type']) > 1:
                     raise ConfigError(f'Can only configure one area-type for OSPFv3 area "{area}"!')
             if 'range' in area_config:
                 for range, range_config in area_config['range'].items():
                     if {'not_advertise', 'advertise'} <= range_config.keys():
                         raise ConfigError(f'"not-advertise" and "advertise" for "range {range}" cannot be both configured at the same time!')
 
     if 'interface' in ospfv3:
         for interface, interface_config in ospfv3['interface'].items():
-            verify_interface_exists(interface)
+            verify_interface_exists(ospfv3, interface)
             if 'ifmtu' in interface_config:
                 mtu = Interface(interface).get_mtu()
                 if int(interface_config['ifmtu']) > int(mtu):
                     raise ConfigError(f'OSPFv3 ifmtu can not exceed physical MTU of "{mtu}"')
 
             # If interface specific options are set, we must ensure that the
             # interface is bound to our requesting VRF. Due to the VyOS
             # priorities the interface is bound to the VRF after creation of
             # the VRF itself, and before any routing protocol is configured.
             if 'vrf' in ospfv3:
                 vrf = ospfv3['vrf']
                 tmp = get_interface_config(interface)
                 if 'master' not in tmp or tmp['master'] != vrf:
                     raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!')
 
     return None
 
 def generate(ospfv3):
     if not ospfv3 or 'deleted' in ospfv3:
         return None
 
     ospfv3['new_frr_config'] = render_to_string('frr/ospf6d.frr.j2', ospfv3)
     return None
 
 def apply(ospfv3):
     ospf6_daemon = 'ospf6d'
 
     # Save original configuration prior to starting any commit actions
     frr_cfg = frr.FRRConfig()
 
     # Generate an empty helper string which can be appended to FRR commands. It
     # will be either empty (default VRF) or contain the 'vrf <name>' statement.
     vrf = ''
     if 'vrf' in ospfv3:
         vrf = ' vrf ' + ospfv3['vrf']
 
     frr_cfg.load_configuration(ospf6_daemon)
     frr_cfg.modify_section(f'^router ospf6{vrf}', stop_pattern='^exit', remove_stop_mark=True)
 
     for key in ['interface', 'interface_removed']:
         if key not in ospfv3:
             continue
         for interface in ospfv3[key]:
             frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True)
 
     if 'new_frr_config' in ospfv3:
         frr_cfg.add_before(frr.default_add_before, ospfv3['new_frr_config'])
 
     frr_cfg.commit_configuration(ospf6_daemon)
 
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/protocols_pim.py b/src/conf_mode/protocols_pim.py
index d450d11ca..79294a1f0 100755
--- a/src/conf_mode/protocols_pim.py
+++ b/src/conf_mode/protocols_pim.py
@@ -1,172 +1,172 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2020-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 
 from ipaddress import IPv4Address
 from ipaddress import IPv4Network
 from signal import SIGTERM
 from sys import exit
 
 from vyos.config import Config
 from vyos.config import config_dict_merge
 from vyos.configdict import node_changed
 from vyos.configverify import verify_interface_exists
 from vyos.utils.process import process_named_running
 from vyos.utils.process import call
 from vyos.template import render_to_string
 from vyos import ConfigError
 from vyos import frr
 from vyos import airbag
 airbag.enable()
 
 RESERVED_MC_NET = '224.0.0.0/24'
 
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
 
     base = ['protocols', 'pim']
 
     pim = conf.get_config_dict(base, key_mangling=('-', '_'),
                                get_first_key=True, no_tag_node_value_mangle=True)
 
     # We can not run both IGMP proxy and PIM at the same time - get IGMP
     # proxy status
     if conf.exists(['protocols', 'igmp-proxy']):
         pim.update({'igmp_proxy_enabled' : {}})
 
     # FRR has VRF support for different routing daemons. As interfaces belong
     # to VRFs - or the global VRF, we need to check for changed interfaces so
     # that they will be properly rendered for the FRR config. Also this eases
     # removal of interfaces from the running configuration.
     interfaces_removed = node_changed(conf, base + ['interface'])
     if interfaces_removed:
         pim['interface_removed'] = list(interfaces_removed)
 
     # Bail out early if the configuration tree no longer exists. This must
     # be done after retrieving the list of interfaces to be removed.
     if not conf.exists(base):
         pim.update({'deleted' : ''})
         return pim
 
     # We have gathered the dict representation of the CLI, but there are default
     # options which we need to merge into the retrieved dictionary.
     default_values = conf.get_config_defaults(**pim.kwargs, recursive=True)
 
     # We have to cleanup the default dict, as default values could enable features
     # which are not explicitly enabled on the CLI. Example: every interface
     # carries default IGMP options, which would enable the entire IGMP tree
     # even when IGMP was not set via CLI, so we need to check this first and
     # probably drop that key.
     for interface in pim.get('interface', []):
         # If IGMP is not enabled on this interface, drop its IGMP defaults
         # so the feature is not enabled implicitly.
         if 'igmp' not in pim['interface'][interface]:
             del default_values['interface'][interface]['igmp']
 
     pim = config_dict_merge(default_values, pim)
     return pim
 
 def verify(pim):
     if not pim or 'deleted' in pim:
         return None
 
     if 'igmp_proxy_enabled' in pim:
         raise ConfigError('IGMP proxy and PIM cannot be configured at the same time!')
 
     if 'interface' not in pim:
         raise ConfigError('PIM requires at least one interface to be defined!')
 
     for interface, interface_config in pim['interface'].items():
-        verify_interface_exists(interface)
+        verify_interface_exists(pim, interface)
 
         # Check join group in reserved net
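         # 224.0.0.0/24 is the IANA local network control block and must not be joined explicitly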
         if 'igmp' in interface_config and 'join' in interface_config['igmp']:
             for join_addr in interface_config['igmp']['join']:
                 if IPv4Address(join_addr) in IPv4Network(RESERVED_MC_NET):
                     raise ConfigError(f'Groups within {RESERVED_MC_NET} are reserved and cannot be joined!')
 
     if 'rp' in pim:
         if 'address' not in pim['rp']:
             raise ConfigError('PIM rendezvous point needs to be defined!')
 
         # Check unique multicast groups
         unique = []
         pim_base_error = 'PIM rendezvous point group'
         for address, address_config in pim['rp']['address'].items():
             if 'group' not in address_config:
                 raise ConfigError(f'{pim_base_error} should be defined for "{address}"!')
 
             # Check if it is a multicast group
             for gr_addr in address_config['group']:
                 if not IPv4Network(gr_addr).is_multicast:
                     raise ConfigError(f'{pim_base_error} "{gr_addr}" is not a multicast group!')
                 if gr_addr in unique:
                     raise ConfigError(f'{pim_base_error} must be unique!')
                 unique.append(gr_addr)
 
 def generate(pim):
     if not pim or 'deleted' in pim:
         return None
     pim['frr_pimd_config']  = render_to_string('frr/pimd.frr.j2', pim)
     return None
 
 def apply(pim):
     pim_daemon = 'pimd'
     pim_pid = process_named_running(pim_daemon)
 
     if not pim or 'deleted' in pim:
         if 'deleted' in pim:
             os.kill(int(pim_pid), SIGTERM)
 
         return None
 
     if not pim_pid:
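         # pimd is not running yet - start the daemon before loading its configuration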
         call('/usr/lib/frr/pimd -d -F traditional --daemon -A 127.0.0.1')
 
     # Save original configuration prior to starting any commit actions
     frr_cfg = frr.FRRConfig()
 
     frr_cfg.load_configuration(pim_daemon)
     frr_cfg.modify_section(f'^ip pim')
     frr_cfg.modify_section(f'^ip igmp')
 
     for key in ['interface', 'interface_removed']:
         if key not in pim:
             continue
         for interface in pim[key]:
             frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True)
 
     if 'frr_pimd_config' in pim:
         frr_cfg.add_before(frr.default_add_before, pim['frr_pimd_config'])
     frr_cfg.commit_configuration(pim_daemon)
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/protocols_pim6.py b/src/conf_mode/protocols_pim6.py
index 2003a1014..581ffe238 100755
--- a/src/conf_mode/protocols_pim6.py
+++ b/src/conf_mode/protocols_pim6.py
@@ -1,133 +1,133 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2023 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 from ipaddress import IPv6Address
 from ipaddress import IPv6Network
 from sys import exit
 
 from vyos.config import Config
 from vyos.config import config_dict_merge
 from vyos.configdict import node_changed
 from vyos.configverify import verify_interface_exists
 from vyos.template import render_to_string
 from vyos import ConfigError
 from vyos import frr
 from vyos import airbag
 airbag.enable()
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
     base = ['protocols', 'pim6']
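     # Retrieve the CLI subtree; with_recursive_defaults merges the XML default values in the same call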
     pim6 = conf.get_config_dict(base, key_mangling=('-', '_'),
                                  get_first_key=True, with_recursive_defaults=True)
 
     # FRR has VRF support for different routing daemons. As interfaces belong
     # to VRFs - or the global VRF, we need to check for changed interfaces so
     # that they will be properly rendered for the FRR config. Also this eases
     # removal of interfaces from the running configuration.
     interfaces_removed = node_changed(conf, base + ['interface'])
     if interfaces_removed:
         pim6['interface_removed'] = list(interfaces_removed)
 
     # Bail out early if the configuration tree no longer exists. This must
     # be done after retrieving the list of interfaces to be removed.
     if not conf.exists(base):
         pim6.update({'deleted' : ''})
         return pim6
 
     # We have gathered the dict representation of the CLI, but there are default
     # options which we need to merge into the retrieved dictionary.
     default_values = conf.get_config_defaults(**pim6.kwargs, recursive=True)
 
     pim6 = config_dict_merge(default_values, pim6)
     return pim6
 
 def verify(pim6):
     if not pim6 or 'deleted' in pim6:
         return
 
     for interface, interface_config in pim6.get('interface', {}).items():
-        verify_interface_exists(interface)
+        verify_interface_exists(pim6, interface)
         if 'mld' in interface_config:
             mld = interface_config['mld']
             for group in mld.get('join', {}).keys():
                 # Validate multicast group address
                 if not IPv6Address(group).is_multicast:
                     raise ConfigError(f"{group} is not a multicast group")
 
     if 'rp' in pim6:
         if 'address' not in pim6['rp']:
             raise ConfigError('PIM6 rendezvous point needs to be defined!')
 
         # Check unique multicast groups
         unique = []
         pim_base_error = 'PIM6 rendezvous point group'
 
         if {'address', 'prefix_list6'} <= set(pim6['rp']):
             raise ConfigError(f'{pim_base_error} supports either address or a prefix-list!')
 
         for address, address_config in pim6['rp']['address'].items():
             if 'group' not in address_config:
                 raise ConfigError(f'{pim_base_error} should be defined for "{address}"!')
 
             # Check if it is a multicast group
             for gr_addr in address_config['group']:
                 if not IPv6Network(gr_addr).is_multicast:
                     raise ConfigError(f'{pim_base_error} "{gr_addr}" is not a multicast group!')
                 if gr_addr in unique:
                     raise ConfigError(f'{pim_base_error} must be unique!')
                 unique.append(gr_addr)
 
 def generate(pim6):
     if not pim6 or 'deleted' in pim6:
         return
     pim6['new_frr_config'] = render_to_string('frr/pim6d.frr.j2', pim6)
     return None
 
 def apply(pim6):
     if pim6 is None:
         return
 
     pim6_daemon = 'pim6d'
 
     # Save original configuration prior to starting any commit actions
     frr_cfg = frr.FRRConfig()
 
     frr_cfg.load_configuration(pim6_daemon)
 
     for key in ['interface', 'interface_removed']:
         if key not in pim6:
             continue
         for interface in pim6[key]:
             frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True)
 
     if 'new_frr_config' in pim6:
         frr_cfg.add_before(frr.default_add_before, pim6['new_frr_config'])
     frr_cfg.commit_configuration(pim6_daemon)
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/qos.py b/src/conf_mode/qos.py
index a97a09ba0..2445b8515 100755
--- a/src/conf_mode/qos.py
+++ b/src/conf_mode/qos.py
@@ -1,265 +1,265 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2023-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 from sys import exit
 from netifaces import interfaces
 
 from vyos.config import Config
 from vyos.configdep import set_dependents
 from vyos.configdep import call_dependents
 from vyos.configdict import dict_merge
 from vyos.configverify import verify_interface_exists
 from vyos.ifconfig import Section
 from vyos.qos import CAKE
 from vyos.qos import DropTail
 from vyos.qos import FairQueue
 from vyos.qos import FQCodel
 from vyos.qos import Limiter
 from vyos.qos import NetEm
 from vyos.qos import Priority
 from vyos.qos import RandomDetect
 from vyos.qos import RateLimiter
 from vyos.qos import RoundRobin
 from vyos.qos import TrafficShaper
 from vyos.qos import TrafficShaperHFSC
 from vyos.utils.dict import dict_search_recursive
 from vyos.utils.process import run
 from vyos import ConfigError
 from vyos import airbag
 from vyos.xml_ref import relative_defaults
 
 
 airbag.enable()
 
 map_vyops_tc = {
     'cake'             : CAKE,
     'drop_tail'        : DropTail,
     'fair_queue'       : FairQueue,
     'fq_codel'         : FQCodel,
     'limiter'          : Limiter,
     'network_emulator' : NetEm,
     'priority_queue'   : Priority,
     'random_detect'    : RandomDetect,
     'rate_control'     : RateLimiter,
     'round_robin'      : RoundRobin,
     'shaper'           : TrafficShaper,
     'shaper_hfsc'      : TrafficShaperHFSC,
 }
 
 def get_shaper(qos, interface_config, direction):
     policy_name = interface_config[direction]
     # An interface references a QoS policy by name - search the QoS config
     # tree for that policy. "path" will hold the dict path to the element
     # referenced by the config, which is of the form:
     #
     # ['policy', 'drop_tail', 'foo-dtail'] <- we are only interested in
     # drop_tail as the policy/shaper type
     _, path = next(dict_search_recursive(qos, policy_name))
     shaper_type = path[1]
     shaper_config = qos['policy'][shaper_type][policy_name]
 
     return (map_vyops_tc[shaper_type], shaper_config)
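 # Illustrative usage (hypothetical policy name): with qos = {'policy': {'shaper': {'WAN-OUT': {...}}}}
 # and interface_config = {'egress': 'WAN-OUT'}, get_shaper(qos, interface_config, 'egress')
 # yields (TrafficShaper, <the 'WAN-OUT' policy dict>).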
 
 
 def _clean_conf_dict(conf):
     """
     Delete empty nodes from config e.g.
         match ADDRESS30 {
             ip {
                 source {}
             }
         }
     """
     if isinstance(conf, dict):
         return {node: _clean_conf_dict(val) for node, val in conf.items() if val != {} and _clean_conf_dict(val) != {}}
     else:
         return conf
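 # For example (illustrative): _clean_conf_dict({'ip': {'source': {}}}) returns {}, so an
 # empty "match" stanza like the one in the docstring above is dropped entirely by the caller.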
 
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
     base = ['qos']
     if not conf.exists(base):
         return None
 
     qos = conf.get_config_dict(base, key_mangling=('-', '_'),
                                get_first_key=True,
                                no_tag_node_value_mangle=True)
 
     for ifname in interfaces():
         if_node = Section.get_config_path(ifname)
 
         if not if_node:
             continue
 
         path = f'interfaces {if_node}'
         if conf.exists(f'{path} mirror') or conf.exists(f'{path} redirect'):
             type_node = path.split(" ")[1] # return only interface type node
             set_dependents(type_node, conf, ifname.split(".")[0])
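             # Illustrative (assuming Section.get_config_path() maps eth0.10 to a path under
             # 'interfaces ethernet'): a mirror/redirect on eth0.10 registers the 'ethernet'
             # dependency handler for the parent interface eth0.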
 
     for policy in qos.get('policy', []):
         if policy in ['random_detect']:
             for rd_name in list(qos['policy'][policy]):
                 # There are eight precedence levels - ensure all are present
                 # to be filled in later with the appropriate default values
                 default_p_val = relative_defaults(
                     ['qos', 'policy', 'random-detect', rd_name, 'precedence'],
                     {'precedence': {'0': {}}},
                     get_first_key=True, recursive=True
                 )['0']
                 default_p_val = {key.replace('-', '_'): value for key, value in default_p_val.items()}
                 default_precedence = {
                     'precedence': {'0': default_p_val, '1': default_p_val,
                                    '2': default_p_val, '3': default_p_val,
                                    '4': default_p_val, '5': default_p_val,
                                    '6': default_p_val, '7': default_p_val}}
 
                 qos['policy']['random_detect'][rd_name] = dict_merge(
                     default_precedence, qos['policy']['random_detect'][rd_name])
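                 # The merged policy now always carries precedence entries '0'..'7'; keys the
                 # user already set on the CLI are kept, only missing ones are filled from
                 # default_precedence.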
 
     qos = conf.merge_defaults(qos, recursive=True)
 
     for policy in qos.get('policy', []):
         for p_name, p_config in qos['policy'][policy].items():
             # cleanup empty match config
             if 'class' in p_config:
                 for cls, cls_config in p_config['class'].items():
                     if 'match' in cls_config:
                         cls_config['match'] = _clean_conf_dict(cls_config['match'])
                         if cls_config['match'] == {}:
                             del cls_config['match']
 
     return qos
 
 def verify(qos):
     if not qos or 'interface' not in qos:
         return None
 
     # network policy emulator
     # reorder requires delay to be set
     if 'policy' in qos:
         for policy_type in qos['policy']:
             for policy, policy_config in qos['policy'][policy_type].items():
                 # a policy with a given name is only allowed to exist once
                 # on the system. This is because an interface selects a policy
                 # for ingress/egress traffic, and thus there can only be one
                 # policy with a given name.
                 #
                 # We check if the policy name occurs more than once - error out
                 # if this is true
                 counter = 0
                 for _, path in dict_search_recursive(qos['policy'], policy):
                     counter += 1
                     if counter > 1:
                         raise ConfigError(f'Conflicting policy name "{policy}", already in use!')
 
                 if 'class' in policy_config:
                     for cls, cls_config in policy_config['class'].items():
                         # bandwidth is not mandatory for some policy types (e.g. priority-queue) - hence the exception list below
                         if 'bandwidth' not in cls_config and policy_type not in ['priority_queue', 'round_robin', 'shaper_hfsc']:
                             raise ConfigError(f'Bandwidth must be defined for policy "{policy}" class "{cls}"!')
                     if 'match' in cls_config:
                         for match, match_config in cls_config['match'].items():
                             if {'ip', 'ipv6'} <= set(match_config):
                                  raise ConfigError(f'Can not use both IPv6 and IPv4 in one match ({match})!')
 
                 if policy_type in ['random_detect']:
                     if 'precedence' in policy_config:
                         for precedence, precedence_config in policy_config['precedence'].items():
                             max_tr = int(precedence_config['maximum_threshold'])
                             if {'maximum_threshold', 'minimum_threshold'} <= set(precedence_config):
                                 min_tr = int(precedence_config['minimum_threshold'])
                                 if min_tr >= max_tr:
                                     raise ConfigError(f'Policy "{policy}" uses min-threshold "{min_tr}" >= max-threshold "{max_tr}"!')
 
                             if {'maximum_threshold', 'queue_limit'} <= set(precedence_config):
                                 queue_lim = int(precedence_config['queue_limit'])
                                 if queue_lim < max_tr:
                                     raise ConfigError(f'Policy "{policy}" uses queue-limit "{queue_lim}" < max-threshold "{max_tr}"!')
                 if policy_type in ['priority_queue']:
                     if 'default' not in policy_config:
                         raise ConfigError(f'Policy {policy} misses "default" class!')
                 if 'default' in policy_config:
                     if 'bandwidth' not in policy_config['default'] and policy_type not in ['priority_queue', 'round_robin', 'shaper_hfsc']:
                         raise ConfigError('Bandwidth not defined for default traffic!')
 
     # we should check interface ingress/egress configuration after verifying that
     # the policy name is used only once - this makes the logic easier!
     for interface, interface_config in qos['interface'].items():
         for direction in ['egress', 'ingress']:
             # bail out early if shaper for given direction is not used at all
             if direction not in interface_config:
                 continue
 
             policy_name = interface_config[direction]
             if 'policy' not in qos or list(dict_search_recursive(qos['policy'], policy_name)) == []:
                 raise ConfigError(f'Selected QoS policy "{policy_name}" does not exist!')
 
             shaper_type, shaper_config = get_shaper(qos, interface_config, direction)
             tmp = shaper_type(interface).get_direction()
             if direction not in tmp:
                 raise ConfigError(f'Selected QoS policy on interface "{interface}" only supports "{tmp}"!')
 
     return None
 
 def generate(qos):
     if not qos or 'interface' not in qos:
         return None
 
     return None
 
 def apply(qos):
     # Always delete "old" shapers first
     for interface in interfaces():
         # Ignore errors (may have no qdisc)
         run(f'tc qdisc del dev {interface} parent ffff:')
         run(f'tc qdisc del dev {interface} root')
 
     call_dependents()
 
     if not qos or 'interface' not in qos:
         return None
 
     for interface, interface_config in qos['interface'].items():
-        if not verify_interface_exists(interface, state_required=True, warning_only=True):
+        if not verify_interface_exists(qos, interface, state_required=True, warning_only=True):
             # When shaper is bound to a dialup (e.g. PPPoE) interface it is
             # possible that it is not yet available when the QoS code runs.
             # Skip the configuration and inform the user via warning_only=True
             continue
 
         for direction in ['egress', 'ingress']:
             # bail out early if shaper for given direction is not used at all
             if direction not in interface_config:
                 continue
 
             shaper_type, shaper_config = get_shaper(qos, interface_config, direction)
             tmp = shaper_type(interface)
             tmp.update(shaper_config, direction)
 
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/service_broadcast-relay.py b/src/conf_mode/service_broadcast-relay.py
index 31c552f5a..d35954718 100755
--- a/src/conf_mode/service_broadcast-relay.py
+++ b/src/conf_mode/service_broadcast-relay.py
@@ -1,111 +1,111 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2017-2023 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 
 from glob import glob
 from netifaces import AF_INET
 from sys import exit
 
 from vyos.config import Config
 from vyos.configverify import verify_interface_exists
 from vyos.template import render
 from vyos.utils.process import call
 from vyos.utils.network import is_afi_configured
 from vyos import ConfigError
 from vyos import airbag
 airbag.enable()
 
 config_file_base = r'/etc/default/udp-broadcast-relay'
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
     base = ['service', 'broadcast-relay']
 
     relay = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
     return relay
 
 def verify(relay):
     if not relay or 'disabled' in relay:
         return None
 
     for instance, config in relay.get('id', {}).items():
         # we don't have to check this instance when it's disabled
         if 'disabled' in config:
             continue
 
         # we certainly require a UDP port to listen to
         if 'port' not in config:
             raise ConfigError(f'Port number is mandatory for UDP broadcast relay "{instance}"')
 
         # Relaying data requires at least two interfaces, otherwise it is pointless ...
         if len(config.get('interface', [])) < 2:
             raise ConfigError(f'At least two interfaces are required for UDP broadcast relay "{instance}"')
 
         for interface in config.get('interface', []):
-            verify_interface_exists(interface)
+            verify_interface_exists(relay, interface)
             if not is_afi_configured(interface, AF_INET):
                 raise ConfigError(f'Interface "{interface}" has no IPv4 address configured!')
 
     return None
 
 def generate(relay):
     if not relay or 'disabled' in relay:
         return None
 
     for config in glob(config_file_base + '*'):
         os.remove(config)
 
     for instance, config in relay.get('id').items():
         # we don't have to check this instance when it's disabled
         if 'disabled' in config:
             continue
 
         config['instance'] = instance
         render(config_file_base + instance, 'bcast-relay/udp-broadcast-relay.j2',
                config)
 
     return None
 
 def apply(relay):
     # first stop all running services
     call('systemctl stop udp-broadcast-relay@*.service')
 
     if not relay or 'disable' in relay:
         return None
 
     # start only required service instances
     for instance, config in relay.get('id').items():
         # we don't have to check this instance when it's disabled
         if 'disabled' in config:
             continue
 
         call(f'systemctl start udp-broadcast-relay@{instance}.service')
 
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/service_conntrack-sync.py b/src/conf_mode/service_conntrack-sync.py
index 4fb2ce27f..3a233a172 100755
--- a/src/conf_mode/service_conntrack-sync.py
+++ b/src/conf_mode/service_conntrack-sync.py
@@ -1,141 +1,141 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2021 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 
 from sys import exit
 from vyos.config import Config
 from vyos.configverify import verify_interface_exists
 from vyos.utils.dict import dict_search
 from vyos.utils.process import process_named_running
 from vyos.utils.file import read_file
 from vyos.utils.process import call
 from vyos.utils.process import run
 from vyos.template import render
 from vyos.template import get_ipv4
 from vyos.utils.network import is_addr_assigned
 from vyos import ConfigError
 from vyos import airbag
 airbag.enable()
 
 config_file = '/run/conntrackd/conntrackd.conf'
 
 def resync_vrrp():
     tmp = run('/usr/libexec/vyos/conf_mode/high-availability.py')
     if tmp > 0:
         print('ERROR: error restarting VRRP daemon!')
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
     base = ['service', 'conntrack-sync']
     if not conf.exists(base):
         return None
 
     conntrack = conf.get_config_dict(base, key_mangling=('-', '_'),
                                      get_first_key=True, with_defaults=True)
 
     conntrack['hash_size'] = read_file('/sys/module/nf_conntrack/parameters/hashsize')
     conntrack['table_size'] = read_file('/proc/sys/net/netfilter/nf_conntrack_max')
 
     conntrack['vrrp'] = conf.get_config_dict(['high-availability', 'vrrp', 'sync-group'],
                                      get_first_key=True)
 
     return conntrack
 
 def verify(conntrack):
     if not conntrack:
         return None
 
     if 'interface' not in conntrack:
         raise ConfigError('Interface not defined!')
 
     has_peer = False
     for interface, interface_config in conntrack['interface'].items():
-        verify_interface_exists(interface)
+        verify_interface_exists(conntrack, interface)
         # Interface must not only exist, it must also carry an IP address
         if len(get_ipv4(interface)) < 1:
             raise ConfigError(f'Interface {interface} requires an IP address!')
         if 'peer' in interface_config:
             has_peer = True
 
     # If one interface runs in unicast mode instead of multicast, so must all the
     # others, else conntrackd will error out with: "cannot use UDP with other
     # dedicated link protocols"
     if has_peer:
         for interface, interface_config in conntrack['interface'].items():
             if 'peer' not in interface_config:
                 raise ConfigError('Can not mix unicast and multicast mode!')
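     # Illustrative (hypothetical interfaces): if eth0 has a peer configured (unicast) but
     # eth1 does not, commit fails here instead of conntrackd refusing to start later.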
 
     if 'expect_sync' in conntrack:
         if len(conntrack['expect_sync']) > 1 and 'all' in conntrack['expect_sync']:
             raise ConfigError('Can not configure expect-sync "all" with other protocols!')
 
     if 'listen_address' in conntrack:
         for address in conntrack['listen_address']:
             if not is_addr_assigned(address):
                 raise ConfigError(f'Specified listen-address {address} not assigned to any interface!')
 
     vrrp_group = dict_search('failover_mechanism.vrrp.sync_group', conntrack)
     if vrrp_group == None:
         raise ConfigError(f'No VRRP sync-group defined!')
     if vrrp_group not in conntrack['vrrp']:
         raise ConfigError(f'VRRP sync-group {vrrp_group} not configured!')
 
     return None
 
 def generate(conntrack):
     if not conntrack:
         if os.path.isfile(config_file):
             os.unlink(config_file)
         return None
 
     render(config_file, 'conntrackd/conntrackd.conf.j2', conntrack)
 
     return None
 
 def apply(conntrack):
     systemd_service = 'conntrackd.service'
     if not conntrack:
         # The failover mechanism daemon must be notified that it no longer needs
         # to execute conntrackd actions on transition. This is only required
         # once when conntrackd is stopped and taken out of service!
         if process_named_running('conntrackd'):
             resync_vrrp()
 
         call(f'systemctl stop {systemd_service}')
         return None
 
     # The failover mechanism daemon must be notified that it needs to execute
     # conntrackd actions on transition. This is only required once when conntrackd
     # is started the first time!
     if not process_named_running('conntrackd'):
         resync_vrrp()
 
     call(f'systemctl reload-or-restart {systemd_service}')
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/service_dns_dynamic.py b/src/conf_mode/service_dns_dynamic.py
index a551a9891..5f5303856 100755
--- a/src/conf_mode/service_dns_dynamic.py
+++ b/src/conf_mode/service_dns_dynamic.py
@@ -1,192 +1,192 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2018-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 import re
 from sys import exit
 
 from vyos.base import Warning
 from vyos.config import Config
 from vyos.configverify import verify_interface_exists
 from vyos.configverify import dynamic_interface_pattern
 from vyos.template import render
 from vyos.utils.process import call
 from vyos.utils.network import interface_exists
 from vyos import ConfigError
 from vyos import airbag
 airbag.enable()
 
 config_file = r'/run/ddclient/ddclient.conf'
 systemd_override = r'/run/systemd/system/ddclient.service.d/override.conf'
 
 # Protocols that require zone
 zone_necessary = ['cloudflare', 'digitalocean', 'godaddy', 'hetzner', 'gandi',
                   'nfsn', 'nsupdate']
 zone_supported = zone_necessary + ['dnsexit2', 'zoneedit1']
 
 # Protocols that do not require username
 username_unnecessary = ['1984', 'cloudflare', 'cloudns', 'digitalocean', 'dnsexit2',
                         'duckdns', 'freemyip', 'hetzner', 'keysystems', 'njalla',
                         'nsupdate', 'regfishde']
 
 # Protocols that support TTL
 ttl_supported = ['cloudflare', 'dnsexit2', 'gandi', 'hetzner', 'godaddy', 'nfsn',
                  'nsupdate']
 
 # Protocols that support both IPv4 and IPv6
 dualstack_supported = ['cloudflare', 'digitalocean', 'dnsexit2', 'duckdns',
                        'dyndns2', 'easydns', 'freedns', 'hetzner', 'infomaniak',
                        'njalla']
 
 # dyndns2 protocol in ddclient honors dual stack for selective servers
 # because of the way it is implemented in ddclient
 dyndns_dualstack_servers = ['members.dyndns.org', 'dynv6.com']
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
 
     base = ['service', 'dns', 'dynamic']
     if not conf.exists(base):
         return None
 
     dyndns = conf.get_config_dict(base, key_mangling=('-', '_'),
                                   no_tag_node_value_mangle=True,
                                   get_first_key=True,
                                   with_recursive_defaults=True)
 
     dyndns['config_file'] = config_file
     return dyndns
 
 def verify(dyndns):
     # bail out early - looks like removal from running config
     if not dyndns or 'name' not in dyndns:
         return None
 
     # Dynamic DNS service provider - configuration validation
     for service, config in dyndns['name'].items():
         error_msg_req = f'is required for Dynamic DNS service "{service}"'
         error_msg_uns = f'is not supported for Dynamic DNS service "{service}"'
 
         for field in ['protocol', 'address', 'host_name']:
             if field not in config:
                 raise ConfigError(f'"{field.replace("_", "-")}" {error_msg_req}')
 
         if not any(x in config['address'] for x in ['interface', 'web']):
             raise ConfigError(f'Either "interface" or "web" {error_msg_req} '
                               f'with protocol "{config["protocol"]}"')
         if all(x in config['address'] for x in ['interface', 'web']):
             raise ConfigError(f'Both "interface" and "web" at the same time {error_msg_uns} '
                               f'with protocol "{config["protocol"]}"')
 
         # If dyndns address is an interface, ensure that the interface exists
         # and warn if a non-active dynamic interface is used
         if 'interface' in config['address']:
             tmp = re.compile(dynamic_interface_pattern)
             # skip the existence check for dynamic interfaces - they may not exist yet
             if tmp.match(config['address']['interface']):
                 if not interface_exists(config['address']['interface']):
                     Warning(f'Interface "{config["address"]["interface"]}" does not exist yet and '
                             f'cannot be used for Dynamic DNS service "{service}" until it is up!')
             else:
-                verify_interface_exists(config['address']['interface'])
+                verify_interface_exists(dyndns, config['address']['interface'])
 
         if 'web' in config['address']:
             # If 'skip' is specified, 'url' is required as well
             if 'skip' in config['address']['web'] and 'url' not in config['address']['web']:
                 raise ConfigError(f'"url" along with "skip" {error_msg_req} '
                                   f'with protocol "{config["protocol"]}"')
             if 'url' in config['address']['web']:
                 # Warn if using checkip.dyndns.org, as it does not support HTTPS
                 # See: https://github.com/ddclient/ddclient/issues/597
                 if re.search(r'^(https?://)?checkip\.dyndns\.org', config['address']['web']['url']):
                     Warning(f'"checkip.dyndns.org" does not support HTTPS requests for IP address '
                             f'lookup. Please use a different IP address lookup service.')
 
         # RFC2136 uses 'key' instead of 'password'
         if config['protocol'] != 'nsupdate' and 'password' not in config:
             raise ConfigError(f'"password" {error_msg_req}')
 
         # Other RFC2136 specific configuration validation
         if config['protocol'] == 'nsupdate':
             if 'password' in config:
                 raise ConfigError(f'"password" {error_msg_uns} with protocol "{config["protocol"]}"')
             for field in ['server', 'key']:
                 if field not in config:
                     raise ConfigError(f'"{field}" {error_msg_req} with protocol "{config["protocol"]}"')
 
         if config['protocol'] in zone_necessary and 'zone' not in config:
             raise ConfigError(f'"zone" {error_msg_req} with protocol "{config["protocol"]}"')
 
         if config['protocol'] not in zone_supported and 'zone' in config:
             raise ConfigError(f'"zone" {error_msg_uns} with protocol "{config["protocol"]}"')
 
         if config['protocol'] not in username_unnecessary and 'username' not in config:
             raise ConfigError(f'"username" {error_msg_req} with protocol "{config["protocol"]}"')
 
         if config['protocol'] not in ttl_supported and 'ttl' in config:
             raise ConfigError(f'"ttl" {error_msg_uns} with protocol "{config["protocol"]}"')
 
         if config['ip_version'] == 'both':
             if config['protocol'] not in dualstack_supported:
                 raise ConfigError(f'Both IPv4 and IPv6 at the same time {error_msg_uns} '
                                   f'with protocol "{config["protocol"]}"')
             # dyndns2 protocol in ddclient honors dual stack only for dyn.com (dyndns.org)
             if config['protocol'] == 'dyndns2' and 'server' in config and config['server'] not in dyndns_dualstack_servers:
                 raise ConfigError(f'Both IPv4 and IPv6 at the same time {error_msg_uns} '
                                   f'for "{config["server"]}" with protocol "{config["protocol"]}"')
 
         if {'wait_time', 'expiry_time'} <= config.keys() and int(config['expiry_time']) < int(config['wait_time']):
                 raise ConfigError(f'"expiry-time" must be greater than "wait-time" for '
                                   f'Dynamic DNS service "{service}"')
 
     return None
 
 def generate(dyndns):
     # bail out early - looks like removal from running config
     if not dyndns or 'name' not in dyndns:
         return None
 
     render(config_file, 'dns-dynamic/ddclient.conf.j2', dyndns, permission=0o600)
     render(systemd_override, 'dns-dynamic/override.conf.j2', dyndns)
     return None
 
 def apply(dyndns):
     systemd_service = 'ddclient.service'
     # Reload systemd manager configuration
     call('systemctl daemon-reload')
 
     # bail out early - looks like removal from running config
     if not dyndns or 'name' not in dyndns:
         call(f'systemctl stop {systemd_service}')
         if os.path.exists(config_file):
             os.unlink(config_file)
     else:
         call(f'systemctl reload-or-restart {systemd_service}')
 
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/service_ipoe-server.py b/src/conf_mode/service_ipoe-server.py
index 28b7fb03c..16c82e591 100755
--- a/src/conf_mode/service_ipoe-server.py
+++ b/src/conf_mode/service_ipoe-server.py
@@ -1,114 +1,114 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2018-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 
 from sys import exit
 
 from vyos.config import Config
 from vyos.configdict import get_accel_dict
 from vyos.configverify import verify_interface_exists
 from vyos.template import render
 from vyos.utils.process import call
 from vyos.utils.dict import dict_search
 from vyos.accel_ppp_util import get_pools_in_order
 from vyos.accel_ppp_util import verify_accel_ppp_name_servers
 from vyos.accel_ppp_util import verify_accel_ppp_wins_servers
 from vyos.accel_ppp_util import verify_accel_ppp_ip_pool
 from vyos.accel_ppp_util import verify_accel_ppp_authentication
 from vyos import ConfigError
 from vyos import airbag
 airbag.enable()
 
 
 ipoe_conf = '/run/accel-pppd/ipoe.conf'
 ipoe_chap_secrets = '/run/accel-pppd/ipoe.chap-secrets'
 
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
     base = ['service', 'ipoe-server']
     if not conf.exists(base):
         return None
 
     # retrieve common dictionary keys
     ipoe = get_accel_dict(conf, base, ipoe_chap_secrets)
 
     if dict_search('client_ip_pool', ipoe):
         # Multiple named pools require ordered values T5099
         ipoe['ordered_named_pools'] = get_pools_in_order(dict_search('client_ip_pool', ipoe))
 
     ipoe['server_type'] = 'ipoe'
     return ipoe
 
 
 def verify(ipoe):
     if not ipoe:
         return None
 
     if 'interface' not in ipoe:
         raise ConfigError('No IPoE interface configured')
 
     for interface, iface_config in ipoe['interface'].items():
-        verify_interface_exists(interface, warning_only=True)
+        verify_interface_exists(ipoe, interface, warning_only=True)
         if 'client_subnet' in iface_config and 'vlan' in iface_config:
             raise ConfigError('Option "client-subnet" and "vlan" are mutually exclusive, '
                               'use "client-ip-pool" instead!')
 
     verify_accel_ppp_authentication(ipoe, local_users=False)
     verify_accel_ppp_ip_pool(ipoe)
     verify_accel_ppp_name_servers(ipoe)
     verify_accel_ppp_wins_servers(ipoe)
 
     return None
 
 
 def generate(ipoe):
     if not ipoe:
         return None
 
     render(ipoe_conf, 'accel-ppp/ipoe.config.j2', ipoe)
 
     if dict_search('authentication.mode', ipoe) == 'local':
         render(ipoe_chap_secrets, 'accel-ppp/chap-secrets.ipoe.j2',
                ipoe, permission=0o640)
     return None
 
 
 def apply(ipoe):
     systemd_service = 'accel-ppp@ipoe.service'
     if ipoe == None:
         call(f'systemctl stop {systemd_service}')
         for file in [ipoe_conf, ipoe_chap_secrets]:
             if os.path.exists(file):
                 os.unlink(file)
 
         return None
 
     call(f'systemctl reload-or-restart {systemd_service}')
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/service_mdns_repeater.py b/src/conf_mode/service_mdns_repeater.py
index 207da5e03..b0ece031c 100755
--- a/src/conf_mode/service_mdns_repeater.py
+++ b/src/conf_mode/service_mdns_repeater.py
@@ -1,146 +1,146 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2017-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 
 from json import loads
 from sys import exit
 from netifaces import ifaddresses, AF_INET, AF_INET6
 
 from vyos.config import Config
 from vyos.configverify import verify_interface_exists
 from vyos.ifconfig.vrrp import VRRP
 from vyos.template import render
 from vyos.utils.process import call
 from vyos import ConfigError
 from vyos import airbag
 airbag.enable()
 
 config_file = '/run/avahi-daemon/avahi-daemon.conf'
 systemd_override = r'/run/systemd/system/avahi-daemon.service.d/override.conf'
 vrrp_running_file = '/run/mdns_vrrp_active'
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
 
     base = ['service', 'mdns', 'repeater']
     if not conf.exists(base):
         return None
 
     mdns = conf.get_config_dict(base, key_mangling=('-', '_'),
                                 no_tag_node_value_mangle=True,
                                 get_first_key=True,
                                 with_recursive_defaults=True)
 
     if mdns:
         mdns['vrrp_exists'] = conf.exists('high-availability vrrp')
         mdns['config_file'] = config_file
 
     return mdns
 
 def verify(mdns):
     if not mdns or 'disable' in mdns:
         return None
 
     # We need at least two interfaces to repeat mDNS advertisements
     if 'interface' not in mdns or len(mdns['interface']) < 2:
         raise ConfigError('mDNS repeater requires at least 2 configured interfaces!')
 
     # For mdns-repeater to work it is essential that each interface has an
     # address of the configured IP version(s) assigned
     for interface in mdns['interface']:
-        verify_interface_exists(interface)
+        verify_interface_exists(mdns, interface)
 
         if mdns['ip_version'] in ['ipv4', 'both'] and AF_INET not in ifaddresses(interface):
             raise ConfigError('mDNS repeater requires an IPv4 address to be '
                                   f'configured on interface "{interface}"')
 
         if mdns['ip_version'] in ['ipv6', 'both'] and AF_INET6 not in ifaddresses(interface):
             raise ConfigError('mDNS repeater requires an IPv6 address to be '
                                   f'configured on interface "{interface}"')
 
     return None
 
 # Get VRRP states from interfaces, returns only interfaces where state is MASTER
 def get_vrrp_master(interfaces):
     json_data = loads(VRRP.collect('json'))
     for group in json_data:
         if 'data' in group:
             if 'ifp_ifname' in group['data']:
                 iface = group['data']['ifp_ifname']
                 state = group['data']['state'] # 2 = Master
                 if iface in interfaces and state != 2:
                     interfaces.remove(iface)
     return interfaces
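 # Illustrative: given interfaces=['eth0', 'eth1'] and keepalived reporting state 2 (MASTER)
 # only for eth0, eth1 is removed and the repeater is left with ['eth0'].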
 
 def generate(mdns):
     if not mdns:
         return None
 
     if 'disable' in mdns:
         print('Warning: mDNS repeater will be deactivated because it is disabled')
         return None
 
     if mdns['vrrp_exists'] and 'vrrp_disable' in mdns:
         mdns['interface'] = get_vrrp_master(mdns['interface'])
 
         if len(mdns['interface']) < 2:
             return None
 
     render(config_file, 'mdns-repeater/avahi-daemon.conf.j2', mdns)
     render(systemd_override, 'mdns-repeater/override.conf.j2', mdns)
     return None
 
 def apply(mdns):
     systemd_service = 'avahi-daemon.service'
     # Reload systemd manager configuration
     call('systemctl daemon-reload')
 
     if not mdns or 'disable' in mdns:
         call(f'systemctl stop {systemd_service}')
         if os.path.exists(config_file):
             os.unlink(config_file)
 
         if os.path.exists(vrrp_running_file):
             os.unlink(vrrp_running_file)
     else:
         if 'vrrp_disable' not in mdns and os.path.exists(vrrp_running_file):
             os.unlink(vrrp_running_file)
 
         if mdns['vrrp_exists'] and 'vrrp_disable' in mdns:
             if not os.path.exists(vrrp_running_file):
                 os.mknod(vrrp_running_file) # vrrp script looks for this file to update mdns repeater
 
             if len(mdns['interface']) < 2:
                 call(f'systemctl stop {systemd_service}')
                 return None
 
         call(f'systemctl restart {systemd_service}')
 
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/service_ndp-proxy.py b/src/conf_mode/service_ndp-proxy.py
index aa2374f4c..024ad79f2 100755
--- a/src/conf_mode/service_ndp-proxy.py
+++ b/src/conf_mode/service_ndp-proxy.py
@@ -1,91 +1,91 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2023 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 
 from sys import exit
 
 from vyos.config import Config
 from vyos.configverify import verify_interface_exists
 from vyos.utils.process import call
 from vyos.template import render
 from vyos import ConfigError
 from vyos import airbag
 airbag.enable()
 
 systemd_service = 'ndppd.service'
 ndppd_config = '/run/ndppd/ndppd.conf'
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
     base = ['service', 'ndp-proxy']
     if not conf.exists(base):
         return None
 
     ndpp = conf.get_config_dict(base, key_mangling=('-', '_'),
                                 get_first_key=True,
                                 with_recursive_defaults=True)
 
     return ndpp
 
 def verify(ndpp):
     if not ndpp:
         return None
 
     if 'interface' in ndpp:
         for interface, interface_config in ndpp['interface'].items():
-            verify_interface_exists(interface)
+            verify_interface_exists(ndpp, interface)
 
             if 'rule' in interface_config:
                 for rule, rule_config in interface_config['rule'].items():
                     if rule_config['mode'] == 'interface' and 'interface' not in rule_config:
                         raise ConfigError(f'Rule "{rule}" uses interface mode but no interface defined!')
 
                     if rule_config['mode'] != 'interface' and 'interface' in rule_config:
                         if interface_config['mode'] != 'interface' and 'interface' in interface_config:
                             raise ConfigError(f'Rule "{rule}" does not use interface mode, thus interface can not be defined!')
 
     return None
 
 def generate(ndpp):
     if not ndpp:
         return None
 
     render(ndppd_config, 'ndppd/ndppd.conf.j2', ndpp)
     return None
 
 def apply(ndpp):
     if not ndpp:
         call(f'systemctl stop {systemd_service}')
         if os.path.isfile(ndppd_config):
             os.unlink(ndppd_config)
         return None
 
     call(f'systemctl reload-or-restart {systemd_service}')
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/service_ntp.py b/src/conf_mode/service_ntp.py
index f11690ee6..83880fd72 100755
--- a/src/conf_mode/service_ntp.py
+++ b/src/conf_mode/service_ntp.py
@@ -1,136 +1,136 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2018-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 
 from vyos.config import Config
 from vyos.configdict import is_node_changed
 from vyos.configverify import verify_vrf
 from vyos.configverify import verify_interface_exists
 from vyos.utils.process import call
 from vyos.utils.permission import chmod_750
 from vyos.utils.network import get_interface_config
 from vyos.template import render
 from vyos.template import is_ipv4
 from vyos import ConfigError
 from vyos import airbag
 airbag.enable()
 
 config_file = r'/run/chrony/chrony.conf'
 systemd_override = r'/run/systemd/system/chrony.service.d/override.conf'
 user_group = '_chrony'
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
     base = ['service', 'ntp']
     if not conf.exists(base):
         return None
 
     ntp = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, with_defaults=True)
     ntp['config_file'] = config_file
     ntp['user'] = user_group
 
     tmp = is_node_changed(conf, base + ['vrf'])
     if tmp: ntp.update({'restart_required': {}})
 
     return ntp
 
 def verify(ntp):
     # bail out early - looks like removal from running config
     if not ntp:
         return None
 
     if 'server' not in ntp:
         raise ConfigError('NTP server not configured')
 
     verify_vrf(ntp)
 
     if 'interface' in ntp:
         # If ntpd should listen on a given interface, ensure it exists
         interface = ntp['interface']
-        verify_interface_exists(interface)
+        verify_interface_exists(ntp, interface)
 
         # If we run in a VRF, our interface must belong to this VRF, too
         if 'vrf' in ntp:
             tmp = get_interface_config(interface)
             vrf_name = ntp['vrf']
             if 'master' not in tmp or tmp['master'] != vrf_name:
                 raise ConfigError(f'NTP runs in VRF "{vrf_name}" - "{interface}" '\
                                   f'does not belong to this VRF!')
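         # Illustrative (hypothetical VRF name): with "set service ntp vrf mgmt", the listen
         # interface must report master == 'mgmt' in its link attributes or commit fails here.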
 
     if 'listen_address' in ntp:
         ipv4_addresses = 0
         ipv6_addresses = 0
         for address in ntp['listen_address']:
             if is_ipv4(address):
                 ipv4_addresses += 1
             else:
                 ipv6_addresses += 1
         if ipv4_addresses > 1:
             raise ConfigError('NTP only allows one IPv4 value for the listen-address parameter')
         if ipv6_addresses > 1:
             raise ConfigError('NTP only allows one IPv6 value for the listen-address parameter')
 
     return None
 
 def generate(ntp):
     # bail out early - looks like removal from running config
     if not ntp:
         return None
 
     render(config_file, 'chrony/chrony.conf.j2', ntp, user=user_group, group=user_group)
     render(systemd_override, 'chrony/override.conf.j2', ntp, user=user_group, group=user_group)
 
     # Ensure proper permission for chrony command socket
     config_dir = os.path.dirname(config_file)
     chmod_750(config_dir)
 
     return None
 
 def apply(ntp):
     systemd_service = 'chrony.service'
     # Reload systemd manager configuration
     call('systemctl daemon-reload')
 
     if not ntp:
         # NTP support is removed in the commit
         call(f'systemctl stop {systemd_service}')
         if os.path.exists(config_file):
             os.unlink(config_file)
         if os.path.isfile(systemd_override):
             os.unlink(systemd_override)
         return
 
     # we need to restart the service if e.g. the VRF name changed
     systemd_action = 'reload-or-restart'
     if 'restart_required' in ntp:
         systemd_action = 'restart'
 
     call(f'systemctl {systemd_action} {systemd_service}')
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/service_pppoe-server.py b/src/conf_mode/service_pppoe-server.py
index c95f976d3..566a7b149 100755
--- a/src/conf_mode/service_pppoe-server.py
+++ b/src/conf_mode/service_pppoe-server.py
@@ -1,164 +1,164 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2018-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 
 from sys import exit
 
 from vyos.config import Config
 from vyos.configdict import get_accel_dict
 from vyos.configdict import is_node_changed
 from vyos.configverify import verify_interface_exists
 from vyos.template import render
 from vyos.utils.process import call
 from vyos.utils.dict import dict_search
 from vyos.accel_ppp_util import verify_accel_ppp_name_servers
 from vyos.accel_ppp_util import verify_accel_ppp_wins_servers
 from vyos.accel_ppp_util import verify_accel_ppp_authentication
 from vyos.accel_ppp_util import verify_accel_ppp_ip_pool
 from vyos.accel_ppp_util import get_pools_in_order
 from vyos import ConfigError
 from vyos import airbag
 
 airbag.enable()
 
 pppoe_conf = r'/run/accel-pppd/pppoe.conf'
 pppoe_chap_secrets = r'/run/accel-pppd/pppoe.chap-secrets'
 
 def convert_pado_delay(pado_delay):
     new_pado_delay = {'delays_without_sessions': [],
                       'delays_with_sessions': []}
     for delay, sessions in pado_delay.items():
         if not sessions:
             new_pado_delay['delays_without_sessions'].append(delay)
         else:
             new_pado_delay['delays_with_sessions'].append((delay, int(sessions['sessions'])))
     return new_pado_delay
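 # Illustrative (hypothetical values): {'10': {'sessions': '100'}, '20': {}} becomes
 # {'delays_without_sessions': ['20'], 'delays_with_sessions': [('10', 100)]}.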
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
     base = ['service', 'pppoe-server']
     if not conf.exists(base):
         return None
 
     # retrieve common dictionary keys
     pppoe = get_accel_dict(conf, base, pppoe_chap_secrets)
 
     if dict_search('client_ip_pool', pppoe):
         # Multiple named pools require ordered values T5099
         pppoe['ordered_named_pools'] = get_pools_in_order(dict_search('client_ip_pool', pppoe))
 
     if dict_search('pado_delay', pppoe):
         pado_delay = dict_search('pado_delay', pppoe)
         pppoe['pado_delay'] = convert_pado_delay(pado_delay)
 
     # reload-or-restart is not implemented in accel-ppp,
     # use this workaround until it is implemented
     # https://phabricator.accel-ppp.org/T3
     conditions = [is_node_changed(conf, base + ['client-ip-pool']),
                   is_node_changed(conf, base + ['client-ipv6-pool']),
                   is_node_changed(conf, base + ['interface'])]
     if any(conditions):
         pppoe.update({'restart_required': {}})
     pppoe['server_type'] = 'pppoe'
     return pppoe
 
 def verify_pado_delay(pppoe):
     if 'pado_delay' in pppoe:
         pado_delay = pppoe['pado_delay']
 
         delays_without_sessions = pado_delay['delays_without_sessions']
         if 'disable' in delays_without_sessions:
             raise ConfigError(
                 'Number of sessions must be specified for "pado-delay disable"'
             )
 
         if len(delays_without_sessions) > 1:
             raise ConfigError(
                 f'Cannot add more than ONE pado-delay without sessions, '
                 f'but {len(delays_without_sessions)} were set'
             )
 
         if 'disable' in [delay[0] for delay in pado_delay['delays_with_sessions']]:
             # sort delays by session count to verify that no delay is configured
             # for a session count above the 'disable' threshold
             sorted_pado_delay = sorted(pado_delay['delays_with_sessions'], key=lambda k_v: k_v[1])
             last_delay = sorted_pado_delay[-1]
 
             if last_delay[0] != 'disable':
                 raise ConfigError(
                     f'Cannot add pado-delay after disabled sessions, but '
                     f'"pado-delay {last_delay[0]} sessions {last_delay[1]}" was set'
                 )
 
 def verify(pppoe):
     if not pppoe:
         return None
 
     verify_accel_ppp_authentication(pppoe)
     verify_accel_ppp_ip_pool(pppoe)
     verify_accel_ppp_name_servers(pppoe)
     verify_accel_ppp_wins_servers(pppoe)
     verify_pado_delay(pppoe)
 
     if 'interface' not in pppoe:
         raise ConfigError('At least one listen interface must be defined!')
 
     # Check if the interface exists on the system
     for interface in pppoe['interface']:
-        verify_interface_exists(interface, warning_only=True)
+        verify_interface_exists(pppoe, interface, warning_only=True)
 
     return None
 
 
 def generate(pppoe):
     if not pppoe:
         return None
 
     render(pppoe_conf, 'accel-ppp/pppoe.config.j2', pppoe)
 
     if dict_search('authentication.mode', pppoe) == 'local':
         render(pppoe_chap_secrets, 'accel-ppp/chap-secrets.config_dict.j2',
                pppoe, permission=0o640)
     return None
 
 
 def apply(pppoe):
     systemd_service = 'accel-ppp@pppoe.service'
     if not pppoe:
         call(f'systemctl stop {systemd_service}')
         for file in [pppoe_conf, pppoe_chap_secrets]:
             if os.path.exists(file):
                 os.unlink(file)
         return None
 
     if 'restart_required' in pppoe:
         call(f'systemctl restart {systemd_service}')
     else:
         call(f'systemctl reload-or-restart {systemd_service}')
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/service_salt-minion.py b/src/conf_mode/service_salt-minion.py
index a8fce8e01..edf74b0c0 100755
--- a/src/conf_mode/service_salt-minion.py
+++ b/src/conf_mode/service_salt-minion.py
@@ -1,118 +1,118 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2018-2022 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 
 from socket import gethostname
 from sys import exit
 from urllib3 import PoolManager
 
 from vyos.base import Warning
 from vyos.config import Config
 from vyos.configverify import verify_interface_exists
 from vyos.template import render
 from vyos.utils.process import call
 from vyos.utils.permission import chown
 from vyos import ConfigError
 
 from vyos import airbag
 airbag.enable()
 
 config_file = r'/etc/salt/minion'
 master_keyfile = r'/opt/vyatta/etc/config/salt/pki/minion/master_sign.pub'
 
 user='minion'
 group='vyattacfg'
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
     base = ['service', 'salt-minion']
 
     if not conf.exists(base):
         return None
 
     salt = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
     # The ID default is dynamic (hostname), thus we cannot use defaults()
     if 'id' not in salt:
         salt['id'] = gethostname()
     # We have gathered the dict representation of the CLI, but there are default
     # options which we need to merge into the dictionary retrieved.
     salt = conf.merge_defaults(salt, recursive=True)
 
     if not conf.exists(base):
         return None
     else:
         conf.set_level(base)
 
     return salt
 
 def verify(salt):
     if not salt:
         return None
 
     if 'hash' in salt and salt['hash'] == 'sha1':
         Warning('Do not use sha1 hashing algorithm, upgrade to sha256 or later!')
 
     if 'source_interface' in salt:
-        verify_interface_exists(salt['source_interface'])
+        verify_interface_exists(salt, salt['source_interface'])
 
     return None
 
 def generate(salt):
     if not salt:
         return None
 
     render(config_file, 'salt-minion/minion.j2', salt, user=user, group=group)
 
     if not os.path.exists(master_keyfile):
         if 'master_key' in salt:
             req = PoolManager().request('GET', salt['master_key'], preload_content=False)
             with open(master_keyfile, 'wb') as f:
                 while True:
                     data = req.read(1024)
                     if not data:
                         break
                     f.write(data)
 
             req.release_conn()
             chown(master_keyfile, user, group)
 
     return None
 
 def apply(salt):
     service_name = 'salt-minion.service'
     if not salt:
         # Salt removed from running config
         call(f'systemctl stop {service_name}')
         if os.path.exists(config_file):
             os.unlink(config_file)
     else:
         call(f'systemctl restart {service_name}')
 
     return None
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/system_flow-accounting.py b/src/conf_mode/system_flow-accounting.py
index 2dacd92da..a12ee363d 100755
--- a/src/conf_mode/system_flow-accounting.py
+++ b/src/conf_mode/system_flow-accounting.py
@@ -1,316 +1,316 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2018-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 import re
 
 from sys import exit
 from ipaddress import ip_address
 
 from vyos.config import Config
 from vyos.config import config_dict_merge
 from vyos.configverify import verify_vrf
 from vyos.configverify import verify_interface_exists
 from vyos.template import render
 from vyos.utils.process import call
 from vyos.utils.process import cmd
 from vyos.utils.process import run
 from vyos.utils.network import is_addr_assigned
 from vyos import ConfigError
 from vyos import airbag
 airbag.enable()
 
 uacctd_conf_path = '/run/pmacct/uacctd.conf'
 systemd_service = 'uacctd.service'
 systemd_override = f'/run/systemd/system/{systemd_service}.d/override.conf'
 nftables_nflog_table = 'raw'
 nftables_nflog_chain = 'VYOS_PREROUTING_HOOK'
 egress_nftables_nflog_table = 'inet mangle'
 egress_nftables_nflog_chain = 'FORWARD'
 
 # get nftables rule dict for chain in table
 def _nftables_get_nflog(chain, table):
     # define list with rules
     rules = []
 
     # prepare regex for parsing rules
     rule_pattern = r'[io]ifname "(?P<interface>[\w\.\*\-]+)".*handle (?P<handle>[\d]+)'
     rule_re = re.compile(rule_pattern)
 
     # run nftables, save output and split it by lines
     nftables_command = f'nft -a list chain {table} {chain}'
     tmp = cmd(nftables_command, message='Failed to get flows list')
     # parse each line and add information to list
     for current_rule in tmp.splitlines():
         if 'FLOW_ACCOUNTING_RULE' not in current_rule:
             continue
         current_rule_parsed = rule_re.search(current_rule)
         if current_rule_parsed:
             groups = current_rule_parsed.groupdict()
             rules.append({ 'interface': groups["interface"], 'table': table, 'handle': groups["handle"] })
 
     # return list with rules
     return rules
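
The parsing above hinges on rule_pattern; a self-contained check against a hypothetical line of 'nft -a list chain' output (sample text made up for illustration) shows the two named groups it extracts:

import re

sample = ('iifname "eth0" counter packets 0 bytes 0 log group 2 snaplen 128 '
          'queue-threshold 100 comment "FLOW_ACCOUNTING_RULE" # handle 42')
match = re.search(r'[io]ifname "(?P<interface>[\w\.\*\-]+)".*handle (?P<handle>[\d]+)', sample)
assert match and match.groupdict() == {'interface': 'eth0', 'handle': '42'}
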
 
 def _nftables_config(configured_ifaces, direction, length=None):
     # define list of nftables commands to modify settings
     nftable_commands = []
     nftables_chain = nftables_nflog_chain
     nftables_table = nftables_nflog_table
 
     if direction == "egress":
         nftables_chain = egress_nftables_nflog_chain
         nftables_table = egress_nftables_nflog_table
 
     # prepare extended list with configured interfaces
     configured_ifaces_extended = []
     for iface in configured_ifaces:
         configured_ifaces_extended.append({ 'iface': iface })
 
     # get currently configured interfaces with nftables rules
     active_nflog_rules = _nftables_get_nflog(nftables_chain, nftables_table)
 
     # compare current active list with configured one and delete excessive interfaces, add missed
     active_nflog_ifaces = []
     for rule in active_nflog_rules:
         interface = rule['interface']
         if interface not in configured_ifaces:
             table = rule['table']
             handle = rule['handle']
             nftable_commands.append(f'nft delete rule {table} {nftables_chain} handle {handle}')
         else:
             active_nflog_ifaces.append({
                 'iface': interface,
             })
 
     # do not create new rules for already configured interfaces
     for iface in active_nflog_ifaces:
         if iface in configured_ifaces_extended:
             configured_ifaces_extended.remove(iface)
 
     # create missed rules
     for iface_extended in configured_ifaces_extended:
         iface = iface_extended['iface']
         iface_prefix = "o" if direction == "egress" else "i"
         rule_definition = f'{iface_prefix}ifname "{iface}" counter log group 2 snaplen {length} queue-threshold 100 comment "FLOW_ACCOUNTING_RULE"'
         nftable_commands.append(f'nft insert rule {nftables_table} {nftables_chain} {rule_definition}')
         # Also add IPv6 ingress logging
         if nftables_table == nftables_nflog_table:
             nftable_commands.append(f'nft insert rule ip6 {nftables_table} {nftables_chain} {rule_definition}')
 
     # change nftables
     for command in nftable_commands:
         cmd(command, raising=ConfigError)
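
The loops above reconcile desired against active state: rules for interfaces that are no longer configured are deleted, and new rules are only inserted for interfaces that do not already have one. A simplified, hypothetical model of the same idea:

def plan_changes(active_ifaces, configured_ifaces):
    # delete what is active but no longer wanted, add what is wanted but missing
    to_delete = [i for i in active_ifaces if i not in configured_ifaces]
    to_add = [i for i in configured_ifaces if i not in active_ifaces]
    return to_delete, to_add

assert plan_changes(['eth0', 'eth1'], ['eth1', 'eth2']) == (['eth0'], ['eth2'])
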
 
 
 def _nftables_trigger_setup(operation: str) -> None:
     """Add a dummy rule to unlock the main pmacct loop with a packet-trigger
 
     Args:
         operation (str): 'add' or 'delete' a trigger
     """
     # check if a chain exists
     table_exists = False
     if run('nft -snj list table ip pmacct') == 0:
         table_exists = True
 
     if operation == 'delete' and table_exists:
         nft_cmd: str = 'nft delete table ip pmacct'
         cmd(nft_cmd, raising=ConfigError)
     if operation == 'add' and not table_exists:
         nft_cmds: list[str] = [
             'nft add table ip pmacct',
             'nft add chain ip pmacct pmacct_out { type filter hook output priority raw - 50 \\; policy accept \\; }',
             'nft add rule ip pmacct pmacct_out oif lo ip daddr 127.0.254.0 counter log group 2 snaplen 1 queue-threshold 0 comment NFLOG_TRIGGER'
         ]
         for nft_cmd in nft_cmds:
             cmd(nft_cmd, raising=ConfigError)
 
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
     base = ['system', 'flow-accounting']
     if not conf.exists(base):
         return None
 
     flow_accounting = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True)
 
     # We have gathered the dict representation of the CLI, but there are
     # default values which we need to conditionally update into the
     # dictionary retrieved.
     default_values = conf.get_config_defaults(**flow_accounting.kwargs,
                                               recursive=True)
 
     # delete individual flow type defaults - should only be added if user
     # sets this feature
     for flow_type in ['sflow', 'netflow']:
         if flow_type not in flow_accounting and flow_type in default_values:
             del default_values[flow_type]
 
     flow_accounting = config_dict_merge(default_values, flow_accounting)
 
     return flow_accounting
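
Stripping the per-flow-type defaults before the merge matters: merging the full default tree would silently re-introduce an 'sflow' or 'netflow' subtree the user never enabled. A simple stand-in merge (not the actual vyos.config.config_dict_merge) illustrates the effect:

def merge(defaults, config):
    # CLI values win; nested dicts are merged recursively
    out = dict(defaults)
    for key, value in config.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = merge(out[key], value)
        else:
            out[key] = value
    return out

defaults = {'packet_length': '128', 'netflow': {'version': '9'}}
cli = {'interface': ['eth0']}   # user never configured netflow
del defaults['netflow']         # so its defaults must not be merged back in
assert merge(defaults, cli) == {'packet_length': '128', 'interface': ['eth0']}
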
 
 def verify(flow_config):
     if not flow_config:
         return None
 
     # check if at least one collector is enabled
     if 'sflow' not in flow_config and 'netflow' not in flow_config and 'disable_imt' in flow_config:
         raise ConfigError('You need to configure at least sFlow or NetFlow, ' \
                           'or unset "disable-imt" for flow-accounting!')
 
     # Check if at least one interface is configured
     if 'interface' not in flow_config:
         raise ConfigError('Flow accounting requires at least one interface to ' \
                           'be configured!')
 
     # check that all configured interfaces exists in the system
     for interface in flow_config['interface']:
-        verify_interface_exists(interface, warning_only=True)
+        verify_interface_exists(flow_config, interface, warning_only=True)
 
     # check sFlow configuration
     if 'sflow' in flow_config:
         # check if at least one sFlow collector is configured
         if 'server' not in flow_config['sflow']:
             raise ConfigError('You need to configure at least one sFlow server!')
 
         # check that all sFlow collectors use the same IP protocol version
         sflow_collector_ipver = None
         for server in flow_config['sflow']['server']:
             if sflow_collector_ipver:
                 if sflow_collector_ipver != ip_address(server).version:
                     raise ConfigError("All sFlow servers must use the same IP protocol")
             else:
                 sflow_collector_ipver = ip_address(server).version
 
         # check if vrf is defined for sFlow
         verify_vrf(flow_config)
         sflow_vrf = None
         if 'vrf' in flow_config:
             sflow_vrf = flow_config['vrf']
 
         # check agent-id for sFlow: we should avoid mixing IPv4 agent-id with IPv6 collectors and vice-versa
         for server in flow_config['sflow']['server']:
             if 'agent_address' in flow_config['sflow']:
                 if ip_address(server).version != ip_address(flow_config['sflow']['agent_address']).version:
                     raise ConfigError('IPv4 and IPv6 addresses can not be mixed in "sflow agent-address" and "sflow '\
                                       'server". You need to set the same IP version for both "agent-address" and '\
                                       'all sFlow servers')
 
         if 'agent_address' in flow_config['sflow']:
             tmp = flow_config['sflow']['agent_address']
             if not is_addr_assigned(tmp, sflow_vrf):
                 raise ConfigError(f'Configured "sflow agent-address {tmp}" does not exist in the system!')
 
         # Check if configured sflow source-address exist in the system
         if 'source_address' in flow_config['sflow']:
             if not is_addr_assigned(flow_config['sflow']['source_address'], sflow_vrf):
                 tmp = flow_config['sflow']['source_address']
                 raise ConfigError(f'Configured "sflow source-address {tmp}" does not exist on the system!')
 
     # check NetFlow configuration
     if 'netflow' in flow_config:
         # check if vrf is defined for netflow
         netflow_vrf = None
         if 'vrf' in flow_config:
             netflow_vrf = flow_config['vrf']
 
         # check if at least one NetFlow collector is configured if NetFlow configuration is present
         if 'server' not in flow_config['netflow']:
             raise ConfigError('You need to configure at least one NetFlow server!')
 
         # Check if configured netflow source-address exist in the system
         if 'source_address' in flow_config['netflow']:
             if not is_addr_assigned(flow_config['netflow']['source_address'], netflow_vrf):
                 tmp = flow_config['netflow']['source_address']
                 raise ConfigError(f'Configured "netflow source-address {tmp}" does not exist on the system!')
 
         # Check if engine-id compatible with selected protocol version
         if 'engine_id' in flow_config['netflow']:
             v5_filter = r'^(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5]):(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$'
             v9v10_filter = r'^(\d|[1-9]\d{1,8}|[1-3]\d{9}|4[01]\d{8}|42[0-8]\d{7}|429[0-3]\d{6}|4294[0-8]\d{5}|42949[0-5]\d{4}|429496[0-6]\d{3}|4294967[01]\d{2}|42949672[0-8]\d|429496729[0-5])$'
             engine_id = flow_config['netflow']['engine_id']
             version = flow_config['netflow']['version']
 
             if flow_config['netflow']['version'] == '5':
                 regex_filter = re.compile(v5_filter)
                 if not regex_filter.search(engine_id):
                     raise ConfigError(f'You cannot use NetFlow engine-id "{engine_id}" '\
                                       f'together with NetFlow protocol version "{version}"!')
             else:
                 regex_filter = re.compile(v9v10_filter)
                 if not regex_filter.search(flow_config['netflow']['engine_id']):
                     raise ConfigError(f'Can not use NetFlow engine-id "{engine_id}" together '\
                                       f'with NetFlow protocol version "{version}"!')
 
     # return True if all checks were passed
     return True
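
The two engine-id filters accept different shapes: the v5 pattern requires an "A:B" pair where each part is in the range 0-255, while the v9/v10 pattern accepts a single value in the unsigned 32-bit range. A quick standalone check:

import re

v5 = re.compile(r'^(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5]):(\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$')
assert v5.search('100:5') and not v5.search('300:5') and not v5.search('100')

v9v10 = re.compile(r'^(\d|[1-9]\d{1,8}|[1-3]\d{9}|4[01]\d{8}|42[0-8]\d{7}|429[0-3]\d{6}|4294[0-8]\d{5}|42949[0-5]\d{4}|429496[0-6]\d{3}|4294967[01]\d{2}|42949672[0-8]\d|429496729[0-5])$')
assert v9v10.search('4294967295') and not v9v10.search('4294967296')
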
 
 def generate(flow_config):
     if not flow_config:
         return None
 
     render(uacctd_conf_path, 'pmacct/uacctd.conf.j2', flow_config)
     render(systemd_override, 'pmacct/override.conf.j2', flow_config)
     # Reload systemd manager configuration
     call('systemctl daemon-reload')
 
 def apply(flow_config):
     # Check if flow-accounting was removed and define command
     if not flow_config:
         _nftables_config([], 'ingress')
         _nftables_config([], 'egress')
 
         # Stop flow-accounting daemon and remove configuration file
         call(f'systemctl stop {systemd_service}')
         if os.path.exists(uacctd_conf_path):
             os.unlink(uacctd_conf_path)
 
         # must be done after systemctl
         _nftables_trigger_setup('delete')
 
         return
 
     # Start/reload flow-accounting daemon
     call(f'systemctl restart {systemd_service}')
 
     # configure nftables rules for defined interfaces
     if 'interface' in flow_config:
         _nftables_config(flow_config['interface'], 'ingress', flow_config['packet_length'])
 
         # configure egress the same way if configured, otherwise remove it
         if 'enable_egress' in flow_config:
             _nftables_config(flow_config['interface'], 'egress', flow_config['packet_length'])
         else:
             _nftables_config([], 'egress')
 
     # add a trigger for signal processing
     _nftables_trigger_setup('add')
 
 
 if __name__ == '__main__':
     try:
         config = get_config()
         verify(config)
         generate(config)
         apply(config)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/system_option.py b/src/conf_mode/system_option.py
index 180686924..9fd7a3195 100755
--- a/src/conf_mode/system_option.py
+++ b/src/conf_mode/system_option.py
@@ -1,194 +1,194 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2019-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import os
 
 from sys import exit
 from time import sleep
 
 from vyos.config import Config
 from vyos.configverify import verify_source_interface
 from vyos.configverify import verify_interface_exists
 from vyos.system import grub_util
 from vyos.template import render
 from vyos.utils.dict import dict_search
 from vyos.utils.file import write_file
 from vyos.utils.kernel import check_kmod
 from vyos.utils.process import cmd
 from vyos.utils.process import is_systemd_service_running
 from vyos.utils.network import is_addr_assigned
 from vyos.utils.network import is_intf_addr_assigned
 from vyos.configdep import set_dependents, call_dependents
 from vyos import ConfigError
 from vyos import airbag
 airbag.enable()
 
 curlrc_config = r'/etc/curlrc'
 ssh_config = r'/etc/ssh/ssh_config.d/91-vyos-ssh-client-options.conf'
 systemd_action_file = '/lib/systemd/system/ctrl-alt-del.target'
 usb_autosuspend = r'/etc/udev/rules.d/40-usb-autosuspend.rules'
 kernel_dynamic_debug = r'/sys/kernel/debug/dynamic_debug/control'
 time_format_to_locale = {
     '12-hour': 'en_US.UTF-8',
     '24-hour': 'en_GB.UTF-8'
 }
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
     base = ['system', 'option']
     options = conf.get_config_dict(base, key_mangling=('-', '_'),
                                    get_first_key=True,
                                    with_recursive_defaults=True)
 
     if 'performance' in options:
         # Update IPv4 and IPv6 options after TuneD reapplies
         # sysctl from config files
         for protocol in ['ip', 'ipv6']:
             set_dependents(protocol, conf)
 
     return options
 
 def verify(options):
     if 'http_client' in options:
         config = options['http_client']
         if 'source_interface' in config:
-            verify_interface_exists(config['source_interface'])
+            verify_interface_exists(options, config['source_interface'])
 
         if {'source_address', 'source_interface'} <= set(config):
             raise ConfigError('Can not define both HTTP source-interface and source-address')
 
         if 'source_address' in config:
             if not is_addr_assigned(config['source_address']):
                 raise ConfigError('No interface with the given address specified!')
 
     if 'ssh_client' in options:
         config = options['ssh_client']
         if 'source_address' in config:
             address = config['source_address']
             if not is_addr_assigned(config['source_address']):
                 raise ConfigError(f'No interface with address "{address}" configured!')
 
         if 'source_interface' in config:
             verify_source_interface(config)
             if 'source_address' in config:
                 address = config['source_address']
                 interface = config['source_interface']
                 if not is_intf_addr_assigned(interface, address):
                     raise ConfigError(f'Address "{address}" not assigned on interface "{interface}"!')
 
     return None
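
The subset test used above ({'source_address', 'source_interface'} <= set(config)) only fires when both keys are present, which is exactly the combination being rejected. For example:

config = {'source_address': '192.0.2.1', 'source_interface': 'eth0'}
assert {'source_address', 'source_interface'} <= set(config)
assert not {'source_address', 'source_interface'} <= set({'source_address': '192.0.2.1'})
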
 
 def generate(options):
     render(curlrc_config, 'system/curlrc.j2', options)
     render(ssh_config, 'system/ssh_config.j2', options)
     render(usb_autosuspend, 'system/40_usb_autosuspend.j2', options)
 
     cmdline_options = []
     if 'kernel' in options:
         if 'disable_mitigations' in options['kernel']:
             cmdline_options.append('mitigations=off')
         if 'disable_power_saving' in options['kernel']:
             cmdline_options.append('intel_idle.max_cstate=0 processor.max_cstate=1')
     grub_util.update_kernel_cmdline_options(' '.join(cmdline_options))
 
     return None
 
 def apply(options):
     # System bootup beep
     if 'startup_beep' in options:
         cmd('systemctl enable vyos-beep.service')
     else:
         cmd('systemctl disable vyos-beep.service')
 
     # Ctrl-Alt-Delete action
     if os.path.exists(systemd_action_file):
         os.unlink(systemd_action_file)
     if 'ctrl_alt_delete' in options:
         if options['ctrl_alt_delete'] == 'reboot':
             os.symlink('/lib/systemd/system/reboot.target', systemd_action_file)
         elif options['ctrl_alt_delete'] == 'poweroff':
             os.symlink('/lib/systemd/system/poweroff.target', systemd_action_file)
 
     # Configure HTTP client
     if 'http_client' not in options:
         if os.path.exists(curlrc_config):
             os.unlink(curlrc_config)
 
     # Configure SSH client
     if 'ssh_client' not in options:
         if os.path.exists(ssh_config):
             os.unlink(ssh_config)
 
     # Reboot system on kernel panic
     timeout = '0'
     if 'reboot_on_panic' in options:
         timeout = '60'
     with open('/proc/sys/kernel/panic', 'w') as f:
         f.write(timeout)
 
     # tuned - performance tuning
     if 'performance' in options:
         cmd('systemctl restart tuned.service')
         # wait until daemon has started before sending configuration
         while (not is_systemd_service_running('tuned.service')):
             sleep(0.250)
         cmd('tuned-adm profile network-{performance}'.format(**options))
     else:
         cmd('systemctl stop tuned.service')
 
     call_dependents()
 
     # Keyboard layout - there will always be a default key inside the dict
     # but we check for key existence anyway
     if 'keyboard_layout' in options:
         cmd('loadkeys {keyboard_layout}'.format(**options))
 
     # Enable/disable root-partition-auto-resize systemd service
     if 'root_partition_auto_resize' in options:
         cmd('systemctl enable root-partition-auto-resize.service')
     else:
         cmd('systemctl disable root-partition-auto-resize.service')
 
     # Time format 12|24-hour
     if 'time_format' in options:
         time_format = time_format_to_locale.get(options['time_format'])
         cmd(f'localectl set-locale LC_TIME={time_format}')
 
     # Reload UDEV, required for USB auto suspend
     cmd('udevadm control --reload-rules')
 
     # Enable/disable dynamic debugging for kernel modules
     modules = ['wireguard']
     modules_enabled = dict_search('kernel.debug', options) or []
     for module in modules:
         if module in modules_enabled:
             check_kmod(module)
             write_file(kernel_dynamic_debug, f'module {module} +p')
         else:
             write_file(kernel_dynamic_debug, f'module {module} -p')
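
dict_search('kernel.debug', options) walks a dotted key path through the nested options dictionary and yields None when any segment is missing (hence the 'or []' fallback above). A hypothetical stand-in (not the actual vyos.utils.dict implementation) with the same contract:

def dotted_get(path, data):
    # follow each path segment; bail out with None as soon as one is missing
    for key in path.split('.'):
        if not isinstance(data, dict) or key not in data:
            return None
        data = data[key]
    return data

assert dotted_get('kernel.debug', {'kernel': {'debug': ['wireguard']}}) == ['wireguard']
assert dotted_get('kernel.debug', {'kernel': {}}) is None
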
 
 if __name__ == '__main__':
     try:
         c = get_config()
         verify(c)
         generate(c)
         apply(c)
     except ConfigError as e:
         print(e)
         exit(1)
diff --git a/src/conf_mode/vpn_ipsec.py b/src/conf_mode/vpn_ipsec.py
index cf82b767f..65dd458ec 100755
--- a/src/conf_mode/vpn_ipsec.py
+++ b/src/conf_mode/vpn_ipsec.py
@@ -1,646 +1,646 @@
 #!/usr/bin/env python3
 #
 # Copyright (C) 2021-2024 VyOS maintainers and contributors
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License version 2 or later as
 # published by the Free Software Foundation.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import ipaddress
 import os
 import re
 import jmespath
 
 from sys import exit
 from time import sleep
 
 from vyos.base import Warning
 from vyos.config import Config
 from vyos.config import config_dict_merge
 from vyos.configdep import set_dependents
 from vyos.configdep import call_dependents
 from vyos.configdict import leaf_node_changed
 from vyos.configverify import verify_interface_exists
 from vyos.configverify import dynamic_interface_pattern
 from vyos.defaults import directories
 from vyos.ifconfig import Interface
 from vyos.pki import encode_public_key
 from vyos.pki import load_private_key
 from vyos.pki import wrap_certificate
 from vyos.pki import wrap_crl
 from vyos.pki import wrap_public_key
 from vyos.pki import wrap_private_key
 from vyos.template import ip_from_cidr
 from vyos.template import is_ipv4
 from vyos.template import is_ipv6
 from vyos.template import render
 from vyos.utils.network import is_ipv6_link_local
 from vyos.utils.network import interface_exists
 from vyos.utils.dict import dict_search
 from vyos.utils.dict import dict_search_args
 from vyos.utils.process import call
 from vyos import ConfigError
 from vyos import airbag
 airbag.enable()
 
 dhcp_wait_attempts = 2
 dhcp_wait_sleep = 1
 
 swanctl_dir        = '/etc/swanctl'
 charon_conf        = '/etc/strongswan.d/charon.conf'
 charon_dhcp_conf   = '/etc/strongswan.d/charon/dhcp.conf'
 charon_radius_conf = '/etc/strongswan.d/charon/eap-radius.conf'
 interface_conf     = '/etc/strongswan.d/interfaces_use.conf'
 swanctl_conf       = f'{swanctl_dir}/swanctl.conf'
 
 default_install_routes = 'yes'
 
 vici_socket = '/var/run/charon.vici'
 
 CERT_PATH   = f'{swanctl_dir}/x509/'
 PUBKEY_PATH = f'{swanctl_dir}/pubkey/'
 KEY_PATH    = f'{swanctl_dir}/private/'
 CA_PATH     = f'{swanctl_dir}/x509ca/'
 CRL_PATH    = f'{swanctl_dir}/x509crl/'
 
 DHCP_HOOK_IFLIST = '/tmp/ipsec_dhcp_interfaces'
 
 def get_config(config=None):
     if config:
         conf = config
     else:
         conf = Config()
     base = ['vpn', 'ipsec']
     l2tp_base = ['vpn', 'l2tp', 'remote-access', 'ipsec-settings']
     if not conf.exists(base):
         return None
 
     # retrieve common dictionary keys
     ipsec = conf.get_config_dict(base, key_mangling=('-', '_'),
                                  no_tag_node_value_mangle=True,
                                  get_first_key=True,
                                  with_pki=True)
 
     # We have to cleanup the default dict, as default values could
     # enable features which are not explicitly enabled on the
     # CLI. E.g. dead-peer-detection defaults should not be injected
     # unless the feature is explicitly opted in to by setting the
     # top-level node
     default_values = conf.get_config_defaults(**ipsec.kwargs, recursive=True)
 
     if 'ike_group' in ipsec:
         for name, ike in ipsec['ike_group'].items():
             if 'dead_peer_detection' not in ike:
                 del default_values['ike_group'][name]['dead_peer_detection']
 
     ipsec = config_dict_merge(default_values, ipsec)
 
     ipsec['dhcp_interfaces'] = set()
     ipsec['dhcp_no_address'] = {}
     ipsec['install_routes'] = 'no' if conf.exists(base + ["options", "disable-route-autoinstall"]) else default_install_routes
     ipsec['interface_change'] = leaf_node_changed(conf, base + ['interface'])
     ipsec['nhrp_exists'] = conf.exists(['protocols', 'nhrp', 'tunnel'])
 
     if ipsec['nhrp_exists']:
         set_dependents('nhrp', conf)
 
     tmp = conf.get_config_dict(l2tp_base, key_mangling=('-', '_'),
                                no_tag_node_value_mangle=True,
                                get_first_key=True)
     if tmp:
         ipsec['l2tp'] = conf.merge_defaults(tmp, recursive=True)
         ipsec['l2tp_outside_address'] = conf.return_value(['vpn', 'l2tp', 'remote-access', 'outside-address'])
         ipsec['l2tp_ike_default'] = 'aes256-sha1-modp1024,3des-sha1-modp1024'
         ipsec['l2tp_esp_default'] = 'aes256-sha1,3des-sha1'
 
     return ipsec
 
 def get_dhcp_address(iface):
     addresses = Interface(iface).get_addr()
     if not addresses:
         return None
     for address in addresses:
         if not is_ipv6_link_local(address):
             return ip_from_cidr(address)
     return None
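
get_dhcp_address() returns the first address that is not IPv6 link-local, stripped of its prefix length. Similar filtering with only the standard library (an illustrative stand-in, not the vyos helpers used above):

from ipaddress import ip_interface

def first_usable(addresses):
    # addresses come in CIDR form, e.g. '192.0.2.10/24'
    for address in addresses:
        candidate = ip_interface(address)
        if not candidate.ip.is_link_local:
            return str(candidate.ip)
    return None

assert first_usable(['fe80::1/64', '192.0.2.10/24']) == '192.0.2.10'
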
 
 def verify_pki_x509(pki, x509_conf):
     if not pki or 'ca' not in pki or 'certificate' not in pki:
         raise ConfigError(f'PKI is not configured')
 
     cert_name = x509_conf['certificate']
 
     for ca_cert_name in x509_conf['ca_certificate']:
         if not dict_search_args(pki, 'ca', ca_cert_name, 'certificate'):
             raise ConfigError(f'Missing CA certificate on specified PKI CA certificate "{ca_cert_name}"')
 
     if not dict_search_args(pki, 'certificate', cert_name, 'certificate'):
         raise ConfigError(f'Missing certificate on specified PKI certificate "{cert_name}"')
 
     if not dict_search_args(pki, 'certificate', cert_name, 'private', 'key'):
         raise ConfigError(f'Missing private key on specified PKI certificate "{cert_name}"')
 
     return True
 
 def verify_pki_rsa(pki, rsa_conf):
     if not pki or 'key_pair' not in pki:
         raise ConfigError(f'PKI is not configured')
 
     local_key = rsa_conf['local_key']
     remote_key = rsa_conf['remote_key']
 
     if not dict_search_args(pki, 'key_pair', local_key, 'private', 'key'):
         raise ConfigError(f'Missing private key on specified local-key "{local_key}"')
 
     if not dict_search_args(pki, 'key_pair', remote_key, 'public', 'key'):
         raise ConfigError(f'Missing public key on specified remote-key "{remote_key}"')
 
     return True
 
 def verify(ipsec):
     if not ipsec:
         return None
 
     if 'authentication' in ipsec:
         if 'psk' in ipsec['authentication']:
             for psk, psk_config in ipsec['authentication']['psk'].items():
                 if 'id' not in psk_config or 'secret' not in psk_config:
                     raise ConfigError(f'Authentication psk "{psk}" missing "id" or "secret"')
 
     if 'interface' in ipsec:
         tmp = re.compile(dynamic_interface_pattern)
         for interface in ipsec['interface']:
             # dynamic interfaces may not exist yet, so only warn if they are missing
             if tmp.match(interface):
-                verify_interface_exists(interface, warning_only=True)
+                verify_interface_exists(ipsec, interface, warning_only=True)
             else:
-                verify_interface_exists(interface)
+                verify_interface_exists(ipsec, interface)
 
     if 'l2tp' in ipsec:
         if 'esp_group' in ipsec['l2tp']:
             if 'esp_group' not in ipsec or ipsec['l2tp']['esp_group'] not in ipsec['esp_group']:
                 raise ConfigError(f"Invalid esp-group on L2TP remote-access config")
 
         if 'ike_group' in ipsec['l2tp']:
             if 'ike_group' not in ipsec or ipsec['l2tp']['ike_group'] not in ipsec['ike_group']:
                 raise ConfigError(f"Invalid ike-group on L2TP remote-access config")
 
         if 'authentication' not in ipsec['l2tp']:
             raise ConfigError(f'Missing authentication settings on L2TP remote-access config')
 
         if 'mode' not in ipsec['l2tp']['authentication']:
             raise ConfigError(f'Missing authentication mode on L2TP remote-access config')
 
         if not ipsec['l2tp_outside_address']:
             raise ConfigError(f'Missing outside-address on L2TP remote-access config')
 
         if ipsec['l2tp']['authentication']['mode'] == 'pre-shared-secret':
             if 'pre_shared_secret' not in ipsec['l2tp']['authentication']:
                 raise ConfigError(f'Missing pre-shared secret on L2TP remote-access config')
 
         if ipsec['l2tp']['authentication']['mode'] == 'x509':
             if 'x509' not in ipsec['l2tp']['authentication']:
                 raise ConfigError(f'Missing x509 settings on L2TP remote-access config')
 
             x509 = ipsec['l2tp']['authentication']['x509']
 
             if 'ca_certificate' not in x509 or 'certificate' not in x509:
                 raise ConfigError(f'Missing x509 certificates on L2TP remote-access config')
 
             verify_pki_x509(ipsec['pki'], x509)
 
     if 'profile' in ipsec:
         for profile, profile_conf in ipsec['profile'].items():
             if 'esp_group' in profile_conf:
                 if 'esp_group' not in ipsec or profile_conf['esp_group'] not in ipsec['esp_group']:
                     raise ConfigError(f"Invalid esp-group on {profile} profile")
             else:
                 raise ConfigError(f"Missing esp-group on {profile} profile")
 
             if 'ike_group' in profile_conf:
                 if 'ike_group' not in ipsec or profile_conf['ike_group'] not in ipsec['ike_group']:
                     raise ConfigError(f"Invalid ike-group on {profile} profile")
             else:
                 raise ConfigError(f"Missing ike-group on {profile} profile")
 
             if 'authentication' not in profile_conf:
                 raise ConfigError(f"Missing authentication on {profile} profile")
 
     if 'remote_access' in ipsec:
         if 'connection' in ipsec['remote_access']:
             for name, ra_conf in ipsec['remote_access']['connection'].items():
                 if 'local_address' not in ra_conf and 'dhcp_interface' not in ra_conf:
                     raise ConfigError(f"Missing local-address or dhcp-interface on remote-access connection {name}")
 
                 if 'dhcp_interface' in ra_conf:
                     dhcp_interface = ra_conf['dhcp_interface']
 
-                    verify_interface_exists(dhcp_interface)
+                    verify_interface_exists(ipsec, dhcp_interface)
                     dhcp_base = directories['isc_dhclient_dir']
 
                     if not os.path.exists(f'{dhcp_base}/dhclient_{dhcp_interface}.conf'):
                         raise ConfigError(f"Invalid dhcp-interface on remote-access connection {name}")
 
                     ipsec['dhcp_interfaces'].add(dhcp_interface)
 
                     address = get_dhcp_address(dhcp_interface)
                     count = 0
                     while not address and count < dhcp_wait_attempts:
                         address = get_dhcp_address(dhcp_interface)
                         count += 1
                         sleep(dhcp_wait_sleep)
 
                     if not address:
                         ipsec['dhcp_no_address'][f'ra_{name}'] = dhcp_interface
                         print(f"Failed to get address from dhcp-interface on remote-access connection {name} -- skipped")
                         continue
 
                 if 'esp_group' in ra_conf:
                     if 'esp_group' not in ipsec or ra_conf['esp_group'] not in ipsec['esp_group']:
                         raise ConfigError(f"Invalid esp-group on {name} remote-access config")
                 else:
                     raise ConfigError(f"Missing esp-group on {name} remote-access config")
 
                 if 'ike_group' in ra_conf:
                     if 'ike_group' not in ipsec or ra_conf['ike_group'] not in ipsec['ike_group']:
                         raise ConfigError(f"Invalid ike-group on {name} remote-access config")
 
                     ike = ra_conf['ike_group']
                     if dict_search(f'ike_group.{ike}.key_exchange', ipsec) != 'ikev2':
                         raise ConfigError('IPsec remote-access connections require IKEv2!')
 
                 else:
                     raise ConfigError(f"Missing ike-group on {name} remote-access config")
 
                 if 'authentication' not in ra_conf:
                     raise ConfigError(f"Missing authentication on {name} remote-access config")
 
                 if ra_conf['authentication']['server_mode'] == 'x509':
                     if 'x509' not in ra_conf['authentication']:
                         raise ConfigError(f"Missing x509 settings on {name} remote-access config")
 
                     x509 = ra_conf['authentication']['x509']
 
                     if 'ca_certificate' not in x509 or 'certificate' not in x509:
                         raise ConfigError(f"Missing x509 certificates on {name} remote-access config")
 
                     verify_pki_x509(ipsec['pki'], x509)
                 elif ra_conf['authentication']['server_mode'] == 'pre-shared-secret':
                     if 'pre_shared_secret' not in ra_conf['authentication']:
                         raise ConfigError(f"Missing pre-shared-key on {name} remote-access config")
 
                 if 'client_mode' not in ra_conf['authentication']:
                     raise ConfigError('Client authentication method is required!')
 
                 if dict_search('authentication.client_mode', ra_conf) == 'eap-radius':
                     if dict_search('remote_access.radius.server', ipsec) is None:
                         raise ConfigError('RADIUS authentication requires at least one server')
 
                 if 'pool' in ra_conf:
                     if {'dhcp', 'radius'} <= set(ra_conf['pool']):
                         raise ConfigError(f'Can not use both DHCP and RADIUS for address allocation '\
                                           f'at the same time for "{name}"!')
 
                     if 'dhcp' in ra_conf['pool'] and len(ra_conf['pool']) > 1:
                         raise ConfigError(f'Can not use DHCP and a predefined address pool for "{name}"!')
 
                     if 'radius' in ra_conf['pool'] and len(ra_conf['pool']) > 1:
                         raise ConfigError(f'Can not use RADIUS and a predefined address pool for "{name}"!')
 
                     for pool in ra_conf['pool']:
                         if pool == 'dhcp':
                             if dict_search('remote_access.dhcp.server', ipsec) is None:
                                 raise ConfigError('IPsec DHCP server is not configured!')
                         elif pool == 'radius':
                             if dict_search('remote_access.radius.server', ipsec) is None:
                                 raise ConfigError('IPsec RADIUS server is not configured!')
 
                             if dict_search('authentication.client_mode', ra_conf) != 'eap-radius':
                                 raise ConfigError('RADIUS IP pool requires eap-radius client authentication!')
 
                         elif 'pool' not in ipsec['remote_access'] or pool not in ipsec['remote_access']['pool']:
                             raise ConfigError(f'Requested pool "{pool}" does not exist!')
 
         if 'pool' in ipsec['remote_access']:
             for pool, pool_config in ipsec['remote_access']['pool'].items():
                 if 'prefix' not in pool_config:
                     raise ConfigError(f'Missing mandatory prefix option for pool "{pool}"!')
 
                 if 'name_server' in pool_config:
                     if len(pool_config['name_server']) > 2:
                         raise ConfigError(f'Only two name-servers are supported for remote-access pool "{pool}"!')
 
                     for ns in pool_config['name_server']:
                         v4_addr_and_ns = is_ipv4(ns) and not is_ipv4(pool_config['prefix'])
                         v6_addr_and_ns = is_ipv6(ns) and not is_ipv6(pool_config['prefix'])
                         if v4_addr_and_ns or v6_addr_and_ns:
                             raise ConfigError('Must use either IPv4 or IPv6 addresses for both the pool prefix and name-server addresses!')
 
                 if 'exclude' in pool_config:
                     for exclude in pool_config['exclude']:
                         v4_addr_and_exclude = is_ipv4(exclude) and not is_ipv4(pool_config['prefix'])
                         v6_addr_and_exclude = is_ipv6(exclude) and not is_ipv6(pool_config['prefix'])
                         if v4_addr_and_exclude or v6_addr_and_exclude:
                             raise ConfigError('Must use either IPv4 or IPv6 addresses for both the pool prefix and exclude prefixes!')
 
         if 'radius' in ipsec['remote_access'] and 'server' in ipsec['remote_access']['radius']:
             for server, server_config in ipsec['remote_access']['radius']['server'].items():
                 if 'key' not in server_config:
                     raise ConfigError(f'Missing RADIUS secret key for server "{server}"')
 
     if 'site_to_site' in ipsec and 'peer' in ipsec['site_to_site']:
         for peer, peer_conf in ipsec['site_to_site']['peer'].items():
             has_default_esp = False
             # The peer name is used as the swanctl connection name and must not contain dots or colons, T4118
             if bool(re.search(r':|\.', peer)):
                 raise ConfigError(f'Incorrect peer name "{peer}". '
                                   f'Peer names may only contain alphanumeric characters, hyphens and underscores')
 
             if 'remote_address' not in peer_conf:
                 print(f'You should set a correct remote-address "peer {peer} remote-address x.x.x.x"\n')
 
             if 'default_esp_group' in peer_conf:
                 has_default_esp = True
                 if 'esp_group' not in ipsec or peer_conf['default_esp_group'] not in ipsec['esp_group']:
                     raise ConfigError(f"Invalid esp-group on site-to-site peer {peer}")
 
             if 'ike_group' in peer_conf:
                 if 'ike_group' not in ipsec or peer_conf['ike_group'] not in ipsec['ike_group']:
                     raise ConfigError(f"Invalid ike-group on site-to-site peer {peer}")
             else:
                 raise ConfigError(f"Missing ike-group on site-to-site peer {peer}")
 
             if 'authentication' not in peer_conf or 'mode' not in peer_conf['authentication']:
                 raise ConfigError(f"Missing authentication on site-to-site peer {peer}")
 
             if {'id', 'use_x509_id'} <= set(peer_conf['authentication']):
                 raise ConfigError(f"Manually set peer id and use-x509-id are mutually exclusive!")
 
             if peer_conf['authentication']['mode'] == 'x509':
                 if 'x509' not in peer_conf['authentication']:
                     raise ConfigError(f"Missing x509 settings on site-to-site peer {peer}")
 
                 x509 = peer_conf['authentication']['x509']
 
                 if 'ca_certificate' not in x509 or 'certificate' not in x509:
                     raise ConfigError(f"Missing x509 certificates on site-to-site peer {peer}")
 
                 verify_pki_x509(ipsec['pki'], x509)
             elif peer_conf['authentication']['mode'] == 'rsa':
                 if 'rsa' not in peer_conf['authentication']:
                     raise ConfigError(f"Missing RSA settings on site-to-site peer {peer}")
 
                 rsa = peer_conf['authentication']['rsa']
 
                 if 'local_key' not in rsa:
                     raise ConfigError(f"Missing RSA local-key on site-to-site peer {peer}")
 
                 if 'remote_key' not in rsa:
                     raise ConfigError(f"Missing RSA remote-key on site-to-site peer {peer}")
 
                 verify_pki_rsa(ipsec['pki'], rsa)
 
             if 'local_address' not in peer_conf and 'dhcp_interface' not in peer_conf:
                 raise ConfigError(f"Missing local-address or dhcp-interface on site-to-site peer {peer}")
 
             if 'dhcp_interface' in peer_conf:
                 dhcp_interface = peer_conf['dhcp_interface']
 
-                verify_interface_exists(dhcp_interface)
+                verify_interface_exists(ipsec, dhcp_interface)
                 dhcp_base = directories['isc_dhclient_dir']
 
                 if not os.path.exists(f'{dhcp_base}/dhclient_{dhcp_interface}.conf'):
                     raise ConfigError(f"Invalid dhcp-interface on site-to-site peer {peer}")
 
                 ipsec['dhcp_interfaces'].add(dhcp_interface)
 
                 address = get_dhcp_address(dhcp_interface)
                 count = 0
                 while not address and count < dhcp_wait_attempts:
                     address = get_dhcp_address(dhcp_interface)
                     count += 1
                     sleep(dhcp_wait_sleep)
 
                 if not address:
                     ipsec['dhcp_no_address'][f'peer_{peer}'] = dhcp_interface
                     print(f"Failed to get address from dhcp-interface on site-to-site peer {peer} -- skipped")
                     continue
 
             if 'vti' in peer_conf:
                 if 'local_address' in peer_conf and 'dhcp_interface' in peer_conf:
                     raise ConfigError(f"A single local-address or dhcp-interface is required when using VTI on site-to-site peer {peer}")
 
                 if dict_search('options.disable_route_autoinstall',
                                ipsec) is None:
                     Warning('It is recommended to use IPsec VTI together with the following command\n[set vpn ipsec option disable-route-autoinstall]')
 
                 if 'bind' in peer_conf['vti']:
                     vti_interface = peer_conf['vti']['bind']
                     if not interface_exists(vti_interface):
                         raise ConfigError(f'VTI interface {vti_interface} for site-to-site peer {peer} does not exist!')
 
             if 'vti' not in peer_conf and 'tunnel' not in peer_conf:
                 raise ConfigError(f"No VTI or tunnel specified on site-to-site peer {peer}")
 
             if 'tunnel' in peer_conf:
                 for tunnel, tunnel_conf in peer_conf['tunnel'].items():
                     if 'esp_group' not in tunnel_conf and not has_default_esp:
                         raise ConfigError(f"Missing esp-group on tunnel {tunnel} for site-to-site peer {peer}")
 
                     esp_group_name = tunnel_conf['esp_group'] if 'esp_group' in tunnel_conf else peer_conf['default_esp_group']
 
                     if esp_group_name not in ipsec['esp_group']:
                         raise ConfigError(f"Invalid esp-group on tunnel {tunnel} for site-to-site peer {peer}")
 
                     esp_group = ipsec['esp_group'][esp_group_name]
 
                     if 'mode' in esp_group and esp_group['mode'] == 'transport':
                         if 'protocol' in tunnel_conf and ((peer in ['any', '0.0.0.0']) or ('local_address' not in peer_conf or peer_conf['local_address'] in ['any', '0.0.0.0'])):
                             raise ConfigError(f"Fixed local-address or peer required when a protocol is defined with ESP transport mode on tunnel {tunnel} for site-to-site peer {peer}")
 
                         if ('local' in tunnel_conf and 'prefix' in tunnel_conf['local']) or ('remote' in tunnel_conf and 'prefix' in tunnel_conf['remote']):
                             raise ConfigError(f"Local/remote prefix cannot be used with ESP transport mode on tunnel {tunnel} for site-to-site peer {peer}")
 
 def cleanup_pki_files():
     for path in [CERT_PATH, CA_PATH, CRL_PATH, KEY_PATH, PUBKEY_PATH]:
         if not os.path.exists(path):
             continue
         for file in os.listdir(path):
             file_path = os.path.join(path, file)
             if os.path.isfile(file_path):
                 os.unlink(file_path)
 
 def generate_pki_files_x509(pki, x509_conf):
     for ca_cert_name in x509_conf['ca_certificate']:
         ca_cert_data = dict_search_args(pki, 'ca', ca_cert_name, 'certificate')
         ca_cert_crls = dict_search_args(pki, 'ca', ca_cert_name, 'crl') or []
         crl_index = 1
 
         with open(os.path.join(CA_PATH, f'{ca_cert_name}.pem'), 'w') as f:
             f.write(wrap_certificate(ca_cert_data))
 
         for crl in ca_cert_crls:
             with open(os.path.join(CRL_PATH, f'{ca_cert_name}_{crl_index}.pem'), 'w') as f:
                 f.write(wrap_crl(crl))
             crl_index += 1
 
     cert_name = x509_conf['certificate']
     cert_data = dict_search_args(pki, 'certificate', cert_name, 'certificate')
     key_data = dict_search_args(pki, 'certificate', cert_name, 'private', 'key')
     protected = 'passphrase' in x509_conf
 
     with open(os.path.join(CERT_PATH, f'{cert_name}.pem'), 'w') as f:
         f.write(wrap_certificate(cert_data))
 
     with open(os.path.join(KEY_PATH, f'x509_{cert_name}.pem'), 'w') as f:
         f.write(wrap_private_key(key_data, protected))
 
 def generate_pki_files_rsa(pki, rsa_conf):
     local_key_name = rsa_conf['local_key']
     local_key_data = dict_search_args(pki, 'key_pair', local_key_name, 'private', 'key')
     protected = 'passphrase' in rsa_conf
     remote_key_name = rsa_conf['remote_key']
     remote_key_data = dict_search_args(pki, 'key_pair', remote_key_name, 'public', 'key')
 
     local_key = load_private_key(local_key_data, rsa_conf['passphrase'] if protected else None)
 
     with open(os.path.join(KEY_PATH, f'rsa_{local_key_name}.pem'), 'w') as f:
         f.write(wrap_private_key(local_key_data, protected))
 
     with open(os.path.join(PUBKEY_PATH, f'{local_key_name}.pem'), 'w') as f:
         f.write(encode_public_key(local_key.public_key()))
 
     with open(os.path.join(PUBKEY_PATH, f'{remote_key_name}.pem'), 'w') as f:
         f.write(wrap_public_key(remote_key_data))
 
 def generate(ipsec):
     cleanup_pki_files()
 
     if not ipsec:
         for config_file in [charon_dhcp_conf, charon_radius_conf, interface_conf, swanctl_conf]:
             if os.path.isfile(config_file):
                 os.unlink(config_file)
         render(charon_conf, 'ipsec/charon.j2', {'install_routes': default_install_routes})
         return
 
     if ipsec['dhcp_interfaces']:
         with open(DHCP_HOOK_IFLIST, 'w') as f:
             f.write(" ".join(ipsec['dhcp_interfaces']))
     elif os.path.exists(DHCP_HOOK_IFLIST):
         os.unlink(DHCP_HOOK_IFLIST)
 
     for path in [swanctl_dir, CERT_PATH, CA_PATH, CRL_PATH, PUBKEY_PATH]:
         if not os.path.exists(path):
             os.mkdir(path, mode=0o755)
 
     if not os.path.exists(KEY_PATH):
         os.mkdir(KEY_PATH, mode=0o700)
 
     if 'l2tp' in ipsec:
         if 'authentication' in ipsec['l2tp'] and 'x509' in ipsec['l2tp']['authentication']:
             generate_pki_files_x509(ipsec['pki'], ipsec['l2tp']['authentication']['x509'])
 
     if 'remote_access' in ipsec and 'connection' in ipsec['remote_access']:
         for rw, rw_conf in ipsec['remote_access']['connection'].items():
             if f'ra_{rw}' in ipsec['dhcp_no_address']:
                 continue
 
             local_ip = ''
             if 'local_address' in rw_conf:
                 local_ip = rw_conf['local_address']
             elif 'dhcp_interface' in rw_conf:
                 local_ip = get_dhcp_address(rw_conf['dhcp_interface'])
 
             ipsec['remote_access']['connection'][rw]['local_address'] = local_ip
 
             if 'authentication' in rw_conf and 'x509' in rw_conf['authentication']:
                 generate_pki_files_x509(ipsec['pki'], rw_conf['authentication']['x509'])
 
     if 'site_to_site' in ipsec and 'peer' in ipsec['site_to_site']:
         for peer, peer_conf in ipsec['site_to_site']['peer'].items():
             if f'peer_{peer}' in ipsec['dhcp_no_address']:
                 continue
 
             if peer_conf['authentication']['mode'] == 'x509':
                 generate_pki_files_x509(ipsec['pki'], peer_conf['authentication']['x509'])
             elif peer_conf['authentication']['mode'] == 'rsa':
                 generate_pki_files_rsa(ipsec['pki'], peer_conf['authentication']['rsa'])
 
             local_ip = ''
             if 'local_address' in peer_conf:
                 local_ip = peer_conf['local_address']
             elif 'dhcp_interface' in peer_conf:
                 local_ip = get_dhcp_address(peer_conf['dhcp_interface'])
 
             ipsec['site_to_site']['peer'][peer]['local_address'] = local_ip
 
             if 'tunnel' in peer_conf:
                 for tunnel, tunnel_conf in peer_conf['tunnel'].items():
                     local_prefixes = dict_search_args(tunnel_conf, 'local', 'prefix')
                     remote_prefixes = dict_search_args(tunnel_conf, 'remote', 'prefix')
 
                     if not local_prefixes or not remote_prefixes:
                         continue
 
                     passthrough = None
 
                     for local_prefix in local_prefixes:
                         for remote_prefix in remote_prefixes:
                             local_net = ipaddress.ip_network(local_prefix)
                             remote_net = ipaddress.ip_network(remote_prefix)
                             if local_net.overlaps(remote_net):
                                 if passthrough is None:
                                     passthrough = []
                                 passthrough.append(local_prefix)
 
                     ipsec['site_to_site']['peer'][peer]['tunnel'][tunnel]['passthrough'] = passthrough
 
         # auth psk <tag> dhcp-interface <xxx>
         if jmespath.search('authentication.psk.*.dhcp_interface', ipsec):
             for psk, psk_config in ipsec['authentication']['psk'].items():
                 if 'dhcp_interface' in psk_config:
                     for iface in psk_config['dhcp_interface']:
                         id = get_dhcp_address(iface)
                         if id:
                             ipsec['authentication']['psk'][psk]['id'].append(id)
 
     render(charon_conf, 'ipsec/charon.j2', ipsec)
     render(charon_dhcp_conf, 'ipsec/charon/dhcp.conf.j2', ipsec)
     render(charon_radius_conf, 'ipsec/charon/eap-radius.conf.j2', ipsec)
     render(interface_conf, 'ipsec/interfaces_use.conf.j2', ipsec)
     render(swanctl_conf, 'ipsec/swanctl.conf.j2', ipsec)
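
The 'passthrough' entries computed in the tunnel loop above are the local prefixes that overlap a remote prefix; the check relies on the standard ipaddress overlap test, for example:

from ipaddress import ip_network

assert ip_network('10.0.0.0/16').overlaps(ip_network('10.0.1.0/24'))
assert not ip_network('10.0.0.0/16').overlaps(ip_network('10.1.0.0/16'))
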
 
 
 def apply(ipsec):
     systemd_service = 'strongswan.service'
     if not ipsec:
         call(f'systemctl stop {systemd_service}')
     else:
         call(f'systemctl reload-or-restart {systemd_service}')
 
         if ipsec.get('nhrp_exists', False):
             try:
                 call_dependents()
             except ConfigError:
                 # Ignore config errors on dependent due to being called too early. Example:
                 # ConfigError("ConfigError('Interface ethN requires an IP address!')")
                 pass
 
 
 if __name__ == '__main__':
     try:
         ipsec = get_config()
         verify(ipsec)
         generate(ipsec)
         apply(ipsec)
     except ConfigError as e:
         print(e)
         exit(1)