diff --git a/python/vyos/frr.py b/python/vyos/frr.py
index 6fb81803f..183805e13 100644
--- a/python/vyos/frr.py
+++ b/python/vyos/frr.py
@@ -1,551 +1,568 @@
# Copyright 2020-2024 VyOS maintainers and contributors <maintainers@vyos.io>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.

r"""
A library for interacting with the FRR daemon suite.
It supports simple configuration manipulation and loading using the official
tools supplied with FRR (vtysh and frr-reload).

All configuration management and manipulation is done using strings and regex.

Example Usage
#####

# Reading configuration from frr:
```
>>> original_config = get_configuration()
>>> repr(original_config)
'!\nfrr version 7.3.1\nfrr defaults traditional\nhostname debian\n......
```

# Modify a configuration section:
```
>>> new_bgp_section = 'router bgp 65000\n neighbor 192.0.2.1 remote-as 65000\n'
>>> modified_config = replace_section(original_config, new_bgp_section, replace_re=r'router bgp \d+')
>>> repr(modified_config)
'............router bgp 65000\n neighbor 192.0.2.1 remote-as 65000\n...........'
```

Remove a configuration section:
```
>>> modified_config = remove_section(original_config, r'router ospf')
```

Test the new configuration:
```
>>> try:
>>>     mark_configuration(modified_config)
>>> except ConfigurationNotValid as e:
>>>     print('resulting configuration is not valid')
>>>     sys.exit(1)
```

Apply the new configuration:
```
>>> try:
>>>     replace_configuration(modified_config)
>>> except CommitError as e:
>>>     print('Exception while committing the supplied configuration')
>>>     print(e)
>>>     exit(1)
```
"""

import tempfile
import re

from vyos import ConfigError
from vyos.utils.process import cmd
from vyos.utils.process import popen
from vyos.utils.process import STDOUT

import logging
from logging.handlers import SysLogHandler
import os
import sys

LOG = logging.getLogger(__name__)
DEBUG = False

ch = SysLogHandler(address='/dev/log')
ch2 = logging.StreamHandler(stream=sys.stdout)
LOG.addHandler(ch)
LOG.addHandler(ch2)

-_frr_daemons = ['zebra', 'staticd', 'bgpd', 'ospfd', 'ospf6d', 'ripd', 'ripngd',
-                'isisd', 'pimd', 'pim6d', 'ldpd', 'eigrpd', 'babeld', 'bfdd', 'fabricd']
+babel_daemon = 'babeld'
+bfd_daemon = 'bfdd'
+bgp_daemon = 'bgpd'
+eigrp_daemon = 'eigrpd'
+isis_daemon = 'isisd'
+ldpd_daemon = 'ldpd'
+mgmt_daemon = 'mgmtd'
+openfabric_daemon = 'fabricd'
+ospf_daemon = 'ospfd'
+ospf6_daemon = 'ospf6d'
+pim_daemon = 'pimd'
+pim6_daemon = 'pim6d'
+rip_daemon = 'ripd'
+ripng_daemon = 'ripngd'
+static_daemon = 'staticd'
+zebra_daemon = 'zebra'
+
+_frr_daemons = [zebra_daemon, static_daemon, bgp_daemon, ospf_daemon, ospf6_daemon, rip_daemon, ripng_daemon, mgmt_daemon,
+                isis_daemon, pim_daemon, pim6_daemon, ldpd_daemon, eigrp_daemon, babel_daemon, bfd_daemon, openfabric_daemon]

path_vtysh = '/usr/bin/vtysh'
path_frr_reload = '/usr/lib/frr/frr-reload.py'
path_config = '/run/frr'

default_add_before = r'(ip prefix-list .*|route-map .*|line vty|end)'


class FrrError(Exception):
    pass


class ConfigurationNotValid(FrrError):
    """
    The configuration supplied to vtysh is not valid
    """
    pass


class CommitError(FrrError):
    """
    Committing the supplied configuration failed for an unknown reason; see the
    commit error and/or run mark_configuration on the specified configuration
    to see the error generated

    used by: reload_configuration()
    """
    pass


class ConfigSectionNotFound(FrrError):
    """
    Removal of the configuration section failed because it does not exist in
    the supplied configuration
    """
    pass


def init_debugging():
    global DEBUG
    DEBUG = os.path.exists('/tmp/vyos.frr.debug')
    if DEBUG:
        LOG.setLevel(logging.DEBUG)


def get_configuration(daemon=None, marked=False):
    """ Get the current running FRR configuration

    daemon: Collect only configuration for the specified FRR daemon,
            supplying daemon=None retrieves the complete configuration
    marked: Mark the configuration with "end" tags

    return: string containing the running configuration from frr
    """
    if daemon and daemon not in _frr_daemons:
        raise ValueError(f'The specified daemon type is not supported {repr(daemon)}')

    cmd = f"{path_vtysh} -c 'show run'"
    if daemon:
        cmd += f' -d {daemon}'

    output, code = popen(cmd, stderr=STDOUT)
    if code:
        raise OSError(code, output)

    config = output.replace('\r', '')
    # Remove first header lines from FRR config
    config = config.split("\n", 3)[-1]
    # Mark the configuration with end tags
    if marked:
        config = mark_configuration(config)

    return config


def mark_configuration(config):
    """ Add end marks and test the configuration for syntax faults

    If the configuration is valid a marked version of the configuration is
    returned, otherwise it fails with a ConfigurationNotValid exception

    config: The configuration string to mark/test
    return: The marked configuration from FRR
    """
    output, code = popen(f"{path_vtysh} -m -f -", stderr=STDOUT, input=config)

    if code == 2:
        raise ConfigurationNotValid(str(output))
    elif code:
        raise OSError(code, output)

    config = output.replace('\r', '')
    return config


def reload_configuration(config, daemon=None):
    """ Execute frr-reload with the new configuration

    This will try to reapply the supplied configuration inside FRR.
    The configuration needs to be a complete configuration from the integrated
    config or from a daemon.
config: The configuration to apply daemon: Apply the conigutaion to the specified FRR daemon, supplying daemon=None applies to the integrated configuration return: None """ if daemon and daemon not in _frr_daemons: raise ValueError(f'The specified daemon type is not supported {repr(daemon)}') f = tempfile.NamedTemporaryFile('w') f.write(config) f.flush() LOG.debug(f'reload_configuration: Reloading config using temporary file: {f.name}') cmd = f'{path_frr_reload} --reload' if daemon: cmd += f' --daemon {daemon}' if DEBUG: cmd += f' --debug --stdout' cmd += f' {f.name}' LOG.debug(f'reload_configuration: Executing command against frr-reload: "{cmd}"') output, code = popen(cmd, stderr=STDOUT) f.close() for i, e in enumerate(output.split('\n')): LOG.debug(f'frr-reload output: {i:3} {e}') if code == 1: raise ConfigError(output) elif code: raise OSError(code, output) return output def save_configuration(): """ T3217: Save FRR configuration to /run/frr/config/frr.conf """ return cmd(f'{path_vtysh} -n -w') def execute(command): """ Run commands inside vtysh command: str containing commands to execute inside a vtysh session """ if not isinstance(command, str): raise ValueError(f'command needs to be a string: {repr(command)}') cmd = f"{path_vtysh} -c '{command}'" output, code = popen(cmd, stderr=STDOUT) if code: raise OSError(code, output) config = output.replace('\r', '') return config def configure(lines, daemon=False): """ run commands inside config mode vtysh lines: list or str conaining commands to execute inside a configure session only one command executed on each configure() Executing commands inside a subcontext uses the list to describe the context ex: ['router bgp 6500', 'neighbor 192.0.2.1 remote-as 65000'] return: None """ if isinstance(lines, str): lines = [lines] elif not isinstance(lines, list): raise ValueError('lines needs to be string or list of commands') if daemon and daemon not in _frr_daemons: raise ValueError(f'The specified daemon type is not supported {repr(daemon)}') cmd = f'{path_vtysh}' if daemon: cmd += f' -d {daemon}' cmd += " -c 'configure terminal'" for x in lines: cmd += f" -c '{x}'" output, code = popen(cmd, stderr=STDOUT) if code == 1: raise ConfigurationNotValid(f'Configuration FRR failed: {repr(output)}') elif code: raise OSError(code, output) config = output.replace('\r', '') return config def _replace_section(config, replacement, replace_re, before_re): r"""Replace a section of FRR config config: full original configuration replacement: replacement configuration section replace_re: The regex to replace example: ^router bgp \d+$.?*^!$ this will replace everything between ^router bgp X$ and ^!$ before_re: When replace_re is not existant, the config will be added before this tag example: ^line vty$ return: modified configuration as a text file """ # DEPRECATED, this is replaced by a new implementation # Check if block is configured, remove the existing instance else add a new one if re.findall(replace_re, config, flags=re.MULTILINE | re.DOTALL): # Section is in the configration, replace it return re.sub(replace_re, replacement, config, count=1, flags=re.MULTILINE | re.DOTALL) if before_re: if not re.findall(before_re, config, flags=re.MULTILINE | re.DOTALL): raise ConfigSectionNotFound(f"Config section {before_re} not found in config") # If no section is in the configuration, add it before the line vty line return re.sub(before_re, rf'{replacement}\n\g<1>', config, count=1, flags=re.MULTILINE | re.DOTALL) raise ConfigSectionNotFound(f"Config section {replacement} 
not found in config") def replace_section(config, replacement, from_re, to_re=r'!', before_re=r'line vty'): r"""Replace a section of FRR config config: full original configuration replacement: replacement configuration section from_re: Regex for the start of section matching example: 'router bgp \d+' to_re: Regex for stop of section matching default: '!' example: '!' or 'end' before_re: When from_re/to_re does not return a match, the config will be added before this tag default: ^line vty$ startline and endline tags will be automatically added to the resulting from_re/to_re and before_re regex'es """ # DEPRECATED, this is replaced by a new implementation return _replace_section(config, replacement, replace_re=rf'^{from_re}$.*?^{to_re}$', before_re=rf'^({before_re})$') def remove_section(config, from_re, to_re='!'): # DEPRECATED, this is replaced by a new implementation return _replace_section(config, '', replace_re=rf'^{from_re}$.*?^{to_re}$', before_re=None) def _find_first_block(config, start_pattern, stop_pattern, start_at=0): '''Find start and stop line numbers for a config block config: (list) A list conaining the configuration that is searched start_pattern: (raw-str) The pattern searched for a a start of block tag stop_pattern: (raw-str) The pattern searched for to signify the end of the block start_at: (int) The index to start searching at in the <config> Returns: None: No complete block could be found set(int, int): A complete block found between the line numbers returned in the set The object <config> is searched from the start for the regex <start_pattern> until the first match is found. On a successful match it continues the search for the regex <stop_pattern> until it is found. After a successful run a set is returned containing the start and stop line numbers. 
''' LOG.debug(f'_find_first_block: find start={repr(start_pattern)} stop={repr(stop_pattern)} start_at={start_at}') _start = None for i, element in enumerate(config[start_at:], start=start_at): # LOG.debug(f'_find_first_block: running line {i:3} "{element}"') if not _start: if not re.match(start_pattern, element): LOG.debug(f'_find_first_block: no match {i:3} "{element}"') continue _start = i LOG.debug(f'_find_first_block: Found start {i:3} "{element}"') continue if not re.match(stop_pattern, element): LOG.debug(f'_find_first_block: no match {i:3} "{element}"') continue LOG.debug(f'_find_first_block: Found stop {i:3} "{element}"') return (_start, i) LOG.debug('_find_first_block: exit start={repr(start_pattern)} stop={repr(stop_pattern)} start_at={start_at}') return None def _find_first_element(config, pattern, start_at=0): '''Find the first element that matches the current pattern in config config: (list) A list containing the configuration that is searched start_pattern: (raw-str) The pattern searched for start_at: (int) The index to start searching at in the <config> return: Line index of the line containing the searched pattern TODO: for now it returns -1 on a no-match because 0 also returns as False TODO: that means that we can not use False matching to tell if its ''' LOG.debug(f'_find_first_element: find start="{pattern}" start_at={start_at}') for i, element in enumerate(config[start_at:], start=0): if re.match(pattern + '$', element): LOG.debug(f'_find_first_element: Found stop {i:3} "{element}"') return i LOG.debug(f'_find_first_element: no match {i:3} "{element}"') LOG.debug(f'_find_first_element: Did not find any match, exiting') return -1 def _find_elements(config, pattern, start_at=0): '''Find all instances of pattern and return a list containing all element indexes config: (list) A list containing the configuration that is searched start_pattern: (raw-str) The pattern searched for start_at: (int) The index to start searching at in the <config> return: A list of line indexes containing the searched pattern TODO: refactor this to return a generator instead ''' return [i for i, element in enumerate(config[start_at:], start=0) if re.match(pattern + '$', element)] class FRRConfig: '''Main FRR Configuration manipulation object Using this object the user could load, manipulate and commit the configuration to FRR ''' def __init__(self, config=[]): self.imported_config = '' if isinstance(config, list): self.config = config.copy() self.original_config = config.copy() elif isinstance(config, str): self.config = config.split('\n') self.original_config = self.config.copy() else: raise ValueError( 'The config element needs to be a string or list type object') if config: LOG.debug(f'__init__: frr library initiated with initial config') for i, e in enumerate(self.config): LOG.debug(f'__init__: initial {i:3} {e}') def load_configuration(self, daemon=None): '''Load the running configuration from FRR into the config object daemon: str with name of the FRR Daemon to load configuration from or None to load the consolidated config Using this overwrites the current loaded config objects and replaces the original loaded config ''' init_debugging() self.imported_config = get_configuration(daemon=daemon) if daemon: LOG.debug(f'load_configuration: Configuration loaded from FRR daemon {daemon}') else: LOG.debug(f'load_configuration: Configuration loaded from FRR integrated config') self.original_config = self.imported_config.split('\n') self.config = self.original_config.copy() for i, e in 
enumerate(self.imported_config.split('\n')): LOG.debug(f'load_configuration: loaded {i:3} {e}') return def test_configuration(self): '''Test the current configuration against FRR This will exception if FRR failes to load the current configuration object ''' LOG.debug('test_configation: Testing configuration') mark_configuration('\n'.join(self.config)) def commit_configuration(self, daemon=None): ''' Commit the current configuration to FRR daemon: str with name of the FRR daemon to commit to or None to use the consolidated config. Configuration is automatically saved after apply ''' LOG.debug('commit_configuration: Commiting configuration') for i, e in enumerate(self.config): LOG.debug(f'commit_configuration: new_config {i:3} {e}') # https://github.com/FRRouting/frr/issues/10132 # https://github.com/FRRouting/frr/issues/10133 count = 0 count_max = 5 emsg = '' while count < count_max: count += 1 try: reload_configuration('\n'.join(self.config), daemon=daemon) break except ConfigError as e: emsg = str(e) except: # we just need to re-try the commit of the configuration # for the listed FRR issues above pass if count >= count_max: if emsg: raise ConfigError(emsg) raise ConfigurationNotValid(f'Config commit retry counter ({count_max}) exceeded for {daemon} daemon!') # Save configuration to /run/frr/config/frr.conf save_configuration() def modify_section(self, start_pattern, replacement='!', stop_pattern=r'\S+', remove_stop_mark=False, count=0): if isinstance(replacement, str): replacement = replacement.split('\n') elif not isinstance(replacement, list): return ValueError("The replacement element needs to be a string or list type object") LOG.debug(f'modify_section: starting search for {repr(start_pattern)} until {repr(stop_pattern)}') _count = 0 _next_start = 0 while True: if count and count <= _count: # Break out of the loop after specified amount of matches LOG.debug(f'modify_section: reached limit ({_count}), exiting loop at line {_next_start}') break # While searching, always assume that the user wants to search for the exact pattern he entered # To be more specific the user needs a override, eg. 
a "pattern.*" _w = _find_first_block( self.config, start_pattern+'$', stop_pattern, start_at=_next_start) if not _w: # Reached the end, no more elements to remove LOG.debug(f'modify_section: No more config sections found, exiting') break start_element, end_element = _w LOG.debug(f'modify_section: found match between {start_element} and {end_element}') for i, e in enumerate(self.config[start_element:end_element+1 if remove_stop_mark else end_element], start=start_element): LOG.debug(f'modify_section: remove {i:3} {e}') del self.config[start_element:end_element + 1 if remove_stop_mark else end_element] if replacement: # Append the replacement config at the current position for i, e in enumerate(replacement, start=start_element): LOG.debug(f'modify_section: add {i:3} {e}') self.config[start_element:start_element] = replacement _count += 1 _next_start = start_element + len(replacement) return _count def add_before(self, before_pattern, addition): '''Add config block before this element in the configuration''' if isinstance(addition, str): addition = addition.split('\n') elif not isinstance(addition, list): return ValueError("The replacement element needs to be a string or list type object") start = _find_first_element(self.config, before_pattern) if start < 0: return False for i, e in enumerate(addition, start=start): LOG.debug(f'add_before: add {i:3} {e}') self.config[start:start] = addition return True def __str__(self): return '\n'.join(self.config) def __repr__(self): return f'frr({repr(str(self))})' diff --git a/smoketest/scripts/cli/test_interfaces_bonding.py b/smoketest/scripts/cli/test_interfaces_bonding.py index f436424b8..418744712 100755 --- a/smoketest/scripts/cli/test_interfaces_bonding.py +++ b/smoketest/scripts/cli/test_interfaces_bonding.py @@ -1,313 +1,314 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2023 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import unittest from base_interfaces_test import BasicInterfaceTest from vyos.ifconfig import Section from vyos.ifconfig.interface import Interface from vyos.configsession import ConfigSessionError from vyos.utils.network import get_interface_config from vyos.utils.file import read_file +from vyos.frr import mgmt_daemon class BondingInterfaceTest(BasicInterfaceTest.TestCase): @classmethod def setUpClass(cls): cls._base_path = ['interfaces', 'bonding'] cls._mirror_interfaces = ['dum21354'] cls._members = [] # we need to filter out VLAN interfaces identified by a dot (.) # in their name - just in case! 
if 'TEST_ETH' in os.environ: cls._members = os.environ['TEST_ETH'].split() else: for tmp in Section.interfaces('ethernet', vlan=False): cls._members.append(tmp) cls._options = {'bond0' : []} for member in cls._members: cls._options['bond0'].append(f'member interface {member}') cls._interfaces = list(cls._options) # call base-classes classmethod super(BondingInterfaceTest, cls).setUpClass() def test_add_single_ip_address(self): super().test_add_single_ip_address() for interface in self._interfaces: slaves = read_file(f'/sys/class/net/{interface}/bonding/slaves').split() self.assertListEqual(slaves, self._members) def test_vif_8021q_interfaces(self): super().test_vif_8021q_interfaces() for interface in self._interfaces: slaves = read_file(f'/sys/class/net/{interface}/bonding/slaves').split() self.assertListEqual(slaves, self._members) def test_bonding_remove_member(self): # T2515: when removing a bond member the previously enslaved/member # interface must be in its former admin-up/down state. Here we ensure # that it is admin-up as it was admin-up before. # configure member interfaces for interface in self._interfaces: for option in self._options.get(interface, []): self.cli_set(self._base_path + [interface] + option.split()) self.cli_commit() # remove single bond member port for interface in self._interfaces: remove_member = self._members[0] self.cli_delete(self._base_path + [interface, 'member', 'interface', remove_member]) self.cli_commit() # removed member port must be admin-up for interface in self._interfaces: remove_member = self._members[0] state = Interface(remove_member).get_admin_state() self.assertEqual('up', state) def test_bonding_min_links(self): # configure member interfaces min_links = len(self._interfaces) for interface in self._interfaces: for option in self._options.get(interface, []): self.cli_set(self._base_path + [interface] + option.split()) self.cli_set(self._base_path + [interface, 'min-links', str(min_links)]) self.cli_commit() # verify config for interface in self._interfaces: tmp = get_interface_config(interface) self.assertEqual(min_links, tmp['linkinfo']['info_data']['min_links']) # check LACP default rate self.assertEqual('slow', tmp['linkinfo']['info_data']['ad_lacp_rate']) def test_bonding_lacp_rate(self): # configure member interfaces lacp_rate = 'fast' for interface in self._interfaces: for option in self._options.get(interface, []): self.cli_set(self._base_path + [interface] + option.split()) self.cli_set(self._base_path + [interface, 'lacp-rate', lacp_rate]) self.cli_commit() # verify config for interface in self._interfaces: tmp = get_interface_config(interface) # check LACP minimum links (default value) self.assertEqual(0, tmp['linkinfo']['info_data']['min_links']) self.assertEqual(lacp_rate, tmp['linkinfo']['info_data']['ad_lacp_rate']) def test_bonding_hash_policy(self): # Define available bonding hash policies hash_policies = ['layer2', 'layer2+3', 'encap2+3', 'encap3+4'] for hash_policy in hash_policies: for interface in self._interfaces: for option in self._options.get(interface, []): self.cli_set(self._base_path + [interface] + option.split()) self.cli_set(self._base_path + [interface, 'hash-policy', hash_policy]) self.cli_commit() # verify config for interface in self._interfaces: defined_policy = read_file(f'/sys/class/net/{interface}/bonding/xmit_hash_policy').split() self.assertEqual(defined_policy[0], hash_policy) def test_bonding_mii_monitoring_interval(self): for interface in self._interfaces: for option in self._options.get(interface, 
[]): self.cli_set(self._base_path + [interface] + option.split()) self.cli_commit() # verify default for interface in self._interfaces: tmp = read_file(f'/sys/class/net/{interface}/bonding/miimon').split() self.assertIn('100', tmp) mii_mon = '250' for interface in self._interfaces: self.cli_set(self._base_path + [interface, 'mii-mon-interval', mii_mon]) self.cli_commit() # verify new CLI value for interface in self._interfaces: tmp = read_file(f'/sys/class/net/{interface}/bonding/miimon').split() self.assertIn(mii_mon, tmp) def test_bonding_multi_use_member(self): # Define available bonding hash policies for interface in ['bond10', 'bond20']: for member in self._members: self.cli_set(self._base_path + [interface, 'member', 'interface', member]) # check validate() - can not use the same member interfaces multiple times with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(self._base_path + ['bond20']) self.cli_commit() def test_bonding_source_interface(self): # Re-use member interface that is already a source-interface bond = 'bond99' pppoe = 'pppoe98756' member = next(iter(self._members)) self.cli_set(self._base_path + [bond, 'member', 'interface', member]) self.cli_set(['interfaces', 'pppoe', pppoe, 'source-interface', member]) # check validate() - can not add interface to bond, it is the source-interface of ... with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(['interfaces', 'pppoe', pppoe]) self.cli_commit() # verify config slaves = read_file(f'/sys/class/net/{bond}/bonding/slaves').split() self.assertIn(member, slaves) def test_bonding_source_bridge_interface(self): # Re-use member interface that is already a source-interface bond = 'bond1097' bridge = 'br6327' member = next(iter(self._members)) self.cli_set(self._base_path + [bond, 'member', 'interface', member]) self.cli_set(['interfaces', 'bridge', bridge, 'member', 'interface', member]) # check validate() - can not add interface to bond, it is a member of bridge ... 
with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(['interfaces', 'bridge', bridge]) self.cli_commit() # verify config slaves = read_file(f'/sys/class/net/{bond}/bonding/slaves').split() self.assertIn(member, slaves) def test_bonding_uniq_member_description(self): ethernet_path = ['interfaces', 'ethernet'] for interface in self._interfaces: for option in self._options.get(interface, []): self.cli_set(self._base_path + [interface] + option.split()) self.cli_commit() # Add any changes on bonding members # For example add description on separate ethX interfaces for interface in self._interfaces: for member in self._members: self.cli_set(ethernet_path + [member, 'description', member + '_interface']) self.cli_commit() # verify config for interface in self._interfaces: slaves = read_file(f'/sys/class/net/{interface}/bonding/slaves').split() for member in self._members: self.assertIn(member, slaves) def test_bonding_system_mac(self): # configure member interfaces and system-mac default_system_mac = '00:00:00:00:00:00' # default MAC is all zeroes system_mac = '00:50:ab:cd:ef:11' for interface in self._interfaces: for option in self._options.get(interface, []): self.cli_set(self._base_path + [interface] + option.split()) self.cli_set(self._base_path + [interface, 'system-mac', system_mac]) self.cli_commit() # verify config for interface in self._interfaces: tmp = read_file(f'/sys/class/net/{interface}/bonding/ad_actor_system') self.assertIn(tmp, system_mac) for interface in self._interfaces: self.cli_delete(self._base_path + [interface, 'system-mac']) self.cli_commit() # verify default value for interface in self._interfaces: tmp = read_file(f'/sys/class/net/{interface}/bonding/ad_actor_system') self.assertIn(tmp, default_system_mac) def test_bonding_evpn_multihoming(self): id = '5' for interface in self._interfaces: for option in self._options.get(interface, []): self.cli_set(self._base_path + [interface] + option.split()) self.cli_set(self._base_path + [interface, 'evpn', 'es-id', id]) self.cli_set(self._base_path + [interface, 'evpn', 'es-df-pref', id]) self.cli_set(self._base_path + [interface, 'evpn', 'es-sys-mac', f'00:12:34:56:78:0{id}']) self.cli_set(self._base_path + [interface, 'evpn', 'uplink']) id = int(id) + 1 self.cli_commit() id = '5' for interface in self._interfaces: - frrconfig = self.getFRRconfig(f'interface {interface}', daemon='zebra') + frrconfig = self.getFRRconfig(f'interface {interface}', daemon=mgmt_daemon) self.assertIn(f' evpn mh es-id {id}', frrconfig) self.assertIn(f' evpn mh es-df-pref {id}', frrconfig) self.assertIn(f' evpn mh es-sys-mac 00:12:34:56:78:0{id}', frrconfig) self.assertIn(f' evpn mh uplink', frrconfig) id = int(id) + 1 for interface in self._interfaces: self.cli_delete(self._base_path + [interface, 'evpn', 'es-id']) self.cli_delete(self._base_path + [interface, 'evpn', 'es-df-pref']) self.cli_commit() id = '5' for interface in self._interfaces: - frrconfig = self.getFRRconfig(f'interface {interface}', daemon='zebra') + frrconfig = self.getFRRconfig(f'interface {interface}', daemon=mgmt_daemon) self.assertIn(f' evpn mh es-sys-mac 00:12:34:56:78:0{id}', frrconfig) self.assertIn(f' evpn mh uplink', frrconfig) id = int(id) + 1 if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_interfaces_ethernet.py b/smoketest/scripts/cli/test_interfaces_ethernet.py index 3d12364f7..218fa0759 100755 --- a/smoketest/scripts/cli/test_interfaces_ethernet.py +++ 
b/smoketest/scripts/cli/test_interfaces_ethernet.py @@ -1,226 +1,227 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import unittest from glob import glob from json import loads from netifaces import AF_INET from netifaces import AF_INET6 from netifaces import ifaddresses from base_interfaces_test import BasicInterfaceTest from vyos.configsession import ConfigSessionError from vyos.ifconfig import Section +from vyos.frr import mgmt_daemon from vyos.utils.process import cmd from vyos.utils.process import popen from vyos.utils.file import read_file from vyos.utils.network import is_ipv6_link_local class EthernetInterfaceTest(BasicInterfaceTest.TestCase): @classmethod def setUpClass(cls): cls._base_path = ['interfaces', 'ethernet'] cls._mirror_interfaces = ['dum21354'] # We only test on physical interfaces and not VLAN (sub-)interfaces if 'TEST_ETH' in os.environ: tmp = os.environ['TEST_ETH'].split() cls._interfaces = tmp else: for tmp in Section.interfaces('ethernet', vlan=False): cls._interfaces.append(tmp) cls._macs = {} for interface in cls._interfaces: cls._macs[interface] = read_file(f'/sys/class/net/{interface}/address') # call base-classes classmethod super(EthernetInterfaceTest, cls).setUpClass() def tearDown(self): for interface in self._interfaces: # when using a dedicated interface to test via TEST_ETH environment # variable only this one will be cleared in the end - usable to test # ethernet interfaces via SSH self.cli_delete(self._base_path + [interface]) self.cli_set(self._base_path + [interface, 'duplex', 'auto']) self.cli_set(self._base_path + [interface, 'speed', 'auto']) self.cli_set(self._base_path + [interface, 'hw-id', self._macs[interface]]) self.cli_commit() # Verify that no address remains on the system as this is an eternal # interface. for interface in self._interfaces: self.assertNotIn(AF_INET, ifaddresses(interface)) # required for IPv6 link-local address self.assertIn(AF_INET6, ifaddresses(interface)) for addr in ifaddresses(interface)[AF_INET6]: # checking link local addresses makes no sense if is_ipv6_link_local(addr['addr']): continue self.assertFalse(is_intf_addr_assigned(interface, addr['addr'])) # Ensure no VLAN interfaces are left behind tmp = [x for x in Section.interfaces('ethernet') if x.startswith(f'{interface}.')] self.assertListEqual(tmp, []) def test_offloading_rps(self): # enable RPS on all available CPUs, RPS works with a CPU bitmask, # where each bit represents a CPU (core/thread). The formula below # expands to rps_cpus = 255 for a 8 core system rps_cpus = (1 << os.cpu_count()) -1 # XXX: we should probably reserve one core when the system is under # high preasure so we can still have a core left for housekeeping. # This is done by masking out the lowst bit so CPU0 is spared from # receive packet steering. 
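        # Worked example of the bitmask arithmetic above (a sketch with
        # illustrative values, assuming an 8-core system):
        #   (1 << 8) - 1  ->  0xff  all eight CPUs eligible for receive packet steering
        #   0xff & ~1     ->  0xfe  CPU0 cleared so it stays free for housekeeping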
rps_cpus &= ~1 for interface in self._interfaces: self.cli_set(self._base_path + [interface, 'offload', 'rps']) self.cli_commit() for interface in self._interfaces: cpus = read_file(f'/sys/class/net/{interface}/queues/rx-0/rps_cpus') # remove the nasty ',' separation on larger strings cpus = cpus.replace(',','') cpus = int(cpus, 16) self.assertEqual(f'{cpus:x}', f'{rps_cpus:x}') def test_offloading_rfs(self): global_rfs_flow = 32768 rfs_flow = global_rfs_flow for interface in self._interfaces: self.cli_set(self._base_path + [interface, 'offload', 'rfs']) self.cli_commit() for interface in self._interfaces: queues = len(glob(f'/sys/class/net/{interface}/queues/rx-*')) rfs_flow = int(global_rfs_flow/queues) for i in range(0, queues): tmp = read_file(f'/sys/class/net/{interface}/queues/rx-{i}/rps_flow_cnt') self.assertEqual(int(tmp), rfs_flow) tmp = read_file(f'/proc/sys/net/core/rps_sock_flow_entries') self.assertEqual(int(tmp), global_rfs_flow) # delete configuration of RFS and check all values returned to default "0" for interface in self._interfaces: self.cli_delete(self._base_path + [interface, 'offload', 'rfs']) self.cli_commit() for interface in self._interfaces: queues = len(glob(f'/sys/class/net/{interface}/queues/rx-*')) rfs_flow = int(global_rfs_flow/queues) for i in range(0, queues): tmp = read_file(f'/sys/class/net/{interface}/queues/rx-{i}/rps_flow_cnt') self.assertEqual(int(tmp), 0) def test_non_existing_interface(self): unknonw_interface = self._base_path + ['eth667'] self.cli_set(unknonw_interface) # check validate() - interface does not exist with self.assertRaises(ConfigSessionError): self.cli_commit() # we need to remove this wrong interface from the configuration # manually, else tearDown() will have problem in commit() self.cli_delete(unknonw_interface) def test_speed_duplex_verify(self): for interface in self._interfaces: self.cli_set(self._base_path + [interface, 'speed', '1000']) # check validate() - if either speed or duplex is not auto, the # other one must be manually configured, too with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(self._base_path + [interface, 'speed', 'auto']) self.cli_commit() def test_ethtool_ring_buffer(self): for interface in self._interfaces: # We do not use vyos.ethtool here to not have any chance # for invalid testcases. Re-gain data by hand tmp = cmd(f'sudo ethtool --json --show-ring {interface}') tmp = loads(tmp) max_rx = str(tmp[0]['rx-max']) max_tx = str(tmp[0]['tx-max']) self.cli_set(self._base_path + [interface, 'ring-buffer', 'rx', max_rx]) self.cli_set(self._base_path + [interface, 'ring-buffer', 'tx', max_tx]) self.cli_commit() for interface in self._interfaces: tmp = cmd(f'sudo ethtool --json --show-ring {interface}') tmp = loads(tmp) max_rx = str(tmp[0]['rx-max']) max_tx = str(tmp[0]['tx-max']) rx = str(tmp[0]['rx']) tx = str(tmp[0]['tx']) # validate if the above change was carried out properly and the # ring-buffer size got increased self.assertEqual(max_rx, rx) self.assertEqual(max_tx, tx) def test_ethtool_flow_control(self): for interface in self._interfaces: # Disable flow-control self.cli_set(self._base_path + [interface, 'disable-flow-control']) # Check current flow-control state on ethernet interface out, err = popen(f'sudo ethtool --json --show-pause {interface}') # Flow-control not supported - test if it bails out with a proper # this is a dynamic path where err = 1 on VMware, but err = 0 on # a physical box. 
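            # Summary of the two paths exercised below (derived from the
            # branch itself, not from captured ethtool output):
            #   err != 0 -> pause settings cannot be read, committing
            #               'disable-flow-control' must raise ConfigSessionError
            #   err == 0 -> autonegotiate reads as on, and must read as off
            #               once 'disable-flow-control' has been committed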
if bool(err): with self.assertRaises(ConfigSessionError): self.cli_commit() else: out = loads(out) # Flow control is on self.assertTrue(out[0]['autonegotiate']) # commit change on CLI to disable-flow-control and re-test self.cli_commit() out, err = popen(f'sudo ethtool --json --show-pause {interface}') out = loads(out) self.assertFalse(out[0]['autonegotiate']) def test_ethtool_evpn_uplink_tarcking(self): for interface in self._interfaces: self.cli_set(self._base_path + [interface, 'evpn', 'uplink']) self.cli_commit() for interface in self._interfaces: - frrconfig = self.getFRRconfig(f'interface {interface}', daemon='zebra') + frrconfig = self.getFRRconfig(f'interface {interface}', daemon=mgmt_daemon) self.assertIn(f' evpn mh uplink', frrconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_bgp.py b/smoketest/scripts/cli/test_protocols_bgp.py index ea2f561a4..ad9ce3676 100755 --- a/smoketest/scripts/cli/test_protocols_bgp.py +++ b/smoketest/scripts/cli/test_protocols_bgp.py @@ -1,1411 +1,1412 @@ #!/usr/bin/env python3 # -# Copyright (C) 2021-2023 VyOS maintainers and contributors +# Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
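# Note on the vyos.frr import below: vyos/frr.py (see the first hunk of this
# patch) defines daemon-name constants such as bgp_daemon == 'bgpd' and
# mgmt_daemon == 'mgmtd', so tests can reference a daemon by constant instead
# of a string literal, e.g. as the interface tests above do:
#     frrconfig = self.getFRRconfig(f'interface {interface}', daemon=mgmt_daemon)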
import unittest from time import sleep from base_vyostest_shim import VyOSUnitTestSHIM from vyos.ifconfig import Section from vyos.configsession import ConfigSessionError from vyos.template import is_ipv6 from vyos.utils.process import process_named_running from vyos.utils.process import cmd +from vyos.frr import bgp_daemon PROCESS_NAME = 'bgpd' ASN = '64512' base_path = ['protocols', 'bgp'] route_map_in = 'foo-map-in' route_map_out = 'foo-map-out' prefix_list_in = 'pfx-foo-in' prefix_list_out = 'pfx-foo-out' prefix_list_in6 = 'pfx-foo-in6' prefix_list_out6 = 'pfx-foo-out6' bfd_profile = 'foo-bar-baz' import_afi = 'ipv4-unicast' import_vrf = 'red' import_rd = ASN + ':100' import_vrf_base = ['vrf', 'name'] neighbor_config = { '192.0.2.1' : { 'bfd' : '', 'cap_dynamic' : '', 'cap_ext_next' : '', 'cap_ext_sver' : '', 'remote_as' : '100', 'adv_interv' : '400', 'passive' : '', 'password' : 'VyOS-Secure123', 'shutdown' : '', 'cap_over' : '', 'ttl_security' : '5', 'system_as' : '300', 'route_map_in' : route_map_in, 'route_map_out' : route_map_out, 'no_send_comm_ext' : '', 'addpath_all' : '', 'p_attr_discard' : ['10', '20', '30', '40', '50'], }, '192.0.2.2' : { 'bfd_profile' : bfd_profile, 'remote_as' : '200', 'shutdown' : '', 'no_cap_nego' : '', 'port' : '667', 'cap_strict' : '', 'advertise_map' : route_map_in, 'non_exist_map' : route_map_out, 'pfx_list_in' : prefix_list_in, 'pfx_list_out' : prefix_list_out, 'no_send_comm_std' : '', 'local_role' : 'rs-client', 'p_attr_taw' : '200', }, '192.0.2.3' : { 'advertise_map' : route_map_in, 'description' : 'foo bar baz', 'remote_as' : '200', 'passive' : '', 'multi_hop' : '5', 'update_src' : 'lo', 'peer_group' : 'foo', 'graceful_rst' : '', }, '2001:db8::1' : { 'advertise_map' : route_map_in, 'exist_map' : route_map_out, 'cap_dynamic' : '', 'cap_ext_next' : '', 'cap_ext_sver' : '', 'remote_as' : '123', 'adv_interv' : '400', 'passive' : '', 'password' : 'VyOS-Secure123', 'shutdown' : '', 'cap_over' : '', 'ttl_security' : '5', 'system_as' : '300', 'solo' : '', 'route_map_in' : route_map_in, 'route_map_out' : route_map_out, 'no_send_comm_std' : '', 'addpath_per_as' : '', 'peer_group' : 'foo-bar', 'local_role' : 'customer', 'local_role_strict': '', }, '2001:db8::2' : { 'remote_as' : '456', 'shutdown' : '', 'no_cap_nego' : '', 'port' : '667', 'cap_strict' : '', 'pfx_list_in' : prefix_list_in6, 'pfx_list_out' : prefix_list_out6, 'no_send_comm_ext' : '', 'peer_group' : 'foo-bar_baz', 'graceful_rst_hlp' : '', 'disable_conn_chk' : '', }, } peer_group_config = { 'foo' : { 'advertise_map' : route_map_in, 'exist_map' : route_map_out, 'bfd' : '', 'remote_as' : '100', 'passive' : '', 'password' : 'VyOS-Secure123', 'shutdown' : '', 'cap_over' : '', 'ttl_security' : '5', 'disable_conn_chk' : '', 'p_attr_discard' : ['100', '150', '200'], }, 'bar' : { 'remote_as' : '111', 'graceful_rst_no' : '', 'port' : '667', 'p_attr_taw' : '126', }, 'foo-bar' : { 'advertise_map' : route_map_in, 'description' : 'foo peer bar group', 'remote_as' : '200', 'shutdown' : '', 'no_cap_nego' : '', 'system_as' : '300', 'pfx_list_in' : prefix_list_in, 'pfx_list_out' : prefix_list_out, 'no_send_comm_ext' : '', }, 'foo-bar_baz' : { 'advertise_map' : route_map_in, 'non_exist_map' : route_map_out, 'bfd_profile' : bfd_profile, 'cap_dynamic' : '', 'cap_ext_next' : '', 'remote_as' : '200', 'passive' : '', 'multi_hop' : '5', 'update_src' : 'lo', 'route_map_in' : route_map_in, 'route_map_out' : route_map_out, 'local_role' : 'peer', 'local_role_strict': '', }, } class 
TestProtocolsBGP(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): super(TestProtocolsBGP, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same cls.daemon_pid = process_named_running(PROCESS_NAME) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) cls.cli_delete(cls, ['policy', 'route-map']) cls.cli_delete(cls, ['policy', 'prefix-list']) cls.cli_delete(cls, ['policy', 'prefix-list6']) cls.cli_delete(cls, ['vrf']) cls.cli_set(cls, ['policy', 'route-map', route_map_in, 'rule', '10', 'action', 'permit']) cls.cli_set(cls, ['policy', 'route-map', route_map_out, 'rule', '10', 'action', 'permit']) cls.cli_set(cls, ['policy', 'prefix-list', prefix_list_in, 'rule', '10', 'action', 'permit']) cls.cli_set(cls, ['policy', 'prefix-list', prefix_list_in, 'rule', '10', 'prefix', '192.0.2.0/25']) cls.cli_set(cls, ['policy', 'prefix-list', prefix_list_out, 'rule', '10', 'action', 'permit']) cls.cli_set(cls, ['policy', 'prefix-list', prefix_list_out, 'rule', '10', 'prefix', '192.0.2.128/25']) cls.cli_set(cls, ['policy', 'prefix-list6', prefix_list_in6, 'rule', '10', 'action', 'permit']) cls.cli_set(cls, ['policy', 'prefix-list6', prefix_list_in6, 'rule', '10', 'prefix', '2001:db8:1000::/64']) cls.cli_set(cls, ['policy', 'prefix-list6', prefix_list_out6, 'rule', '10', 'action', 'deny']) cls.cli_set(cls, ['policy', 'prefix-list6', prefix_list_out6, 'rule', '10', 'prefix', '2001:db8:2000::/64']) @classmethod def tearDownClass(cls): cls.cli_delete(cls, ['policy', 'route-map']) cls.cli_delete(cls, ['policy', 'prefix-list']) cls.cli_delete(cls, ['policy', 'prefix-list6']) def setUp(self): self.cli_set(base_path + ['system-as', ASN]) def tearDown(self): # cleanup any possible VRF mess self.cli_delete(['vrf']) # always destrox the entire bgpd configuration to make the processes # life as hard as possible self.cli_delete(base_path) self.cli_commit() # check process health and continuity self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) def create_bgp_instances_for_import_test(self): table = '1000' self.cli_set(import_vrf_base + [import_vrf, 'table', table]) self.cli_set(import_vrf_base + [import_vrf, 'protocols', 'bgp', 'system-as', ASN]) def verify_frr_config(self, peer, peer_config, frrconfig): # recurring patterns to verify for both a simple neighbor and a peer-group if 'bfd' in peer_config: self.assertIn(f' neighbor {peer} bfd', frrconfig) if 'bfd_profile' in peer_config: self.assertIn(f' neighbor {peer} bfd profile {peer_config["bfd_profile"]}', frrconfig) self.assertIn(f' neighbor {peer} bfd check-control-plane-failure', frrconfig) if 'cap_dynamic' in peer_config: self.assertIn(f' neighbor {peer} capability dynamic', frrconfig) if 'cap_ext_next' in peer_config: self.assertIn(f' neighbor {peer} capability extended-nexthop', frrconfig) if 'cap_ext_sver' in peer_config: self.assertIn(f' neighbor {peer} capability software-version', frrconfig) if 'description' in peer_config: self.assertIn(f' neighbor {peer} description {peer_config["description"]}', frrconfig) if 'no_cap_nego' in peer_config: self.assertIn(f' neighbor {peer} dont-capability-negotiate', frrconfig) if 'multi_hop' in peer_config: self.assertIn(f' neighbor {peer} ebgp-multihop {peer_config["multi_hop"]}', frrconfig) if 'local_as' in peer_config: self.assertIn(f' neighbor {peer} local-as {peer_config["local_as"]} no-prepend replace-as', frrconfig) if 'local_role' in peer_config: tmp = f' 
neighbor {peer} local-role {peer_config["local_role"]}' if 'local_role_strict' in peer_config: tmp += ' strict' self.assertIn(tmp, frrconfig) if 'cap_over' in peer_config: self.assertIn(f' neighbor {peer} override-capability', frrconfig) if 'passive' in peer_config: self.assertIn(f' neighbor {peer} passive', frrconfig) if 'password' in peer_config: self.assertIn(f' neighbor {peer} password {peer_config["password"]}', frrconfig) if 'port' in peer_config: self.assertIn(f' neighbor {peer} port {peer_config["port"]}', frrconfig) if 'remote_as' in peer_config: self.assertIn(f' neighbor {peer} remote-as {peer_config["remote_as"]}', frrconfig) if 'solo' in peer_config: self.assertIn(f' neighbor {peer} solo', frrconfig) if 'shutdown' in peer_config: self.assertIn(f' neighbor {peer} shutdown', frrconfig) if 'ttl_security' in peer_config: self.assertIn(f' neighbor {peer} ttl-security hops {peer_config["ttl_security"]}', frrconfig) if 'update_src' in peer_config: self.assertIn(f' neighbor {peer} update-source {peer_config["update_src"]}', frrconfig) if 'route_map_in' in peer_config: self.assertIn(f' neighbor {peer} route-map {peer_config["route_map_in"]} in', frrconfig) if 'route_map_out' in peer_config: self.assertIn(f' neighbor {peer} route-map {peer_config["route_map_out"]} out', frrconfig) if 'pfx_list_in' in peer_config: self.assertIn(f' neighbor {peer} prefix-list {peer_config["pfx_list_in"]} in', frrconfig) if 'pfx_list_out' in peer_config: self.assertIn(f' neighbor {peer} prefix-list {peer_config["pfx_list_out"]} out', frrconfig) if 'no_send_comm_std' in peer_config: self.assertIn(f' no neighbor {peer} send-community', frrconfig) if 'no_send_comm_ext' in peer_config: self.assertIn(f' no neighbor {peer} send-community extended', frrconfig) if 'addpath_all' in peer_config: self.assertIn(f' neighbor {peer} addpath-tx-all-paths', frrconfig) if 'p_attr_discard' in peer_config: tmp = ' '.join(peer_config["p_attr_discard"]) self.assertIn(f' neighbor {peer} path-attribute discard {tmp}', frrconfig) if 'p_attr_taw' in peer_config: self.assertIn(f' neighbor {peer} path-attribute treat-as-withdraw {peer_config["p_attr_taw"]}', frrconfig) if 'addpath_per_as' in peer_config: self.assertIn(f' neighbor {peer} addpath-tx-bestpath-per-AS', frrconfig) if 'advertise_map' in peer_config: base = f' neighbor {peer} advertise-map {peer_config["advertise_map"]}' if 'exist_map' in peer_config: base = f'{base} exist-map {peer_config["exist_map"]}' if 'non_exist_map' in peer_config: base = f'{base} non-exist-map {peer_config["non_exist_map"]}' self.assertIn(base, frrconfig) if 'graceful_rst' in peer_config: self.assertIn(f' neighbor {peer} graceful-restart', frrconfig) if 'graceful_rst_no' in peer_config: self.assertIn(f' neighbor {peer} graceful-restart-disable', frrconfig) if 'graceful_rst_hlp' in peer_config: self.assertIn(f' neighbor {peer} graceful-restart-helper', frrconfig) if 'disable_conn_chk' in peer_config: self.assertIn(f' neighbor {peer} disable-connected-check', frrconfig) def test_bgp_01_simple(self): router_id = '127.0.0.1' local_pref = '500' stalepath_time = '60' max_path_v4 = '2' max_path_v4ibgp = '4' max_path_v6 = '8' max_path_v6ibgp = '16' cond_adv_timer = '30' min_hold_time = '2' tcp_keepalive_idle = '66' tcp_keepalive_interval = '77' tcp_keepalive_probes = '22' self.cli_set(base_path + ['parameters', 'allow-martian-nexthop']) self.cli_set(base_path + ['parameters', 'disable-ebgp-connected-route-check']) self.cli_set(base_path + ['parameters', 'no-hard-administrative-reset']) self.cli_set(base_path 
+ ['parameters', 'log-neighbor-changes']) self.cli_set(base_path + ['parameters', 'labeled-unicast', 'explicit-null']) self.cli_set(base_path + ['parameters', 'router-id', router_id]) # System AS number MUST be defined - as this is set in setUp() we remove # this once for testing of the proper error self.cli_delete(base_path + ['system-as']) with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['system-as', ASN]) # Default local preference (higher = more preferred, default value is 100) self.cli_set(base_path + ['parameters', 'default', 'local-pref', local_pref]) self.cli_set(base_path + ['parameters', 'graceful-restart', 'stalepath-time', stalepath_time]) self.cli_set(base_path + ['parameters', 'graceful-shutdown']) self.cli_set(base_path + ['parameters', 'ebgp-requires-policy']) self.cli_set(base_path + ['parameters', 'bestpath', 'as-path', 'multipath-relax']) self.cli_set(base_path + ['parameters', 'bestpath', 'bandwidth', 'default-weight-for-missing']) self.cli_set(base_path + ['parameters', 'bestpath', 'compare-routerid']) self.cli_set(base_path + ['parameters', 'bestpath', 'peer-type', 'multipath-relax']) self.cli_set(base_path + ['parameters', 'conditional-advertisement', 'timer', cond_adv_timer]) self.cli_set(base_path + ['parameters', 'fast-convergence']) self.cli_set(base_path + ['parameters', 'minimum-holdtime', min_hold_time]) self.cli_set(base_path + ['parameters', 'no-suppress-duplicates']) self.cli_set(base_path + ['parameters', 'reject-as-sets']) self.cli_set(base_path + ['parameters', 'route-reflector-allow-outbound-policy']) self.cli_set(base_path + ['parameters', 'shutdown']) self.cli_set(base_path + ['parameters', 'suppress-fib-pending']) self.cli_set(base_path + ['parameters', 'tcp-keepalive', 'idle', tcp_keepalive_idle]) self.cli_set(base_path + ['parameters', 'tcp-keepalive', 'interval', tcp_keepalive_interval]) self.cli_set(base_path + ['parameters', 'tcp-keepalive', 'probes', tcp_keepalive_probes]) # AFI maximum path support self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'maximum-paths', 'ebgp', max_path_v4]) self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'maximum-paths', 'ibgp', max_path_v4ibgp]) self.cli_set(base_path + ['address-family', 'ipv4-labeled-unicast', 'maximum-paths', 'ebgp', max_path_v4]) self.cli_set(base_path + ['address-family', 'ipv4-labeled-unicast', 'maximum-paths', 'ibgp', max_path_v4ibgp]) self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'maximum-paths', 'ebgp', max_path_v6]) self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'maximum-paths', 'ibgp', max_path_v6ibgp]) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' bgp router-id {router_id}', frrconfig) self.assertIn(f' bgp allow-martian-nexthop', frrconfig) self.assertIn(f' bgp disable-ebgp-connected-route-check', frrconfig) self.assertIn(f' bgp log-neighbor-changes', frrconfig) self.assertIn(f' bgp default local-preference {local_pref}', frrconfig) self.assertIn(f' bgp conditional-advertisement timer {cond_adv_timer}', frrconfig) self.assertIn(f' bgp fast-convergence', frrconfig) self.assertIn(f' bgp graceful-restart stalepath-time {stalepath_time}', frrconfig) self.assertIn(f' bgp graceful-shutdown', frrconfig) self.assertIn(f' no bgp hard-administrative-reset', frrconfig) self.assertIn(f' bgp labeled-unicast explicit-null', frrconfig) self.assertIn(f' bgp bestpath as-path 
multipath-relax', frrconfig) self.assertIn(f' bgp bestpath bandwidth default-weight-for-missing', frrconfig) self.assertIn(f' bgp bestpath compare-routerid', frrconfig) self.assertIn(f' bgp bestpath peer-type multipath-relax', frrconfig) self.assertIn(f' bgp minimum-holdtime {min_hold_time}', frrconfig) self.assertIn(f' bgp reject-as-sets', frrconfig) self.assertIn(f' bgp route-reflector allow-outbound-policy', frrconfig) self.assertIn(f' bgp shutdown', frrconfig) self.assertIn(f' bgp suppress-fib-pending', frrconfig) self.assertIn(f' bgp tcp-keepalive {tcp_keepalive_idle} {tcp_keepalive_interval} {tcp_keepalive_probes}', frrconfig) self.assertNotIn(f'bgp ebgp-requires-policy', frrconfig) self.assertIn(f' no bgp suppress-duplicates', frrconfig) afiv4_config = self.getFRRconfig(' address-family ipv4 unicast') self.assertIn(f' maximum-paths {max_path_v4}', afiv4_config) self.assertIn(f' maximum-paths ibgp {max_path_v4ibgp}', afiv4_config) afiv4_config = self.getFRRconfig(' address-family ipv4 labeled-unicast') self.assertIn(f' maximum-paths {max_path_v4}', afiv4_config) self.assertIn(f' maximum-paths ibgp {max_path_v4ibgp}', afiv4_config) afiv6_config = self.getFRRconfig(' address-family ipv6 unicast') self.assertIn(f' maximum-paths {max_path_v6}', afiv6_config) self.assertIn(f' maximum-paths ibgp {max_path_v6ibgp}', afiv6_config) def test_bgp_02_neighbors(self): # Test out individual neighbor configuration items, not all of them are # also available to a peer-group! self.cli_set(base_path + ['parameters', 'deterministic-med']) for peer, peer_config in neighbor_config.items(): afi = 'ipv4-unicast' if is_ipv6(peer): afi = 'ipv6-unicast' if 'adv_interv' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'advertisement-interval', peer_config["adv_interv"]]) if 'bfd' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'bfd']) if 'bfd_profile' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'bfd', 'profile', peer_config["bfd_profile"]]) self.cli_set(base_path + ['neighbor', peer, 'bfd', 'check-control-plane-failure']) if 'cap_dynamic' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'capability', 'dynamic']) if 'cap_ext_next' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'capability', 'extended-nexthop']) if 'cap_ext_sver' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'capability', 'software-version']) if 'description' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'description', peer_config["description"]]) if 'no_cap_nego' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'disable-capability-negotiation']) if 'multi_hop' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'ebgp-multihop', peer_config["multi_hop"]]) if 'local_as' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'local-as', peer_config["local_as"], 'no-prepend', 'replace-as']) if 'local_role' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'local-role', peer_config["local_role"]]) if 'local_role_strict' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'local-role', peer_config["local_role"], 'strict']) if 'cap_over' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'override-capability']) if 'passive' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'passive']) if 'password' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'password', peer_config["password"]]) if 'port' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'port', peer_config["port"]]) if 
'remote_as' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'remote-as', peer_config["remote_as"]]) if 'cap_strict' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'strict-capability-match']) if 'shutdown' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'shutdown']) if 'solo' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'solo']) if 'ttl_security' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'ttl-security', 'hops', peer_config["ttl_security"]]) if 'update_src' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'update-source', peer_config["update_src"]]) if 'p_attr_discard' in peer_config: for attribute in peer_config['p_attr_discard']: self.cli_set(base_path + ['neighbor', peer, 'path-attribute', 'discard', attribute]) if 'p_attr_taw' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'path-attribute', 'treat-as-withdraw', peer_config["p_attr_taw"]]) if 'route_map_in' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'route-map', 'import', peer_config["route_map_in"]]) if 'route_map_out' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'route-map', 'export', peer_config["route_map_out"]]) if 'pfx_list_in' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'prefix-list', 'import', peer_config["pfx_list_in"]]) if 'pfx_list_out' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'prefix-list', 'export', peer_config["pfx_list_out"]]) if 'no_send_comm_std' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'disable-send-community', 'standard']) if 'no_send_comm_ext' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'disable-send-community', 'extended']) if 'addpath_all' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'addpath-tx-all']) if 'addpath_per_as' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'addpath-tx-per-as']) if 'graceful_rst' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'graceful-restart', 'enable']) if 'graceful_rst_no' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'graceful-restart', 'disable']) if 'graceful_rst_hlp' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'graceful-restart', 'restart-helper']) if 'disable_conn_chk' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'disable-connected-check']) # Conditional advertisement if 'advertise_map' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'conditionally-advertise', 'advertise-map', peer_config["advertise_map"]]) # Either exist-map or non-exist-map needs to be specified if 'exist_map' not in peer_config and 'non_exist_map' not in peer_config: with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'conditionally-advertise', 'exist-map', route_map_in]) if 'exist_map' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'conditionally-advertise', 'exist-map', peer_config["exist_map"]]) if 'non_exist_map' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'address-family', afi, 'conditionally-advertise', 'non-exist-map', peer_config["non_exist_map"]]) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') 
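        # Rough shape of the per-neighbor lines asserted below, pieced together
        # from neighbor_config['192.0.2.3'] and verify_frr_config() above
        # (illustrative, not captured FRR output):
        #   router bgp 64512
        #    neighbor 192.0.2.3 remote-as 200
        #    neighbor 192.0.2.3 description foo bar baz
        #    neighbor 192.0.2.3 ebgp-multihop 5
        #    neighbor 192.0.2.3 update-source lo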
self.assertIn(f'router bgp {ASN}', frrconfig) for peer, peer_config in neighbor_config.items(): if 'adv_interv' in peer_config: self.assertIn(f' neighbor {peer} advertisement-interval {peer_config["adv_interv"]}', frrconfig) if 'cap_strict' in peer_config: self.assertIn(f' neighbor {peer} strict-capability-match', frrconfig) self.verify_frr_config(peer, peer_config, frrconfig) def test_bgp_03_peer_groups(self): # Test out individual peer-group configuration items for peer_group, config in peer_group_config.items(): if 'bfd' in config: self.cli_set(base_path + ['peer-group', peer_group, 'bfd']) if 'bfd_profile' in config: self.cli_set(base_path + ['peer-group', peer_group, 'bfd', 'profile', config["bfd_profile"]]) self.cli_set(base_path + ['peer-group', peer_group, 'bfd', 'check-control-plane-failure']) if 'cap_dynamic' in config: self.cli_set(base_path + ['peer-group', peer_group, 'capability', 'dynamic']) if 'cap_ext_next' in config: self.cli_set(base_path + ['peer-group', peer_group, 'capability', 'extended-nexthop']) if 'cap_ext_sver' in config: self.cli_set(base_path + ['peer-group', peer_group, 'capability', 'software-version']) if 'description' in config: self.cli_set(base_path + ['peer-group', peer_group, 'description', config["description"]]) if 'no_cap_nego' in config: self.cli_set(base_path + ['peer-group', peer_group, 'disable-capability-negotiation']) if 'multi_hop' in config: self.cli_set(base_path + ['peer-group', peer_group, 'ebgp-multihop', config["multi_hop"]]) if 'local_as' in config: self.cli_set(base_path + ['peer-group', peer_group, 'local-as', config["local_as"], 'no-prepend', 'replace-as']) if 'local_role' in config: self.cli_set(base_path + ['peer-group', peer_group, 'local-role', config["local_role"]]) if 'local_role_strict' in config: self.cli_set(base_path + ['peer-group', peer_group, 'local-role', config["local_role"], 'strict']) if 'cap_over' in config: self.cli_set(base_path + ['peer-group', peer_group, 'override-capability']) if 'passive' in config: self.cli_set(base_path + ['peer-group', peer_group, 'passive']) if 'password' in config: self.cli_set(base_path + ['peer-group', peer_group, 'password', config["password"]]) if 'port' in config: self.cli_set(base_path + ['peer-group', peer_group, 'port', config["port"]]) if 'remote_as' in config: self.cli_set(base_path + ['peer-group', peer_group, 'remote-as', config["remote_as"]]) if 'shutdown' in config: self.cli_set(base_path + ['peer-group', peer_group, 'shutdown']) if 'ttl_security' in config: self.cli_set(base_path + ['peer-group', peer_group, 'ttl-security', 'hops', config["ttl_security"]]) if 'update_src' in config: self.cli_set(base_path + ['peer-group', peer_group, 'update-source', config["update_src"]]) if 'route_map_in' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'route-map', 'import', config["route_map_in"]]) if 'route_map_out' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'route-map', 'export', config["route_map_out"]]) if 'pfx_list_in' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'prefix-list', 'import', config["pfx_list_in"]]) if 'pfx_list_out' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'prefix-list', 'export', config["pfx_list_out"]]) if 'no_send_comm_std' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'disable-send-community', 'standard']) 
if 'no_send_comm_ext' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'disable-send-community', 'extended']) if 'addpath_all' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'addpath-tx-all']) if 'addpath_per_as' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'addpath-tx-per-as']) if 'graceful_rst' in config: self.cli_set(base_path + ['peer-group', peer_group, 'graceful-restart', 'enable']) if 'graceful_rst_no' in config: self.cli_set(base_path + ['peer-group', peer_group, 'graceful-restart', 'disable']) if 'graceful_rst_hlp' in config: self.cli_set(base_path + ['peer-group', peer_group, 'graceful-restart', 'restart-helper']) if 'disable_conn_chk' in config: self.cli_set(base_path + ['peer-group', peer_group, 'disable-connected-check']) if 'p_attr_discard' in config: for attribute in config['p_attr_discard']: self.cli_set(base_path + ['peer-group', peer_group, 'path-attribute', 'discard', attribute]) if 'p_attr_taw' in config: self.cli_set(base_path + ['peer-group', peer_group, 'path-attribute', 'treat-as-withdraw', config["p_attr_taw"]]) # Conditional advertisement if 'advertise_map' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'conditionally-advertise', 'advertise-map', config["advertise_map"]]) # Either exist-map or non-exist-map needs to be specified if 'exist_map' not in config and 'non_exist_map' not in config: with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'conditionally-advertise', 'exist-map', route_map_in]) if 'exist_map' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'conditionally-advertise', 'exist-map', config["exist_map"]]) if 'non_exist_map' in config: self.cli_set(base_path + ['peer-group', peer_group, 'address-family', 'ipv4-unicast', 'conditionally-advertise', 'non-exist-map', config["non_exist_map"]]) for peer, peer_config in neighbor_config.items(): if 'peer_group' in peer_config: self.cli_set(base_path + ['neighbor', peer, 'peer-group', peer_config['peer_group']]) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) for peer, peer_config in peer_group_config.items(): self.assertIn(f' neighbor {peer_group} peer-group', frrconfig) self.verify_frr_config(peer, peer_config, frrconfig) for peer, peer_config in neighbor_config.items(): if 'peer_group' in peer_config: self.assertIn(f' neighbor {peer} peer-group {peer_config["peer_group"]}', frrconfig) def test_bgp_04_afi_ipv4(self): networks = { '10.0.0.0/8' : { 'as_set' : '', 'summary_only' : '', 'route_map' : route_map_in, }, '100.64.0.0/10' : { 'as_set' : '', }, '192.168.0.0/16' : { 'summary_only' : '', }, } # We want to redistribute ... 
redistributes = ['connected', 'isis', 'kernel', 'ospf', 'rip', 'static'] for redistribute in redistributes: self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'redistribute', redistribute]) for network, network_config in networks.items(): self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'network', network]) if 'as_set' in network_config: self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'aggregate-address', network, 'as-set']) if 'summary_only' in network_config: self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'aggregate-address', network, 'summary-only']) if 'route_map' in network_config: self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'aggregate-address', network, 'route-map', network_config['route_map']]) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' address-family ipv4 unicast', frrconfig) for redistribute in redistributes: self.assertIn(f' redistribute {redistribute}', frrconfig) for network, network_config in networks.items(): self.assertIn(f' network {network}', frrconfig) command = f'aggregate-address {network}' if 'as_set' in network_config: command = f'{command} as-set' if 'summary_only' in network_config: command = f'{command} summary-only' if 'route_map' in network_config: command = f'{command} route-map {network_config["route_map"]}' self.assertIn(command, frrconfig) def test_bgp_05_afi_ipv6(self): networks = { '2001:db8:100::/48' : { }, '2001:db8:200::/48' : { }, '2001:db8:300::/48' : { 'summary_only' : '', }, } # We want to redistribute ... redistributes = ['connected', 'kernel', 'ospfv3', 'ripng', 'static'] for redistribute in redistributes: self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'redistribute', redistribute]) for network, network_config in networks.items(): self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'network', network]) if 'summary_only' in network_config: self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'aggregate-address', network, 'summary-only']) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' address-family ipv6 unicast', frrconfig) # T2100: By default ebgp-requires-policy is disabled to keep VyOS # 1.3 and 1.2 backwards compatibility self.assertIn(f' no bgp ebgp-requires-policy', frrconfig) for redistribute in redistributes: # FRR calls this OSPF6 if redistribute == 'ospfv3': redistribute = 'ospf6' self.assertIn(f' redistribute {redistribute}', frrconfig) for network, network_config in networks.items(): self.assertIn(f' network {network}', frrconfig) if 'as_set' in network_config: self.assertIn(f' aggregate-address {network} summary-only', frrconfig) def test_bgp_06_listen_range(self): # Implemented via T1875 limit = '64' listen_ranges = ['192.0.2.0/25', '192.0.2.128/25'] peer_group = 'listenfoobar' self.cli_set(base_path + ['listen', 'limit', limit]) for prefix in listen_ranges: self.cli_set(base_path + ['listen', 'range', prefix]) # check validate() - peer-group must be defined for range/prefix with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['listen', 'range', prefix, 'peer-group', peer_group]) # check validate() - peer-group does yet not exist! 
with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['peer-group', peer_group, 'remote-as', ASN]) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' neighbor {peer_group} peer-group', frrconfig) self.assertIn(f' neighbor {peer_group} remote-as {ASN}', frrconfig) self.assertIn(f' bgp listen limit {limit}', frrconfig) for prefix in listen_ranges: self.assertIn(f' bgp listen range {prefix} peer-group {peer_group}', frrconfig) def test_bgp_07_l2vpn_evpn(self): vnis = ['10010', '10020', '10030'] soo = '1.2.3.4:10000' evi_limit = '1000' route_targets = ['1.1.1.1:100', '1.1.1.1:200', '1.1.1.1:300'] self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'advertise-all-vni']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'advertise-default-gw']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'advertise-svi-ip']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'flooding', 'disable']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'default-originate', 'ipv4']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'default-originate', 'ipv6']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'disable-ead-evi-rx']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'disable-ead-evi-tx']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'mac-vrf', 'soo', soo]) for vni in vnis: self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'vni', vni, 'advertise-default-gw']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'vni', vni, 'advertise-svi-ip']) self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'ead-es-frag', 'evi-limit', evi_limit]) for route_target in route_targets: self.cli_set(base_path + ['address-family', 'l2vpn-evpn', 'ead-es-route-target', 'export', route_target]) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' address-family l2vpn evpn', frrconfig) self.assertIn(f' advertise-all-vni', frrconfig) self.assertIn(f' advertise-default-gw', frrconfig) self.assertIn(f' advertise-svi-ip', frrconfig) self.assertIn(f' default-originate ipv4', frrconfig) self.assertIn(f' default-originate ipv6', frrconfig) self.assertIn(f' disable-ead-evi-rx', frrconfig) self.assertIn(f' disable-ead-evi-tx', frrconfig) self.assertIn(f' flooding disable', frrconfig) self.assertIn(f' mac-vrf soo {soo}', frrconfig) for vni in vnis: vniconfig = self.getFRRconfig(f' vni {vni}') self.assertIn(f'vni {vni}', vniconfig) self.assertIn(f' advertise-default-gw', vniconfig) self.assertIn(f' advertise-svi-ip', vniconfig) self.assertIn(f' ead-es-frag evi-limit {evi_limit}', frrconfig) for route_target in route_targets: self.assertIn(f' ead-es-route-target export {route_target}', frrconfig) def test_bgp_09_distance_and_flowspec(self): distance_external = '25' distance_internal = '30' distance_local = '35' distance_v4_prefix = '169.254.0.0/32' distance_v6_prefix = '2001::/128' distance_prefix_value = '110' distance_families = ['ipv4-unicast', 'ipv6-unicast','ipv4-multicast', 'ipv6-multicast'] verify_families = ['ipv4 unicast', 'ipv6 unicast','ipv4 multicast', 'ipv6 multicast'] flowspec_families = ['address-family ipv4 flowspec', 'address-family ipv6 flowspec'] flowspec_int = 'lo' # Per family distance support for family in distance_families: 
self.cli_set(base_path + ['address-family', family, 'distance', 'external', distance_external]) self.cli_set(base_path + ['address-family', family, 'distance', 'internal', distance_internal]) self.cli_set(base_path + ['address-family', family, 'distance', 'local', distance_local]) if 'ipv4' in family: self.cli_set(base_path + ['address-family', family, 'distance', 'prefix', distance_v4_prefix, 'distance', distance_prefix_value]) if 'ipv6' in family: self.cli_set(base_path + ['address-family', family, 'distance', 'prefix', distance_v6_prefix, 'distance', distance_prefix_value]) # IPv4 flowspec interface check self.cli_set(base_path + ['address-family', 'ipv4-flowspec', 'local-install', 'interface', flowspec_int]) # IPv6 flowspec interface check self.cli_set(base_path + ['address-family', 'ipv6-flowspec', 'local-install', 'interface', flowspec_int]) # Commit changes self.cli_commit() # Verify FRR distances configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) for family in verify_families: self.assertIn(f'address-family {family}', frrconfig) self.assertIn(f'distance bgp {distance_external} {distance_internal} {distance_local}', frrconfig) if 'ipv4' in family: self.assertIn(f'distance {distance_prefix_value} {distance_v4_prefix}', frrconfig) if 'ipv6' in family: self.assertIn(f'distance {distance_prefix_value} {distance_v6_prefix}', frrconfig) # Verify FRR flowspec configuration for family in flowspec_families: self.assertIn(f'{family}', frrconfig) self.assertIn(f'local-install {flowspec_int}', frrconfig) def test_bgp_10_vrf_simple(self): router_id = '127.0.0.3' vrfs = ['red', 'green', 'blue'] # It is safe to assume that when the basic VRF test works, all # other BGP related features work, as we entirely inherit the CLI # templates and Jinja2 FRR template. 
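
As a side note (not part of the patch), the loop that follows drives one BGP instance per VRF. A hedged sketch of the same CLI sequence expressed with a plain ConfigSession, in case the pattern is reused outside the smoketest shim — the helper name is invented, the paths and the per-VRF table id mirror the cli_set calls below:

```python
from vyos.configsession import ConfigSession

def add_vrf_bgp_instance(session: ConfigSession, vrf: str, table: str,
                         asn: str, router_id: str) -> None:
    # Mirrors the loop below: each VRF needs its own routing table id, a BGP
    # system-as and a router-id, which FRR renders as a separate
    # 'router bgp <asn> vrf <name>' stanza.
    base = ['vrf', 'name', vrf]
    session.set(base + ['table', table])
    session.set(base + ['protocols', 'bgp', 'system-as', asn])
    session.set(base + ['protocols', 'bgp', 'parameters', 'router-id', router_id])
```
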
table = '1000' # testing only one AFI is sufficient as it's generic code for vrf in vrfs: vrf_base = ['vrf', 'name', vrf] self.cli_set(vrf_base + ['table', table]) self.cli_set(vrf_base + ['protocols', 'bgp', 'system-as', ASN]) self.cli_set(vrf_base + ['protocols', 'bgp', 'parameters', 'router-id', router_id]) table = str(int(table) + 1000) # import VRF routes do main RIB self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'import', 'vrf', vrf]) self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' address-family ipv6 unicast', frrconfig) for vrf in vrfs: self.assertIn(f' import vrf {vrf}', frrconfig) # Verify FRR bgpd configuration frr_vrf_config = self.getFRRconfig(f'router bgp {ASN} vrf {vrf}') self.assertIn(f'router bgp {ASN} vrf {vrf}', frr_vrf_config) self.assertIn(f' bgp router-id {router_id}', frr_vrf_config) def test_bgp_11_confederation(self): router_id = '127.10.10.2' confed_id = str(int(ASN) + 1) confed_asns = '10 20 30 40' self.cli_set(base_path + ['parameters', 'router-id', router_id]) self.cli_set(base_path + ['parameters', 'confederation', 'identifier', confed_id]) for asn in confed_asns.split(): self.cli_set(base_path + ['parameters', 'confederation', 'peers', asn]) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' bgp router-id {router_id}', frrconfig) self.assertIn(f' bgp confederation identifier {confed_id}', frrconfig) self.assertIn(f' bgp confederation peers {confed_asns}', frrconfig) def test_bgp_12_v6_link_local(self): remote_asn = str(int(ASN) + 10) interface = 'eth0' self.cli_set(base_path + ['neighbor', interface, 'address-family', 'ipv6-unicast']) self.cli_set(base_path + ['neighbor', interface, 'interface', 'v6only', 'remote-as', remote_asn]) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' neighbor {interface} interface v6only remote-as {remote_asn}', frrconfig) self.assertIn(f' address-family ipv6 unicast', frrconfig) self.assertIn(f' neighbor {interface} activate', frrconfig) self.assertIn(f' exit-address-family', frrconfig) def test_bgp_13_vpn(self): remote_asn = str(int(ASN) + 150) neighbor = '192.0.2.55' vrf_name = 'red' label = 'auto' rd = f'{neighbor}:{ASN}' rt_export = f'{neighbor}:1002 1.2.3.4:567' rt_import = f'{neighbor}:1003 500:100' # testing only one AFI is sufficient as it's generic code for afi in ['ipv4-unicast', 'ipv6-unicast']: self.cli_set(base_path + ['address-family', afi, 'export', 'vpn']) self.cli_set(base_path + ['address-family', afi, 'import', 'vpn']) self.cli_set(base_path + ['address-family', afi, 'label', 'vpn', 'export', label]) self.cli_set(base_path + ['address-family', afi, 'label', 'vpn', 'allocation-mode', 'per-nexthop']) self.cli_set(base_path + ['address-family', afi, 'rd', 'vpn', 'export', rd]) self.cli_set(base_path + ['address-family', afi, 'route-map', 'vpn', 'export', route_map_out]) self.cli_set(base_path + ['address-family', afi, 'route-map', 'vpn', 'import', route_map_in]) self.cli_set(base_path + ['address-family', afi, 'route-target', 'vpn', 'export', rt_export]) self.cli_set(base_path + ['address-family', afi, 'route-target', 'vpn', 'import', rt_import]) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = 
self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) for afi in ['ipv4', 'ipv6']: - afi_config = self.getFRRconfig(f' address-family {afi} unicast', endsection='exit-address-family', daemon='bgpd') + afi_config = self.getFRRconfig(f' address-family {afi} unicast', endsection='exit-address-family', daemon=bgp_daemon) self.assertIn(f'address-family {afi} unicast', afi_config) self.assertIn(f' export vpn', afi_config) self.assertIn(f' import vpn', afi_config) self.assertIn(f' label vpn export {label}', afi_config) self.assertIn(f' label vpn export allocation-mode per-nexthop', afi_config) self.assertIn(f' rd vpn export {rd}', afi_config) self.assertIn(f' route-map vpn export {route_map_out}', afi_config) self.assertIn(f' route-map vpn import {route_map_in}', afi_config) self.assertIn(f' rt vpn export {rt_export}', afi_config) self.assertIn(f' rt vpn import {rt_import}', afi_config) self.assertIn(f' exit-address-family', afi_config) def test_bgp_14_remote_as_peer_group_override(self): # Peer-group member cannot override remote-as of peer-group remote_asn = str(int(ASN) + 150) neighbor = '192.0.2.1' peer_group = 'bar' interface = 'eth0' self.cli_set(base_path + ['neighbor', neighbor, 'remote-as', remote_asn]) self.cli_set(base_path + ['neighbor', neighbor, 'peer-group', peer_group]) self.cli_set(base_path + ['peer-group', peer_group, 'remote-as', remote_asn]) # Peer-group member cannot override remote-as of peer-group with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['neighbor', neighbor, 'remote-as']) # re-test with interface based peer-group self.cli_set(base_path + ['neighbor', interface, 'interface', 'peer-group', peer_group]) self.cli_set(base_path + ['neighbor', interface, 'interface', 'remote-as', 'external']) with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['neighbor', interface, 'interface', 'remote-as']) # re-test with interface based v6only peer-group self.cli_set(base_path + ['neighbor', interface, 'interface', 'v6only', 'peer-group', peer_group]) self.cli_set(base_path + ['neighbor', interface, 'interface', 'v6only', 'remote-as', 'external']) with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['neighbor', interface, 'interface', 'v6only', 'remote-as']) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' neighbor {neighbor} peer-group {peer_group}', frrconfig) self.assertIn(f' neighbor {peer_group} peer-group', frrconfig) self.assertIn(f' neighbor {peer_group} remote-as {remote_asn}', frrconfig) def test_bgp_15_local_as_ebgp(self): # https://vyos.dev/T4560 # local-as allowed only for ebgp peers neighbor = '192.0.2.99' remote_asn = '500' local_asn = '400' self.cli_set(base_path + ['neighbor', neighbor, 'remote-as', ASN]) self.cli_set(base_path + ['neighbor', neighbor, 'local-as', local_asn]) # check validate() - local-as allowed only for ebgp peers with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['neighbor', neighbor, 'remote-as', remote_asn]) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' neighbor {neighbor} remote-as {remote_asn}', frrconfig) self.assertIn(f' neighbor {neighbor} local-as {local_asn}', frrconfig) def test_bgp_16_import_rd_rt_compatibility(self): # Verify if import vrf and rd vpn export # exist in the same address 
family self.create_bgp_instances_for_import_test() self.cli_set( base_path + ['address-family', import_afi, 'import', 'vrf', import_vrf]) self.cli_set( base_path + ['address-family', import_afi, 'rd', 'vpn', 'export', import_rd]) with self.assertRaises(ConfigSessionError): self.cli_commit() def test_bgp_17_import_rd_rt_compatibility(self): # Verify if vrf that is in import vrf list contains rd vpn export self.create_bgp_instances_for_import_test() self.cli_set( base_path + ['address-family', import_afi, 'import', 'vrf', import_vrf]) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') frrconfig_vrf = self.getFRRconfig(f'router bgp {ASN} vrf {import_vrf}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f'address-family ipv4 unicast', frrconfig) self.assertIn(f' import vrf {import_vrf}', frrconfig) self.assertIn(f'router bgp {ASN} vrf {import_vrf}', frrconfig_vrf) self.cli_set( import_vrf_base + [import_vrf] + base_path + ['address-family', import_afi, 'rd', 'vpn', 'export', import_rd]) with self.assertRaises(ConfigSessionError): self.cli_commit() def test_bgp_18_deleting_import_vrf(self): # Verify deleting vrf that is in import vrf list self.create_bgp_instances_for_import_test() self.cli_set( base_path + ['address-family', import_afi, 'import', 'vrf', import_vrf]) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') frrconfig_vrf = self.getFRRconfig(f'router bgp {ASN} vrf {import_vrf}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f'address-family ipv4 unicast', frrconfig) self.assertIn(f' import vrf {import_vrf}', frrconfig) self.assertIn(f'router bgp {ASN} vrf {import_vrf}', frrconfig_vrf) self.cli_delete(import_vrf_base + [import_vrf]) with self.assertRaises(ConfigSessionError): self.cli_commit() def test_bgp_19_deleting_default_vrf(self): # Verify deleting existent vrf default if other vrfs were created self.create_bgp_instances_for_import_test() self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') frrconfig_vrf = self.getFRRconfig(f'router bgp {ASN} vrf {import_vrf}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f'router bgp {ASN} vrf {import_vrf}', frrconfig_vrf) self.cli_delete(base_path) with self.assertRaises(ConfigSessionError): self.cli_commit() def test_bgp_20_import_rd_rt_compatibility(self): # Verify if vrf that has rd vpn export is in import vrf of other vrfs self.create_bgp_instances_for_import_test() self.cli_set( import_vrf_base + [import_vrf] + base_path + ['address-family', import_afi, 'rd', 'vpn', 'export', import_rd]) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') frrconfig_vrf = self.getFRRconfig(f'router bgp {ASN} vrf {import_vrf}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f'router bgp {ASN} vrf {import_vrf}', frrconfig_vrf) self.assertIn(f'address-family ipv4 unicast', frrconfig_vrf) self.assertIn(f' rd vpn export {import_rd}', frrconfig_vrf) self.cli_set( base_path + ['address-family', import_afi, 'import', 'vrf', import_vrf]) with self.assertRaises(ConfigSessionError): self.cli_commit() def test_bgp_21_import_unspecified_vrf(self): # Verify if vrf that is in import is unspecified self.create_bgp_instances_for_import_test() self.cli_set( base_path + ['address-family', import_afi, 'import', 'vrf', 'test']) with self.assertRaises(ConfigSessionError): self.cli_commit() def test_bgp_22_interface_mpls_forwarding(self): interfaces = Section.interfaces('ethernet', vlan=False) for interface in interfaces: self.cli_set(base_path + 
['interface', interface, 'mpls', 'forwarding']) self.cli_commit() for interface in interfaces: frrconfig = self.getFRRconfig(f'interface {interface}') self.assertIn(f'interface {interface}', frrconfig) self.assertIn(f' mpls bgp forwarding', frrconfig) def test_bgp_23_vrf_interface_mpls_forwarding(self): self.create_bgp_instances_for_import_test() interfaces = Section.interfaces('ethernet', vlan=False) for interface in interfaces: self.cli_set(['interfaces', 'ethernet', interface, 'vrf', import_vrf]) self.cli_set(import_vrf_base + [import_vrf] + base_path + ['interface', interface, 'mpls', 'forwarding']) self.cli_commit() for interface in interfaces: frrconfig = self.getFRRconfig(f'interface {interface}') self.assertIn(f'interface {interface}', frrconfig) self.assertIn(f' mpls bgp forwarding', frrconfig) self.cli_delete(['interfaces', 'ethernet', interface, 'vrf']) def test_bgp_24_srv6_sid(self): locator_name = 'VyOS_foo' sid = 'auto' nexthop_ipv4 = '192.0.0.1' nexthop_ipv6 = '2001:db8:100:200::2' self.cli_set(base_path + ['srv6', 'locator', locator_name]) self.cli_set(base_path + ['sid', 'vpn', 'per-vrf', 'export', sid]) self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'sid', 'vpn', 'export', sid]) # verify() - SID per VRF and SID per address-family are mutually exclusive! with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['address-family', 'ipv4-unicast', 'sid']) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' segment-routing srv6', frrconfig) self.assertIn(f' locator {locator_name}', frrconfig) self.assertIn(f' sid vpn per-vrf export {sid}', frrconfig) # Now test AFI SID self.cli_delete(base_path + ['sid']) self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'sid', 'vpn', 'export', sid]) self.cli_set(base_path + ['address-family', 'ipv4-unicast', 'nexthop', 'vpn', 'export', nexthop_ipv4]) self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'sid', 'vpn', 'export', sid]) self.cli_set(base_path + ['address-family', 'ipv6-unicast', 'nexthop', 'vpn', 'export', nexthop_ipv6]) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' segment-routing srv6', frrconfig) self.assertIn(f' locator {locator_name}', frrconfig) afiv4_config = self.getFRRconfig(' address-family ipv4 unicast') self.assertIn(f' sid vpn export {sid}', afiv4_config) self.assertIn(f' nexthop vpn export {nexthop_ipv4}', afiv4_config) afiv6_config = self.getFRRconfig(' address-family ipv6 unicast') self.assertIn(f' sid vpn export {sid}', afiv6_config) self.assertIn(f' nexthop vpn export {nexthop_ipv6}', afiv4_config) def test_bgp_25_ipv4_labeled_unicast_peer_group(self): pg_ipv4 = 'foo4' ipv4_max_prefix = '20' ipv4_prefix = '192.0.2.0/24' self.cli_set(base_path + ['listen', 'range', ipv4_prefix, 'peer-group', pg_ipv4]) self.cli_set(base_path + ['parameters', 'labeled-unicast', 'ipv4-explicit-null']) self.cli_set(base_path + ['peer-group', pg_ipv4, 'address-family', 'ipv4-labeled-unicast', 'maximum-prefix', ipv4_max_prefix]) self.cli_set(base_path + ['peer-group', pg_ipv4, 'remote-as', 'external']) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' neighbor {pg_ipv4} peer-group', frrconfig) self.assertIn(f' neighbor {pg_ipv4} remote-as external', frrconfig) self.assertIn(f' bgp listen range {ipv4_prefix} peer-group {pg_ipv4}', 
frrconfig) self.assertIn(f' bgp labeled-unicast ipv4-explicit-null', frrconfig) afiv4_config = self.getFRRconfig(' address-family ipv4 labeled-unicast') self.assertIn(f' neighbor {pg_ipv4} activate', afiv4_config) self.assertIn(f' neighbor {pg_ipv4} maximum-prefix {ipv4_max_prefix}', afiv4_config) def test_bgp_26_ipv6_labeled_unicast_peer_group(self): pg_ipv6 = 'foo6' ipv6_max_prefix = '200' ipv6_prefix = '2001:db8:1000::/64' self.cli_set(base_path + ['listen', 'range', ipv6_prefix, 'peer-group', pg_ipv6]) self.cli_set(base_path + ['parameters', 'labeled-unicast', 'ipv6-explicit-null']) self.cli_set(base_path + ['peer-group', pg_ipv6, 'address-family', 'ipv6-labeled-unicast', 'maximum-prefix', ipv6_max_prefix]) self.cli_set(base_path + ['peer-group', pg_ipv6, 'remote-as', 'external']) self.cli_commit() frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'router bgp {ASN}', frrconfig) self.assertIn(f' neighbor {pg_ipv6} peer-group', frrconfig) self.assertIn(f' neighbor {pg_ipv6} remote-as external', frrconfig) self.assertIn(f' bgp listen range {ipv6_prefix} peer-group {pg_ipv6}', frrconfig) self.assertIn(f' bgp labeled-unicast ipv6-explicit-null', frrconfig) afiv6_config = self.getFRRconfig(' address-family ipv6 labeled-unicast') self.assertIn(f' neighbor {pg_ipv6} activate', afiv6_config) self.assertIn(f' neighbor {pg_ipv6} maximum-prefix {ipv6_max_prefix}', afiv6_config) def test_bgp_27_route_reflector_client(self): self.cli_set(base_path + ['peer-group', 'peer1', 'address-family', 'l2vpn-evpn', 'route-reflector-client']) with self.assertRaises(ConfigSessionError) as e: self.cli_commit() self.cli_set(base_path + ['peer-group', 'peer1', 'remote-as', 'internal']) self.cli_commit() conf = self.getFRRconfig(' address-family l2vpn evpn') self.assertIn('neighbor peer1 route-reflector-client', conf) def test_bgp_28_peer_group_member_all_internal_or_external(self): def _common_config_check(conf, include_ras=True): if include_ras: self.assertIn(f'neighbor {int_neighbors[0]} remote-as {ASN}', conf) self.assertIn(f'neighbor {int_neighbors[1]} remote-as {ASN}', conf) self.assertIn(f'neighbor {ext_neighbors[0]} remote-as {int(ASN) + 1}',conf) self.assertIn(f'neighbor {int_neighbors[0]} peer-group {int_pg_name}', conf) self.assertIn(f'neighbor {int_neighbors[1]} peer-group {int_pg_name}', conf) self.assertIn(f'neighbor {ext_neighbors[0]} peer-group {ext_pg_name}', conf) int_neighbors = ['192.0.2.2', '192.0.2.3'] ext_neighbors = ['192.122.2.2', '192.122.2.3'] int_pg_name, ext_pg_name = 'SMOKETESTINT', 'SMOKETESTEXT' self.cli_set(base_path + ['neighbor', int_neighbors[0], 'peer-group', int_pg_name]) self.cli_set(base_path + ['neighbor', int_neighbors[0], 'remote-as', ASN]) self.cli_set(base_path + ['peer-group', int_pg_name, 'address-family', 'ipv4-unicast']) self.cli_set(base_path + ['neighbor', ext_neighbors[0], 'peer-group', ext_pg_name]) self.cli_set(base_path + ['neighbor', ext_neighbors[0], 'remote-as', f'{int(ASN) + 1}']) self.cli_set(base_path + ['peer-group', ext_pg_name, 'address-family', 'ipv4-unicast']) self.cli_commit() # test add external remote-as to internal group self.cli_set(base_path + ['neighbor', int_neighbors[1], 'peer-group', int_pg_name]) self.cli_set(base_path + ['neighbor', int_neighbors[1], 'remote-as', f'{int(ASN) + 1}']) with self.assertRaises(ConfigSessionError) as e: self.cli_commit() # self.assertIn('\nPeer-group members must be all internal or all external\n', str(e.exception)) # test add internal remote-as to internal group self.cli_set(base_path + 
['neighbor', int_neighbors[1], 'remote-as', ASN]) self.cli_commit() conf = self.getFRRconfig(f'router bgp {ASN}') _common_config_check(conf) # test add internal remote-as to external group self.cli_set(base_path + ['neighbor', ext_neighbors[1], 'peer-group', ext_pg_name]) self.cli_set(base_path + ['neighbor', ext_neighbors[1], 'remote-as', ASN]) with self.assertRaises(ConfigSessionError) as e: self.cli_commit() # self.assertIn('\nPeer-group members must be all internal or all external\n', str(e.exception)) # test add external remote-as to external group self.cli_set(base_path + ['neighbor', ext_neighbors[1], 'remote-as', f'{int(ASN) + 2}']) self.cli_commit() conf = self.getFRRconfig(f'router bgp {ASN}') _common_config_check(conf) self.assertIn(f'neighbor {ext_neighbors[1]} remote-as {int(ASN) + 2}', conf) self.assertIn(f'neighbor {ext_neighbors[1]} peer-group {ext_pg_name}', conf) # test named remote-as self.cli_set(base_path + ['neighbor', int_neighbors[0], 'remote-as', 'internal']) self.cli_set(base_path + ['neighbor', int_neighbors[1], 'remote-as', 'internal']) self.cli_set(base_path + ['neighbor', ext_neighbors[0], 'remote-as', 'external']) self.cli_set(base_path + ['neighbor', ext_neighbors[1], 'remote-as', 'external']) self.cli_commit() conf = self.getFRRconfig(f'router bgp {ASN}') _common_config_check(conf, include_ras=False) self.assertIn(f'neighbor {int_neighbors[0]} remote-as internal', conf) self.assertIn(f'neighbor {int_neighbors[1]} remote-as internal', conf) self.assertIn(f'neighbor {ext_neighbors[0]} remote-as external', conf) self.assertIn(f'neighbor {ext_neighbors[1]} remote-as external', conf) self.assertIn(f'neighbor {ext_neighbors[1]} peer-group {ext_pg_name}', conf) def test_bgp_29_peer_group_remote_as_equal_local_as(self): self.cli_set(base_path + ['system-as', ASN]) self.cli_set(base_path + ['peer-group', 'OVERLAY', 'local-as', f'{int(ASN) + 1}']) self.cli_set(base_path + ['peer-group', 'OVERLAY', 'remote-as', f'{int(ASN) + 1}']) self.cli_set(base_path + ['peer-group', 'OVERLAY', 'address-family', 'l2vpn-evpn']) self.cli_set(base_path + ['peer-group', 'UNDERLAY', 'address-family', 'ipv4-unicast']) self.cli_set(base_path + ['neighbor', '10.177.70.62', 'peer-group', 'UNDERLAY']) self.cli_set(base_path + ['neighbor', '10.177.70.62', 'remote-as', 'external']) self.cli_set(base_path + ['neighbor', '10.177.75.1', 'peer-group', 'OVERLAY']) self.cli_set(base_path + ['neighbor', '10.177.75.2', 'peer-group', 'OVERLAY']) self.cli_commit() conf = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'neighbor OVERLAY remote-as {int(ASN) + 1}', conf) self.assertIn(f'neighbor OVERLAY local-as {int(ASN) + 1}', conf) def test_bgp_99_bmp(self): target_name = 'instance-bmp' target_address = '127.0.0.1' target_port = '5000' min_retry = '1024' max_retry = '2048' monitor_ipv4 = 'pre-policy' monitor_ipv6 = 'pre-policy' mirror_buffer = '32000000' bmp_path = base_path + ['bmp'] target_path = bmp_path + ['target', target_name] # by default the 'bmp' module not loaded for the bgpd expect Error self.cli_set(bmp_path) if not process_named_running('bgpd', 'bmp'): with self.assertRaises(ConfigSessionError): self.cli_commit() # add required 'bmp' module to bgpd and restart bgpd self.cli_delete(bmp_path) self.cli_set(['system', 'frr', 'bmp']) self.cli_commit() # restart bgpd to apply "-M bmp" and update PID cmd(f'sudo kill -9 {self.daemon_pid}') # let the bgpd process recover sleep(10) # update daemon PID - this was a planned daemon restart self.daemon_pid = process_named_running(PROCESS_NAME) # 
set bmp config but not set address self.cli_set(target_path + ['port', target_port]) # address is not set, expect Error with self.assertRaises(ConfigSessionError): self.cli_commit() # config other bmp options self.cli_set(target_path + ['address', target_address]) self.cli_set(bmp_path + ['mirror-buffer-limit', mirror_buffer]) self.cli_set(target_path + ['port', target_port]) self.cli_set(target_path + ['min-retry', min_retry]) self.cli_set(target_path + ['max-retry', max_retry]) self.cli_set(target_path + ['mirror']) self.cli_set(target_path + ['monitor', 'ipv4-unicast', monitor_ipv4]) self.cli_set(target_path + ['monitor', 'ipv6-unicast', monitor_ipv6]) self.cli_commit() # Verify bgpd bmp configuration frrconfig = self.getFRRconfig(f'router bgp {ASN}') self.assertIn(f'bmp mirror buffer-limit {mirror_buffer}', frrconfig) self.assertIn(f'bmp targets {target_name}', frrconfig) self.assertIn(f'bmp mirror', frrconfig) self.assertIn(f'bmp monitor ipv4 unicast {monitor_ipv4}', frrconfig) self.assertIn(f'bmp monitor ipv6 unicast {monitor_ipv6}', frrconfig) self.assertIn(f'bmp connect {target_address} port {target_port} min-retry {min_retry} max-retry {max_retry}', frrconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_isis.py b/smoketest/scripts/cli/test_protocols_isis.py index 769f3dd33..5b86dd53a 100755 --- a/smoketest/scripts/cli/test_protocols_isis.py +++ b/smoketest/scripts/cli/test_protocols_isis.py @@ -1,416 +1,417 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
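
The IS-IS test below applies the same refactoring pattern as the BGP changes above: the hard-coded daemon string passed to getFRRconfig() is replaced with the constant exported by vyos.frr. A minimal sketch of the pattern, assuming only that the constants introduced by this patch are importable:

```python
# Sketch only, not part of the patch: query isisd through the shared constant
# instead of a hard-coded 'isisd' literal.
from vyos.frr import isis_daemon

def get_isis_instance(test, domain):
    # test is a VyOSUnitTestSHIM.TestCase; getFRRconfig() returns the matching
    # stanza of the daemon's running configuration as plain text.
    return test.getFRRconfig(f'router isis {domain}', daemon=isis_daemon)
```
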
import unittest

from base_vyostest_shim import VyOSUnitTestSHIM

from vyos.configsession import ConfigSessionError
from vyos.ifconfig import Section
from vyos.utils.process import process_named_running
+from vyos.frr import isis_daemon

PROCESS_NAME = 'isisd'
base_path = ['protocols', 'isis']

domain = 'VyOS'
net = '49.0001.1921.6800.1002.00'

class TestProtocolsISIS(VyOSUnitTestSHIM.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._interfaces = Section.interfaces('ethernet')
        # call base-classes classmethod
        super(TestProtocolsISIS, cls).setUpClass()
        # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same
        cls.daemon_pid = process_named_running(PROCESS_NAME)
        # ensure we can also run this test on a live system - so lets clean
        # out the current configuration :)
        cls.cli_delete(cls, base_path)
        cls.cli_delete(cls, ['vrf'])

    def tearDown(self):
        # cleanup any possible VRF mess
        self.cli_delete(['vrf'])
        # always destroy the entire isisd configuration to make the process's
        # life as hard as possible
        self.cli_delete(base_path)
        self.cli_commit()
        # check process health and continuity
        self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME))

    def isis_base_config(self):
        self.cli_set(base_path + ['net', net])
        for interface in self._interfaces:
            self.cli_set(base_path + ['interface', interface])

    def test_isis_01_redistribute(self):
        prefix_list = 'EXPORT-ISIS'
        route_map = 'EXPORT-ISIS'
        rule = '10'
        metric_style = 'transition'

        self.cli_set(['policy', 'prefix-list', prefix_list, 'rule', rule, 'action', 'permit'])
        self.cli_set(['policy', 'prefix-list', prefix_list, 'rule', rule, 'prefix', '203.0.113.0/24'])
        self.cli_set(['policy', 'route-map', route_map, 'rule', rule, 'action', 'permit'])
        self.cli_set(['policy', 'route-map', route_map, 'rule', rule, 'match', 'ip', 'address', 'prefix-list', prefix_list])

        self.cli_set(base_path)
        # verify() - net id and interface are mandatory
        with self.assertRaises(ConfigSessionError):
            self.cli_commit()

        self.isis_base_config()
        self.cli_set(base_path + ['redistribute', 'ipv4', 'connected'])
        # verify() - Redistribute level-1 or level-2 should be specified
        with self.assertRaises(ConfigSessionError):
            self.cli_commit()

        self.cli_set(base_path + ['redistribute', 'ipv4', 'connected', 'level-2', 'route-map', route_map])
        self.cli_set(base_path + ['metric-style', metric_style])
        self.cli_set(base_path + ['log-adjacency-changes'])

        # Commit all changes
        self.cli_commit()

        # Verify all changes
-        tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd')
+        tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon)
        self.assertIn(f' net {net}', tmp)
        self.assertIn(f' metric-style {metric_style}', tmp)
        self.assertIn(f' log-adjacency-changes', tmp)
        self.assertIn(f' redistribute ipv4 connected level-2 route-map {route_map}', tmp)

        for interface in self._interfaces:
-            tmp = self.getFRRconfig(f'interface {interface}', daemon='isisd')
+            tmp = self.getFRRconfig(f'interface {interface}', daemon=isis_daemon)
            self.assertIn(f' ip router isis {domain}', tmp)
            self.assertIn(f' ipv6 router isis {domain}', tmp)

        self.cli_delete(['policy', 'route-map', route_map])
        self.cli_delete(['policy', 'prefix-list', prefix_list])

    def test_isis_02_vrfs(self):
        vrfs = ['red', 'green', 'blue']
        # It is safe to assume that when the basic VRF test works, all other
        # IS-IS related features work, as we entirely inherit the CLI templates
        # and Jinja2 FRR template.
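
Context for the VRF test that follows (not part of the patch): each VRF is expected to surface as its own 'router isis <domain> vrf <vrf>' stanza in isisd, which is what the later getFRRconfig() calls with daemon=isis_daemon retrieve. A hypothetical helper sketching that lookup:

```python
# Hypothetical helper, not in the patch: fetch the per-VRF IS-IS stanzas that
# the assertions below check, using the new daemon constant.
def vrf_isis_configs(test, domain, vrfs):
    return {vrf: test.getFRRconfig(f'router isis {domain} vrf {vrf}',
                                   daemon=isis_daemon)
            for vrf in vrfs}
```
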
table = '1000' vrf = 'red' vrf_base = ['vrf', 'name', vrf] vrf_iface = 'eth1' self.cli_set(vrf_base + ['table', table]) self.cli_set(vrf_base + ['protocols', 'isis', 'net', net]) self.cli_set(vrf_base + ['protocols', 'isis', 'interface', vrf_iface]) self.cli_set(vrf_base + ['protocols', 'isis', 'advertise-high-metrics']) self.cli_set(vrf_base + ['protocols', 'isis', 'advertise-passive-only']) self.cli_set(['interfaces', 'ethernet', vrf_iface, 'vrf', vrf]) # Also set a default VRF IS-IS config self.cli_set(base_path + ['net', net]) self.cli_set(base_path + ['interface', 'eth0']) self.cli_commit() # Verify FRR isisd configuration - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f'router isis {domain}', tmp) self.assertIn(f' net {net}', tmp) - tmp = self.getFRRconfig(f'router isis {domain} vrf {vrf}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain} vrf {vrf}', daemon=isis_daemon) self.assertIn(f'router isis {domain} vrf {vrf}', tmp) self.assertIn(f' net {net}', tmp) self.assertIn(f' advertise-high-metrics', tmp) self.assertIn(f' advertise-passive-only', tmp) self.cli_delete(['vrf', 'name', vrf]) self.cli_delete(['interfaces', 'ethernet', vrf_iface, 'vrf']) def test_isis_04_default_information(self): metric = '50' route_map = 'default-foo-' self.isis_base_config() for afi in ['ipv4', 'ipv6']: for level in ['level-1', 'level-2']: self.cli_set(base_path + ['default-information', 'originate', afi, level, 'always']) self.cli_set(base_path + ['default-information', 'originate', afi, level, 'metric', metric]) self.cli_set(base_path + ['default-information', 'originate', afi, level, 'route-map', route_map + level + afi]) # Commit all changes self.cli_commit() # Verify all changes - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) for afi in ['ipv4', 'ipv6']: for level in ['level-1', 'level-2']: route_map_name = route_map + level + afi self.assertIn(f' default-information originate {afi} {level} always route-map {route_map_name} metric {metric}', tmp) def test_isis_05_password(self): password = 'foo' self.isis_base_config() for interface in self._interfaces: self.cli_set(base_path + ['interface', interface, 'password', 'plaintext-password', f'{password}-{interface}']) self.cli_set(base_path + ['area-password', 'plaintext-password', password]) self.cli_set(base_path + ['area-password', 'md5', password]) self.cli_set(base_path + ['domain-password', 'plaintext-password', password]) self.cli_set(base_path + ['domain-password', 'md5', password]) # verify() - can not use both md5 and plaintext-password for area-password with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['area-password', 'md5', password]) # verify() - can not use both md5 and plaintext-password for domain-password with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base_path + ['domain-password', 'md5', password]) # Commit all changes self.cli_commit() # Verify all changes - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' domain-password clear {password}', tmp) self.assertIn(f' area-password clear {password}', tmp) for interface in self._interfaces: - tmp = self.getFRRconfig(f'interface {interface}', 
daemon='isisd') + tmp = self.getFRRconfig(f'interface {interface}', daemon=isis_daemon) self.assertIn(f' isis password clear {password}-{interface}', tmp) def test_isis_06_spf_delay_bfd(self): network = 'point-to-point' holddown = '10' init_delay = '50' long_delay = '200' short_delay = '100' time_to_learn = '75' bfd_profile = 'isis-bfd' self.cli_set(base_path + ['net', net]) for interface in self._interfaces: self.cli_set(base_path + ['interface', interface, 'network', network]) self.cli_set(base_path + ['interface', interface, 'bfd', 'profile', bfd_profile]) self.cli_set(base_path + ['spf-delay-ietf', 'holddown', holddown]) # verify() - All types of spf-delay must be configured with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['spf-delay-ietf', 'init-delay', init_delay]) # verify() - All types of spf-delay must be configured with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['spf-delay-ietf', 'long-delay', long_delay]) # verify() - All types of spf-delay must be configured with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['spf-delay-ietf', 'short-delay', short_delay]) # verify() - All types of spf-delay must be configured with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base_path + ['spf-delay-ietf', 'time-to-learn', time_to_learn]) # Commit all changes self.cli_commit() # Verify all changes - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' spf-delay-ietf init-delay {init_delay} short-delay {short_delay} long-delay {long_delay} holddown {holddown} time-to-learn {time_to_learn}', tmp) for interface in self._interfaces: - tmp = self.getFRRconfig(f'interface {interface}', daemon='isisd') + tmp = self.getFRRconfig(f'interface {interface}', daemon=isis_daemon) self.assertIn(f' ip router isis {domain}', tmp) self.assertIn(f' ipv6 router isis {domain}', tmp) self.assertIn(f' isis network {network}', tmp) self.assertIn(f' isis bfd', tmp) self.assertIn(f' isis bfd profile {bfd_profile}', tmp) def test_isis_07_segment_routing_configuration(self): global_block_low = "300" global_block_high = "399" local_block_low = "400" local_block_high = "499" interface = 'lo' maximum_stack_size = '5' prefix_one = '192.168.0.1/32' prefix_two = '192.168.0.2/32' prefix_three = '192.168.0.3/32' prefix_four = '192.168.0.4/32' prefix_one_value = '1' prefix_two_value = '2' prefix_three_value = '60000' prefix_four_value = '65000' self.cli_set(base_path + ['net', net]) self.cli_set(base_path + ['interface', interface]) self.cli_set(base_path + ['segment-routing', 'maximum-label-depth', maximum_stack_size]) self.cli_set(base_path + ['segment-routing', 'global-block', 'low-label-value', global_block_low]) self.cli_set(base_path + ['segment-routing', 'global-block', 'high-label-value', global_block_high]) self.cli_set(base_path + ['segment-routing', 'local-block', 'low-label-value', local_block_low]) self.cli_set(base_path + ['segment-routing', 'local-block', 'high-label-value', local_block_high]) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_one, 'index', 'value', prefix_one_value]) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_one, 'index', 'explicit-null']) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_two, 'index', 'value', prefix_two_value]) self.cli_set(base_path + ['segment-routing', 'prefix', 
prefix_two, 'index', 'no-php-flag']) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_three, 'absolute', 'value', prefix_three_value]) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_three, 'absolute', 'explicit-null']) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_four, 'absolute', 'value', prefix_four_value]) self.cli_set(base_path + ['segment-routing', 'prefix', prefix_four, 'absolute', 'no-php-flag']) # Commit all changes self.cli_commit() # Verify all changes - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' segment-routing on', tmp) self.assertIn(f' segment-routing global-block {global_block_low} {global_block_high} local-block {local_block_low} {local_block_high}', tmp) self.assertIn(f' segment-routing node-msd {maximum_stack_size}', tmp) self.assertIn(f' segment-routing prefix {prefix_one} index {prefix_one_value} explicit-null', tmp) self.assertIn(f' segment-routing prefix {prefix_two} index {prefix_two_value} no-php-flag', tmp) self.assertIn(f' segment-routing prefix {prefix_three} absolute {prefix_three_value} explicit-null', tmp) self.assertIn(f' segment-routing prefix {prefix_four} absolute {prefix_four_value} no-php-flag', tmp) def test_isis_08_ldp_sync(self): holddown = "500" interface = 'lo' self.cli_set(base_path + ['net', net]) self.cli_set(base_path + ['interface', interface]) self.cli_set(base_path + ['ldp-sync', 'holddown', holddown]) # Commit main ISIS changes self.cli_commit() # Verify main ISIS changes - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' mpls ldp-sync', tmp) self.assertIn(f' mpls ldp-sync holddown {holddown}', tmp) for interface in self._interfaces: self.cli_set(base_path + ['interface', interface, 'ldp-sync', 'holddown', holddown]) # Commit interface changes for holddown self.cli_commit() for interface in self._interfaces: # Verify interface changes for holddown - tmp = self.getFRRconfig(f'interface {interface}', daemon='isisd') + tmp = self.getFRRconfig(f'interface {interface}', daemon=isis_daemon) self.assertIn(f'interface {interface}', tmp) self.assertIn(f' ip router isis {domain}', tmp) self.assertIn(f' ipv6 router isis {domain}', tmp) self.assertIn(f' isis mpls ldp-sync holddown {holddown}', tmp) for interface in self._interfaces: self.cli_set(base_path + ['interface', interface, 'ldp-sync', 'disable']) # Commit interface changes for disable self.cli_commit() for interface in self._interfaces: # Verify interface changes for disable - tmp = self.getFRRconfig(f'interface {interface}', daemon='isisd') + tmp = self.getFRRconfig(f'interface {interface}', daemon=isis_daemon) self.assertIn(f'interface {interface}', tmp) self.assertIn(f' ip router isis {domain}', tmp) self.assertIn(f' ipv6 router isis {domain}', tmp) self.assertIn(f' no isis mpls ldp-sync', tmp) def test_isis_09_lfa(self): prefix_list = 'lfa-prefix-list-test-1' prefix_list_address = '192.168.255.255/32' interface = 'lo' self.cli_set(base_path + ['net', net]) self.cli_set(base_path + ['interface', interface]) self.cli_set(['policy', 'prefix-list', prefix_list, 'rule', '1', 'action', 'permit']) self.cli_set(['policy', 'prefix-list', prefix_list, 'rule', '1', 'prefix', prefix_list_address]) # Commit main ISIS changes self.cli_commit() # Add remote portion of LFA with prefix 
list with validation for level in ['level-1', 'level-2']: self.cli_set(base_path + ['fast-reroute', 'lfa', 'remote', 'prefix-list', prefix_list, level]) self.cli_commit() - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' fast-reroute remote-lfa prefix-list {prefix_list} {level}', tmp) self.cli_delete(base_path + ['fast-reroute']) self.cli_commit() # Add local portion of LFA load-sharing portion with validation for level in ['level-1', 'level-2']: self.cli_set(base_path + ['fast-reroute', 'lfa', 'local', 'load-sharing', 'disable', level]) self.cli_commit() - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' fast-reroute load-sharing disable {level}', tmp) self.cli_delete(base_path + ['fast-reroute']) self.cli_commit() # Add local portion of LFA priority-limit portion with validation for priority in ['critical', 'high', 'medium']: for level in ['level-1', 'level-2']: self.cli_set(base_path + ['fast-reroute', 'lfa', 'local', 'priority-limit', priority, level]) self.cli_commit() - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' fast-reroute priority-limit {priority} {level}', tmp) self.cli_delete(base_path + ['fast-reroute']) self.cli_commit() # Add local portion of LFA tiebreaker portion with validation index = '100' for tiebreaker in ['downstream','lowest-backup-metric','node-protecting']: for level in ['level-1', 'level-2']: self.cli_set(base_path + ['fast-reroute', 'lfa', 'local', 'tiebreaker', tiebreaker, 'index', index, level]) self.cli_commit() - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' fast-reroute lfa tiebreaker {tiebreaker} index {index} {level}', tmp) self.cli_delete(base_path + ['fast-reroute']) self.cli_commit() # Clean up and remove prefix list self.cli_delete(['policy', 'prefix-list', prefix_list]) self.cli_commit() def test_isis_10_topology(self): topologies = ['ipv4-multicast', 'ipv4-mgmt', 'ipv6-unicast', 'ipv6-multicast', 'ipv6-mgmt'] interface = 'lo' # Set a basic IS-IS config self.cli_set(base_path + ['net', net]) self.cli_set(base_path + ['interface', interface]) for topology in topologies: self.cli_set(base_path + ['topology', topology]) self.cli_commit() - tmp = self.getFRRconfig(f'router isis {domain}', daemon='isisd') + tmp = self.getFRRconfig(f'router isis {domain}', daemon=isis_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' topology {topology}', tmp) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_openfabric.py b/smoketest/scripts/cli/test_protocols_openfabric.py index e37aed456..889cba135 100644 --- a/smoketest/scripts/cli/test_protocols_openfabric.py +++ b/smoketest/scripts/cli/test_protocols_openfabric.py @@ -1,186 +1,187 @@ #!/usr/bin/env python3 # # Copyright (C) 2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.utils.process import process_named_running +from vyos.frr import openfabric_daemon PROCESS_NAME = 'fabricd' base_path = ['protocols', 'openfabric'] domain = 'VyOS' net = '49.0001.1111.1111.1111.00' dummy_if = 'dum1234' address_families = ['ipv4', 'ipv6'] path = base_path + ['domain', domain] class TestProtocolsOpenFabric(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): # call base-classes classmethod super(TestProtocolsOpenFabric, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same cls.daemon_pid = process_named_running(PROCESS_NAME) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) def tearDown(self): self.cli_delete(base_path) self.cli_commit() # check process health and continuity self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) def openfabric_base_config(self): self.cli_set(['interfaces', 'dummy', dummy_if]) self.cli_set(base_path + ['net', net]) for family in address_families: self.cli_set(path + ['interface', dummy_if, 'address-family', family]) def test_openfabric_01_router_params(self): fabric_tier = '5' lsp_gen_interval = '20' self.cli_set(base_path) # verify() - net id and domain name are mandatory with self.assertRaises(ConfigSessionError): self.cli_commit() self.openfabric_base_config() self.cli_set(path + ['log-adjacency-changes']) self.cli_set(path + ['set-overload-bit']) self.cli_set(path + ['fabric-tier', fabric_tier]) self.cli_set(path + ['lsp-gen-interval', lsp_gen_interval]) # Commit all changes self.cli_commit() # Verify all changes - tmp = self.getFRRconfig(f'router openfabric {domain}', daemon='fabricd') + tmp = self.getFRRconfig(f'router openfabric {domain}', daemon=openfabric_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' log-adjacency-changes', tmp) self.assertIn(f' set-overload-bit', tmp) self.assertIn(f' fabric-tier {fabric_tier}', tmp) self.assertIn(f' lsp-gen-interval {lsp_gen_interval}', tmp) - tmp = self.getFRRconfig(f'interface {dummy_if}', daemon='fabricd') + tmp = self.getFRRconfig(f'interface {dummy_if}', daemon=openfabric_daemon) self.assertIn(f' ip router openfabric {domain}', tmp) self.assertIn(f' ipv6 router openfabric {domain}', tmp) def test_openfabric_02_loopback_interface(self): interface = 'lo' hello_interval = '100' metric = '24478' self.openfabric_base_config() self.cli_set(path + ['interface', interface, 'address-family', 'ipv4']) self.cli_set(path + ['interface', interface, 'hello-interval', hello_interval]) self.cli_set(path + ['interface', interface, 'metric', metric]) # Commit all changes self.cli_commit() # Verify FRR openfabric configuration - tmp = self.getFRRconfig(f'router openfabric {domain}', daemon='fabricd') + tmp = self.getFRRconfig(f'router openfabric {domain}', daemon=openfabric_daemon) self.assertIn(f'router openfabric {domain}', tmp) self.assertIn(f' net {net}', tmp) # Verify interface configuration - tmp = self.getFRRconfig(f'interface 
{interface}', daemon='fabricd') + tmp = self.getFRRconfig(f'interface {interface}', daemon=openfabric_daemon) self.assertIn(f' ip router openfabric {domain}', tmp) # for lo interface 'openfabric passive' is implied self.assertIn(f' openfabric passive', tmp) self.assertIn(f' openfabric metric {metric}', tmp) def test_openfabric_03_password(self): password = 'foo' self.openfabric_base_config() self.cli_set(path + ['interface', dummy_if, 'password', 'plaintext-password', f'{password}-{dummy_if}']) self.cli_set(path + ['interface', dummy_if, 'password', 'md5', f'{password}-{dummy_if}']) # verify() - can not use both md5 and plaintext-password for password for the interface with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(path + ['interface', dummy_if, 'password', 'md5']) self.cli_set(path + ['domain-password', 'plaintext-password', password]) self.cli_set(path + ['domain-password', 'md5', password]) # verify() - can not use both md5 and plaintext-password for domain-password with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(path + ['domain-password', 'md5']) # Commit all changes self.cli_commit() # Verify all changes - tmp = self.getFRRconfig(f'router openfabric {domain}', daemon='fabricd') + tmp = self.getFRRconfig(f'router openfabric {domain}', daemon=openfabric_daemon) self.assertIn(f' net {net}', tmp) self.assertIn(f' domain-password clear {password}', tmp) - tmp = self.getFRRconfig(f'interface {dummy_if}', daemon='fabricd') + tmp = self.getFRRconfig(f'interface {dummy_if}', daemon=openfabric_daemon) self.assertIn(f' openfabric password clear {password}-{dummy_if}', tmp) def test_openfabric_multiple_domains(self): domain_2 = 'VyOS_2' interface = 'dum5678' new_path = base_path + ['domain', domain_2] self.openfabric_base_config() # set same interface for 2 OpenFabric domains self.cli_set(['interfaces', 'dummy', interface]) self.cli_set(new_path + ['interface', interface, 'address-family', 'ipv4']) self.cli_set(path + ['interface', interface, 'address-family', 'ipv4']) # verify() - same interface can be used only for one OpenFabric instance with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(path + ['interface', interface]) # Commit all changes self.cli_commit() # Verify FRR openfabric configuration - tmp = self.getFRRconfig(f'router openfabric {domain}', daemon='fabricd') + tmp = self.getFRRconfig(f'router openfabric {domain}', daemon=openfabric_daemon) self.assertIn(f'router openfabric {domain}', tmp) self.assertIn(f' net {net}', tmp) - tmp = self.getFRRconfig(f'router openfabric {domain_2}', daemon='fabricd') + tmp = self.getFRRconfig(f'router openfabric {domain_2}', daemon=openfabric_daemon) self.assertIn(f'router openfabric {domain_2}', tmp) self.assertIn(f' net {net}', tmp) # Verify interface configuration - tmp = self.getFRRconfig(f'interface {dummy_if}', daemon='fabricd') + tmp = self.getFRRconfig(f'interface {dummy_if}', daemon=openfabric_daemon) self.assertIn(f' ip router openfabric {domain}', tmp) self.assertIn(f' ipv6 router openfabric {domain}', tmp) - tmp = self.getFRRconfig(f'interface {interface}', daemon='fabricd') + tmp = self.getFRRconfig(f'interface {interface}', daemon=openfabric_daemon) self.assertIn(f' ip router openfabric {domain_2}', tmp) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_segment-routing.py b/smoketest/scripts/cli/test_protocols_segment-routing.py index daa7f088f..eb563db93 100755 --- 
a/smoketest/scripts/cli/test_protocols_segment-routing.py +++ b/smoketest/scripts/cli/test_protocols_segment-routing.py @@ -1,110 +1,111 @@ #!/usr/bin/env python3 # # Copyright (C) 2023-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.ifconfig import Section +from vyos.frr import zebra_daemon from vyos.utils.process import process_named_running from vyos.utils.system import sysctl_read base_path = ['protocols', 'segment-routing'] PROCESS_NAME = 'zebra' class TestProtocolsSegmentRouting(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): # call base-classes classmethod super(TestProtocolsSegmentRouting, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same cls.daemon_pid = process_named_running(PROCESS_NAME) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) def tearDown(self): self.cli_delete(base_path) self.cli_commit() # check process health and continuity self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) def test_srv6(self): interfaces = Section.interfaces('ethernet', vlan=False) locators = { 'foo' : { 'prefix' : '2001:a::/64' }, 'foo' : { 'prefix' : '2001:b::/64', 'usid' : {} }, } for locator, locator_config in locators.items(): self.cli_set(base_path + ['srv6', 'locator', locator, 'prefix', locator_config['prefix']]) if 'usid' in locator_config: self.cli_set(base_path + ['srv6', 'locator', locator, 'behavior-usid']) # verify() - SRv6 should be enabled on at least one interface! 
with self.assertRaises(ConfigSessionError): self.cli_commit() for interface in interfaces: self.cli_set(base_path + ['interface', interface, 'srv6']) self.cli_commit() for interface in interfaces: self.assertEqual(sysctl_read(f'net.ipv6.conf.{interface}.seg6_enabled'), '1') self.assertEqual(sysctl_read(f'net.ipv6.conf.{interface}.seg6_require_hmac'), '0') # default - frrconfig = self.getFRRconfig(f'segment-routing', daemon='zebra') + frrconfig = self.getFRRconfig(f'segment-routing', daemon=zebra_daemon) self.assertIn(f'segment-routing', frrconfig) self.assertIn(f' srv6', frrconfig) self.assertIn(f' locators', frrconfig) for locator, locator_config in locators.items(): self.assertIn(f' locator {locator}', frrconfig) self.assertIn(f' prefix {locator_config["prefix"]} block-len 40 node-len 24 func-bits 16', frrconfig) def test_srv6_sysctl(self): interfaces = Section.interfaces('ethernet', vlan=False) # HMAC accept for interface in interfaces: self.cli_set(base_path + ['interface', interface, 'srv6']) self.cli_set(base_path + ['interface', interface, 'srv6', 'hmac', 'ignore']) self.cli_commit() for interface in interfaces: self.assertEqual(sysctl_read(f'net.ipv6.conf.{interface}.seg6_enabled'), '1') self.assertEqual(sysctl_read(f'net.ipv6.conf.{interface}.seg6_require_hmac'), '-1') # ignore # HMAC drop for interface in interfaces: self.cli_set(base_path + ['interface', interface, 'srv6']) self.cli_set(base_path + ['interface', interface, 'srv6', 'hmac', 'drop']) self.cli_commit() for interface in interfaces: self.assertEqual(sysctl_read(f'net.ipv6.conf.{interface}.seg6_enabled'), '1') self.assertEqual(sysctl_read(f'net.ipv6.conf.{interface}.seg6_require_hmac'), '1') # drop # Disable SRv6 on first interface first_if = interfaces[-1] self.cli_delete(base_path + ['interface', first_if]) self.cli_commit() self.assertEqual(sysctl_read(f'net.ipv6.conf.{first_if}.seg6_enabled'), '0') if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_system_ip.py b/smoketest/scripts/cli/test_system_ip.py index 5b0090237..4ab5e8181 100755 --- a/smoketest/scripts/cli/test_system_ip.py +++ b/smoketest/scripts/cli/test_system_ip.py @@ -1,126 +1,127 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.utils.file import read_file +from vyos.frr import mgmt_daemon base_path = ['system', 'ip'] class TestSystemIP(VyOSUnitTestSHIM.TestCase): def tearDown(self): self.cli_delete(base_path) self.cli_commit() def test_system_ip_forwarding(self): # Test if IPv4 forwarding can be disabled globally, default is '1' # which means forwarding enabled all_forwarding = '/proc/sys/net/ipv4/conf/all/forwarding' self.assertEqual(read_file(all_forwarding), '1') self.cli_set(base_path + ['disable-forwarding']) self.cli_commit() self.assertEqual(read_file(all_forwarding), '0') def test_system_ip_multipath(self): # Test IPv4 multipathing options, options default to off -> '0' use_neigh = '/proc/sys/net/ipv4/fib_multipath_use_neigh' hash_policy = '/proc/sys/net/ipv4/fib_multipath_hash_policy' self.assertEqual(read_file(use_neigh), '0') self.assertEqual(read_file(hash_policy), '0') self.cli_set(base_path + ['multipath', 'ignore-unreachable-nexthops']) self.cli_set(base_path + ['multipath', 'layer4-hashing']) self.cli_commit() self.assertEqual(read_file(use_neigh), '1') self.assertEqual(read_file(hash_policy), '1') def test_system_ip_arp_table_size(self): # Maximum number of entries to keep in the ARP cache, the # default is 8k gc_thresh3 = '/proc/sys/net/ipv4/neigh/default/gc_thresh3' gc_thresh2 = '/proc/sys/net/ipv4/neigh/default/gc_thresh2' gc_thresh1 = '/proc/sys/net/ipv4/neigh/default/gc_thresh1' self.assertEqual(read_file(gc_thresh3), '8192') self.assertEqual(read_file(gc_thresh2), '4096') self.assertEqual(read_file(gc_thresh1), '1024') for size in [1024, 2048, 4096, 8192, 16384, 32768]: self.cli_set(base_path + ['arp', 'table-size', str(size)]) self.cli_commit() self.assertEqual(read_file(gc_thresh3), str(size)) self.assertEqual(read_file(gc_thresh2), str(size // 2)) self.assertEqual(read_file(gc_thresh1), str(size // 8)) def test_system_ip_protocol_route_map(self): protocols = ['any', 'babel', 'bgp', 'connected', 'eigrp', 'isis', 'kernel', 'ospf', 'rip', 'static', 'table'] for protocol in protocols: self.cli_set(['policy', 'route-map', f'route-map-{protocol}', 'rule', '10', 'action', 'permit']) self.cli_set(base_path + ['protocol', protocol, 'route-map', f'route-map-{protocol}']) self.cli_commit() # Verify route-map properly applied to FRR - frrconfig = self.getFRRconfig('ip protocol', end='', daemon='zebra') + frrconfig = self.getFRRconfig('ip protocol', end='', daemon=mgmt_daemon) for protocol in protocols: self.assertIn(f'ip protocol {protocol} route-map route-map-{protocol}', frrconfig) # Delete route-maps self.cli_delete(['policy', 'route-map']) self.cli_delete(base_path + ['protocol']) self.cli_commit() # Verify route-map properly applied to FRR - frrconfig = self.getFRRconfig('ip protocol', end='', daemon='zebra') + frrconfig = self.getFRRconfig('ip protocol', end='', daemon=mgmt_daemon) self.assertNotIn(f'ip protocol', frrconfig) def test_system_ip_protocol_non_existing_route_map(self): non_existing = 'non-existing' self.cli_set(base_path + ['protocol', 'static', 'route-map', non_existing]) # VRF does yet not exist - an error must be thrown with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(['policy', 'route-map', non_existing, 'rule', '10', 'action', 'deny']) # Commit again self.cli_commit() def test_system_ip_nht(self): self.cli_set(base_path + ['nht', 'no-resolve-via-default']) self.cli_commit() # Verify CLI config applied to FRR - frrconfig = 
self.getFRRconfig('', end='', daemon='zebra') + frrconfig = self.getFRRconfig('', end='', daemon=mgmt_daemon) self.assertIn(f'no ip nht resolve-via-default', frrconfig) self.cli_delete(base_path + ['nht', 'no-resolve-via-default']) self.cli_commit() # Verify CLI config removed to FRR - frrconfig = self.getFRRconfig('', end='', daemon='zebra') + frrconfig = self.getFRRconfig('', end='', daemon=mgmt_daemon) self.assertNotIn(f'no ip nht resolve-via-default', frrconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_system_ipv6.py b/smoketest/scripts/cli/test_system_ipv6.py index 0c77c1dd4..1c778a11d 100755 --- a/smoketest/scripts/cli/test_system_ipv6.py +++ b/smoketest/scripts/cli/test_system_ipv6.py @@ -1,145 +1,146 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.utils.file import read_file +from vyos.frr import mgmt_daemon base_path = ['system', 'ipv6'] file_forwarding = '/proc/sys/net/ipv6/conf/all/forwarding' file_disable = '/proc/sys/net/ipv6/conf/all/disable_ipv6' file_dad = '/proc/sys/net/ipv6/conf/all/accept_dad' file_multipath = '/proc/sys/net/ipv6/fib_multipath_hash_policy' class TestSystemIPv6(VyOSUnitTestSHIM.TestCase): def tearDown(self): self.cli_delete(base_path) self.cli_commit() def test_system_ipv6_forwarding(self): # Test if IPv6 forwarding can be disabled globally, default is '1' # which means forwarding enabled self.assertEqual(read_file(file_forwarding), '1') self.cli_set(base_path + ['disable-forwarding']) self.cli_commit() self.assertEqual(read_file(file_forwarding), '0') def test_system_ipv6_strict_dad(self): # This defaults to 1 self.assertEqual(read_file(file_dad), '1') # Do not assign any IPv6 address on interfaces, this requires a reboot # which can not be tested, but we can read the config file :) self.cli_set(base_path + ['strict-dad']) self.cli_commit() # Verify configuration file self.assertEqual(read_file(file_dad), '2') def test_system_ipv6_multipath(self): # This defaults to 0 self.assertEqual(read_file(file_multipath), '0') # Do not assign any IPv6 address on interfaces, this requires a reboot # which can not be tested, but we can read the config file :) self.cli_set(base_path + ['multipath', 'layer4-hashing']) self.cli_commit() # Verify configuration file self.assertEqual(read_file(file_multipath), '1') def test_system_ipv6_neighbor_table_size(self): # Maximum number of entries to keep in the ARP cache, the # default is 8192 gc_thresh3 = '/proc/sys/net/ipv6/neigh/default/gc_thresh3' gc_thresh2 = '/proc/sys/net/ipv6/neigh/default/gc_thresh2' gc_thresh1 = '/proc/sys/net/ipv6/neigh/default/gc_thresh1' self.assertEqual(read_file(gc_thresh3), '8192') self.assertEqual(read_file(gc_thresh2), '4096') self.assertEqual(read_file(gc_thresh1), '1024') for size in [1024, 2048, 4096, 8192,
16384, 32768]: self.cli_set(base_path + ['neighbor', 'table-size', str(size)]) self.cli_commit() self.assertEqual(read_file(gc_thresh3), str(size)) self.assertEqual(read_file(gc_thresh2), str(size // 2)) self.assertEqual(read_file(gc_thresh1), str(size // 8)) def test_system_ipv6_protocol_route_map(self): protocols = ['any', 'babel', 'bgp', 'connected', 'isis', 'kernel', 'ospfv3', 'ripng', 'static', 'table'] for protocol in protocols: route_map = 'route-map-' + protocol.replace('ospfv3', 'ospf6') self.cli_set(['policy', 'route-map', route_map, 'rule', '10', 'action', 'permit']) self.cli_set(base_path + ['protocol', protocol, 'route-map', route_map]) self.cli_commit() # Verify route-map properly applied to FRR - frrconfig = self.getFRRconfig('ipv6 protocol', end='', daemon='zebra') + frrconfig = self.getFRRconfig('ipv6 protocol', end='', daemon=mgmt_daemon) for protocol in protocols: # VyOS and FRR use a different name for OSPFv3 (IPv6) if protocol == 'ospfv3': protocol = 'ospf6' self.assertIn(f'ipv6 protocol {protocol} route-map route-map-{protocol}', frrconfig) # Delete route-maps self.cli_delete(['policy', 'route-map']) self.cli_delete(base_path + ['protocol']) self.cli_commit() # Verify route-map properly applied to FRR - frrconfig = self.getFRRconfig('ipv6 protocol', end='', daemon='zebra') + frrconfig = self.getFRRconfig('ipv6 protocol', end='', daemon=mgmt_daemon) self.assertNotIn(f'ipv6 protocol', frrconfig) def test_system_ipv6_protocol_non_existing_route_map(self): non_existing = 'non-existing6' self.cli_set(base_path + ['protocol', 'static', 'route-map', non_existing]) # VRF does yet not exist - an error must be thrown with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(['policy', 'route-map', non_existing, 'rule', '10', 'action', 'deny']) # Commit again self.cli_commit() def test_system_ipv6_nht(self): self.cli_set(base_path + ['nht', 'no-resolve-via-default']) self.cli_commit() # Verify CLI config applied to FRR - frrconfig = self.getFRRconfig('', end='', daemon='zebra') + frrconfig = self.getFRRconfig('', end='', daemon=mgmt_daemon) self.assertIn(f'no ipv6 nht resolve-via-default', frrconfig) self.cli_delete(base_path + ['nht', 'no-resolve-via-default']) self.cli_commit() # Verify CLI config removed to FRR - frrconfig = self.getFRRconfig('', end='', daemon='zebra') + frrconfig = self.getFRRconfig('', end='', daemon=mgmt_daemon) self.assertNotIn(f'no ipv6 nht resolve-via-default', frrconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_vrf.py b/smoketest/scripts/cli/test_vrf.py index 2bb6c91c1..3cab5248e 100755 --- a/smoketest/scripts/cli/test_vrf.py +++ b/smoketest/scripts/cli/test_vrf.py @@ -1,599 +1,600 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
import re import os import unittest from base_vyostest_shim import VyOSUnitTestSHIM from json import loads from jmespath import search from vyos.configsession import ConfigSessionError +from vyos.frr import mgmt_daemon from vyos.ifconfig import Interface from vyos.ifconfig import Section from vyos.utils.file import read_file from vyos.utils.network import get_interface_config from vyos.utils.network import get_vrf_tableid from vyos.utils.network import is_intf_addr_assigned from vyos.utils.network import interface_exists from vyos.utils.process import cmd from vyos.utils.system import sysctl_read base_path = ['vrf'] vrfs = ['red', 'green', 'blue', 'foo-bar', 'baz_foo'] v4_protocols = ['any', 'babel', 'bgp', 'connected', 'eigrp', 'isis', 'kernel', 'ospf', 'rip', 'static', 'table'] v6_protocols = ['any', 'babel', 'bgp', 'connected', 'isis', 'kernel', 'ospfv3', 'ripng', 'static', 'table'] class VRFTest(VyOSUnitTestSHIM.TestCase): _interfaces = [] @classmethod def setUpClass(cls): # we need to filter out VLAN interfaces identified by a dot (.) # in their name - just in case! if 'TEST_ETH' in os.environ: tmp = os.environ['TEST_ETH'].split() cls._interfaces = tmp else: for tmp in Section.interfaces('ethernet', vlan=False): cls._interfaces.append(tmp) # call base-classes classmethod super(VRFTest, cls).setUpClass() def setUp(self): # VRF strict_mode is always enabled tmp = read_file('/proc/sys/net/vrf/strict_mode') self.assertEqual(tmp, '1') def tearDown(self): # delete all VRFs self.cli_delete(base_path) self.cli_commit() for vrf in vrfs: self.assertFalse(interface_exists(vrf)) def test_vrf_vni_and_table_id(self): base_table = '1000' table = base_table for vrf in vrfs: base = base_path + ['name', vrf] description = f'VyOS-VRF-{vrf}' self.cli_set(base + ['description', description]) # check validate() - a table ID is mandatory with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base + ['table', table]) self.cli_set(base + ['vni', table]) if vrf == 'green': self.cli_set(base + ['disable']) table = str(int(table) + 1) # commit changes self.cli_commit() # Verify VRF configuration table = base_table iproute2_config = read_file('/etc/iproute2/rt_tables.d/vyos-vrf.conf') for vrf in vrfs: description = f'VyOS-VRF-{vrf}' self.assertTrue(interface_exists(vrf)) vrf_if = Interface(vrf) # validate proper interface description self.assertEqual(vrf_if.get_alias(), description) # validate admin up/down state of VRF state = 'up' if vrf == 'green': state = 'down' self.assertEqual(vrf_if.get_admin_state(), state) # Test the iproute2 lookup file, syntax is as follows: # # # id vrf name comment # 1000 red # VyOS-VRF-red # 1001 green # VyOS-VRF-green # ...
regex = f'{table}\s+{vrf}\s+#\s+{description}' self.assertTrue(re.findall(regex, iproute2_config)) - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) self.assertEqual(int(table), get_vrf_tableid(vrf)) # Increment table ID for the next run table = str(int(table) + 1) def test_vrf_loopbacks_ips(self): table = '2000' for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', str(table)]) table = str(int(table) + 1) # commit changes self.cli_commit() # Verify VRF configuration loopbacks = ['127.0.0.1', '::1'] for vrf in vrfs: # Ensure VRF was created self.assertTrue(interface_exists(vrf)) # Verify IP forwarding is 1 (enabled) self.assertEqual(sysctl_read(f'net.ipv4.conf.{vrf}.forwarding'), '1') self.assertEqual(sysctl_read(f'net.ipv6.conf.{vrf}.forwarding'), '1') # Test for proper loopback IP assignment for addr in loopbacks: self.assertTrue(is_intf_addr_assigned(vrf, addr)) def test_vrf_bind_all(self): table = '2000' for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', str(table)]) table = str(int(table) + 1) self.cli_set(base_path + ['bind-to-all']) # commit changes self.cli_commit() # Verify VRF configuration self.assertEqual(sysctl_read('net.ipv4.tcp_l3mdev_accept'), '1') self.assertEqual(sysctl_read('net.ipv4.udp_l3mdev_accept'), '1') # If there is any VRF defined, strict_mode should be on self.assertEqual(sysctl_read('net.vrf.strict_mode'), '1') def test_vrf_table_id_is_unalterable(self): # Linux Kernel prohibits the change of a VRF table on the fly. # VRF must be deleted and recreated! table = '1000' vrf = vrfs[0] base = base_path + ['name', vrf] self.cli_set(base + ['table', table]) # commit changes self.cli_commit() # Check if VRF has been created self.assertTrue(interface_exists(vrf)) table = str(int(table) + 1) self.cli_set(base + ['table', table]) # check validate() - table ID can not be altered! 
with self.assertRaises(ConfigSessionError): self.cli_commit() def test_vrf_assign_interface(self): vrf = vrfs[0] table = '5000' self.cli_set(['vrf', 'name', vrf, 'table', table]) for interface in self._interfaces: section = Section.section(interface) self.cli_set(['interfaces', section, interface, 'vrf', vrf]) # commit changes self.cli_commit() # Verify VRF assignment for interface in self._interfaces: tmp = get_interface_config(interface) self.assertEqual(vrf, tmp['master']) # cleanup section = Section.section(interface) self.cli_delete(['interfaces', section, interface, 'vrf']) def test_vrf_static_route(self): base_table = '100' table = base_table for vrf in vrfs: next_hop = f'192.0.{table}.1' prefix = f'10.0.{table}.0/24' base = base_path + ['name', vrf] self.cli_set(base + ['vni', table]) # check validate() - a table ID is mandatory with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(base + ['table', table]) self.cli_set(base + ['protocols', 'static', 'route', prefix, 'next-hop', next_hop]) table = str(int(table) + 1) # commit changes self.cli_commit() # Verify VRF configuration table = base_table for vrf in vrfs: next_hop = f'192.0.{table}.1' prefix = f'10.0.{table}.0/24' self.assertTrue(interface_exists(vrf)) - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) self.assertIn(f' ip route {prefix} {next_hop}', frrconfig) # Increment table ID for the next run table = str(int(table) + 1) def test_vrf_link_local_ip_addresses(self): # Testcase for issue T4331 table = '100' vrf = 'orange' interface = 'dum9998' addresses = ['192.0.2.1/26', '2001:db8:9998::1/64', 'fe80::1/64'] for address in addresses: self.cli_set(['interfaces', 'dummy', interface, 'address', address]) # Create dummy interfaces self.cli_commit() # ... and verify IP addresses got assigned for address in addresses: self.assertTrue(is_intf_addr_assigned(interface, address)) # Move interface to VRF self.cli_set(base_path + ['name', vrf, 'table', table]) self.cli_set(['interfaces', 'dummy', interface, 'vrf', vrf]) # Apply VRF config self.cli_commit() # Ensure VRF got created self.assertTrue(interface_exists(vrf)) # ...
and IP addresses are still assigned for address in addresses: self.assertTrue(is_intf_addr_assigned(interface, address)) # Verify VRF table ID self.assertEqual(int(table), get_vrf_tableid(vrf)) # Verify interface is assigned to VRF tmp = get_interface_config(interface) self.assertEqual(vrf, tmp['master']) # Delete Interface self.cli_delete(['interfaces', 'dummy', interface]) self.cli_commit() def test_vrf_disable_forwarding(self): table = '2000' for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', table]) self.cli_set(base + ['ip', 'disable-forwarding']) self.cli_set(base + ['ipv6', 'disable-forwarding']) table = str(int(table) + 1) # commit changes self.cli_commit() # Verify VRF configuration loopbacks = ['127.0.0.1', '::1'] for vrf in vrfs: # Ensure VRF was created self.assertTrue(interface_exists(vrf)) # Verify IP forwarding is 0 (disabled) self.assertEqual(sysctl_read(f'net.ipv4.conf.{vrf}.forwarding'), '0') self.assertEqual(sysctl_read(f'net.ipv6.conf.{vrf}.forwarding'), '0') def test_vrf_ip_protocol_route_map(self): table = '6000' for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', table]) for protocol in v4_protocols: self.cli_set(['policy', 'route-map', f'route-map-{vrf}-{protocol}', 'rule', '10', 'action', 'permit']) self.cli_set(base + ['ip', 'protocol', protocol, 'route-map', f'route-map-{vrf}-{protocol}']) table = str(int(table) + 1) self.cli_commit() # Verify route-map properly applied to FRR for vrf in vrfs: - frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon='zebra') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f'vrf {vrf}', frrconfig) for protocol in v4_protocols: self.assertIn(f' ip protocol {protocol} route-map route-map-{vrf}-{protocol}', frrconfig) # Delete route-maps for vrf in vrfs: base = base_path + ['name', vrf] self.cli_delete(['policy', 'route-map', f'route-map-{vrf}-{protocol}']) self.cli_delete(base + ['ip', 'protocol']) self.cli_commit() # Verify route-map properly is removed from FRR for vrf in vrfs: - frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon='zebra') - self.assertNotIn(f'vrf {vrf}', frrconfig) + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) + self.assertNotIn(f' ip protocol', frrconfig) def test_vrf_ip_ipv6_protocol_non_existing_route_map(self): table = '6100' non_existing = 'non-existing' for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', table]) for protocol in v4_protocols: self.cli_set(base + ['ip', 'protocol', protocol, 'route-map', f'v4-{non_existing}']) for protocol in v6_protocols: self.cli_set(base + ['ipv6', 'protocol', protocol, 'route-map', f'v6-{non_existing}']) table = str(int(table) + 1) # Both v4 and v6 route-maps do not exist yet with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(['policy', 'route-map', f'v4-{non_existing}', 'rule', '10', 'action', 'deny']) # v6 route-map does not exist yet with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_set(['policy', 'route-map', f'v6-{non_existing}', 'rule', '10', 'action', 'deny']) # Commit again self.cli_commit() def test_vrf_ipv6_protocol_route_map(self): table = '6200' for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', table]) for protocol in v6_protocols: route_map = f'route-map-{vrf}-{protocol.replace("ospfv3", "ospf6")}' self.cli_set(['policy', 'route-map', route_map, 'rule', '10', 'action', 'permit']) self.cli_set(base + ['ipv6', 'protocol', protocol, 'route-map', route_map]) 
table = str(int(table) + 1) self.cli_commit() # Verify route-map properly applied to FRR for vrf in vrfs: - frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon='zebra') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f'vrf {vrf}', frrconfig) for protocol in v6_protocols: # VyOS and FRR use a different name for OSPFv3 (IPv6) if protocol == 'ospfv3': protocol = 'ospf6' route_map = f'route-map-{vrf}-{protocol}' self.assertIn(f' ipv6 protocol {protocol} route-map {route_map}', frrconfig) # Delete route-maps for vrf in vrfs: base = base_path + ['name', vrf] self.cli_delete(['policy', 'route-map', f'route-map-{vrf}-{protocol}']) self.cli_delete(base + ['ipv6', 'protocol']) self.cli_commit() # Verify route-map properly is removed from FRR for vrf in vrfs: - frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon='zebra') - self.assertNotIn(f'vrf {vrf}', frrconfig) + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) + self.assertNotIn(f' ipv6 protocol', frrconfig) def test_vrf_vni_duplicates(self): base_table = '6300' table = base_table for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', str(table)]) self.cli_set(base + ['vni', '100']) table = str(int(table) + 1) # L3VNIs can only be used once with self.assertRaises(ConfigSessionError): self.cli_commit() table = base_table for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['vni', str(table)]) table = str(int(table) + 1) # commit changes self.cli_commit() # Verify VRF configuration table = base_table for vrf in vrfs: self.assertTrue(interface_exists(vrf)) - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) # Increment table ID for the next run table = str(int(table) + 1) def test_vrf_vni_add_change_remove(self): base_table = '6300' table = base_table for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', str(table)]) self.cli_set(base + ['vni', str(table)]) table = str(int(table) + 1) # commit changes self.cli_commit() # Verify VRF configuration table = base_table for vrf in vrfs: self.assertTrue(interface_exists(vrf)) - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) # Increment table ID for the next run table = str(int(table) + 1) # Now change all L3VNIs (increment 2) # We must also change the base_table number as we probably could get # duplicate VNI's during the test as VNIs are applied 1:1 to FRR base_table = '5000' table = base_table for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['vni', str(table)]) table = str(int(table) + 2) # commit changes self.cli_commit() # Verify VRF configuration table = base_table for vrf in vrfs: self.assertTrue(interface_exists(vrf)) - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) # Increment table ID for the next run table = str(int(table) + 2) # add a new VRF with VNI - this must not delete any existing VRF/VNI purple = 'purple' table = str(int(table) + 10) self.cli_set(base_path + ['name', purple, 'table', table]) self.cli_set(base_path + ['name', purple, 'vni', table]) # commit changes self.cli_commit() # Verify VRF configuration table = base_table for vrf in vrfs: self.assertTrue(interface_exists(vrf)) - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = 
self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) # Increment table ID for the next run table = str(int(table) + 2) # Verify purple VRF/VNI self.assertTrue(interface_exists(purple)) table = str(int(table) + 10) - frrconfig = self.getFRRconfig(f'vrf {purple}') + frrconfig = self.getFRRconfig(f'vrf {purple}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) # Now delete all the VNIs for vrf in vrfs: base = base_path + ['name', vrf] self.cli_delete(base + ['vni']) # commit changes self.cli_commit() # Verify no VNI is defined for vrf in vrfs: self.assertTrue(interface_exists(vrf)) - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertNotIn('vni', frrconfig) # Verify purple VNI remains self.assertTrue(interface_exists(purple)) - frrconfig = self.getFRRconfig(f'vrf {purple}') + frrconfig = self.getFRRconfig(f'vrf {purple}', daemon=mgmt_daemon) self.assertIn(f' vni {table}', frrconfig) def test_vrf_ip_ipv6_nht(self): table = '6910' for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', table]) self.cli_set(base + ['ip', 'nht', 'no-resolve-via-default']) self.cli_set(base + ['ipv6', 'nht', 'no-resolve-via-default']) table = str(int(table) + 1) self.cli_commit() # Verify route-map properly applied to FRR for vrf in vrfs: - frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon='zebra') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertIn(f'vrf {vrf}', frrconfig) self.assertIn(f' no ip nht resolve-via-default', frrconfig) self.assertIn(f' no ipv6 nht resolve-via-default', frrconfig) # Delete route-maps for vrf in vrfs: base = base_path + ['name', vrf] self.cli_delete(base + ['ip']) self.cli_delete(base + ['ipv6']) self.cli_commit() # Verify route-map properly is removed from FRR for vrf in vrfs: - frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon='zebra') + frrconfig = self.getFRRconfig(f'vrf {vrf}', daemon=mgmt_daemon) self.assertNotIn(f' no ip nht resolve-via-default', frrconfig) self.assertNotIn(f' no ipv6 nht resolve-via-default', frrconfig) def test_vrf_conntrack(self): table = '8710' nftables_rules = { 'vrf_zones_ct_in': ['ct original zone set iifname map @ct_iface_map'], 'vrf_zones_ct_out': ['ct original zone set oifname map @ct_iface_map'] } self.cli_set(base_path + ['name', 'randomVRF', 'table', '1000']) self.cli_commit() # Conntrack rules should not be present for chain, rule in nftables_rules.items(): self.verify_nftables_chain(rule, 'inet vrf_zones', chain, inverse=True) # conntrack is only enabled once NAT, NAT66 or firewalling is enabled self.cli_set(['nat']) for vrf in vrfs: base = base_path + ['name', vrf] self.cli_set(base + ['table', table]) table = str(int(table) + 1) # We need the commit inside the loop to trigger the bug in T6603 self.cli_commit() # Conntrack rules should now be present for chain, rule in nftables_rules.items(): self.verify_nftables_chain(rule, 'inet vrf_zones', chain, inverse=False) # T6603: there should be only ONE entry for the iifname/oifname in the chains tmp = loads(cmd('sudo nft -j list table inet vrf_zones')) num_rules = len(search("nftables[].rule[].chain", tmp)) # ['vrf_zones_ct_in', 'vrf_zones_ct_out'] self.assertEqual(num_rules, 2) self.cli_delete(['nat']) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/src/conf_mode/interfaces_bonding.py b/src/conf_mode/interfaces_bonding.py index bbbfb0385..633fb797c 100755 --- a/src/conf_mode/interfaces_bonding.py +++ 
b/src/conf_mode/interfaces_bonding.py @@ -1,305 +1,304 @@ #!/usr/bin/env python3 # # Copyright (C) 2019-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.config import Config from vyos.configdict import get_interface_dict from vyos.configdict import is_node_changed from vyos.configdict import leaf_node_changed from vyos.configdict import is_member from vyos.configdict import is_source_interface from vyos.configverify import verify_address from vyos.configverify import verify_bridge_delete from vyos.configverify import verify_dhcpv6 from vyos.configverify import verify_eapol from vyos.configverify import verify_mirror_redirect from vyos.configverify import verify_mtu_ipv6 from vyos.configverify import verify_vlan_config from vyos.configverify import verify_vrf from vyos.ifconfig import BondIf from vyos.ifconfig.ethernet import EthernetIf from vyos.ifconfig import Section from vyos.template import render_to_string from vyos.utils.assertion import assert_mac from vyos.utils.dict import dict_search from vyos.utils.dict import dict_to_paths_values from vyos.utils.network import interface_exists from vyos.configdict import has_address_configured from vyos.configdict import has_vrf_configured from vyos.configdep import set_dependents, call_dependents from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def get_bond_mode(mode): if mode == 'round-robin': return 'balance-rr' elif mode == 'active-backup': return 'active-backup' elif mode == 'xor-hash': return 'balance-xor' elif mode == 'broadcast': return 'broadcast' elif mode == '802.3ad': return '802.3ad' elif mode == 'transmit-load-balance': return 'balance-tlb' elif mode == 'adaptive-load-balance': return 'balance-alb' else: raise ConfigError(f'invalid bond mode "{mode}"') def get_config(config=None): """ Retrive CLI config as dictionary. 
Dictionary can never be empty, as at least the interface name will be added or a deleted flag """ if config: conf = config else: conf = Config() base = ['interfaces', 'bonding'] ifname, bond = get_interface_dict(conf, base, with_pki=True) # To make our own life easier transfor the list of member interfaces # into a dictionary - we will use this to add additional information # later on for each member if 'member' in bond and 'interface' in bond['member']: # convert list of member interfaces to a dictionary bond['member']['interface'] = {k: {} for k in bond['member']['interface']} if 'mode' in bond: bond['mode'] = get_bond_mode(bond['mode']) tmp = is_node_changed(conf, base + [ifname, 'mode']) if tmp: bond['shutdown_required'] = {} tmp = is_node_changed(conf, base + [ifname, 'lacp-rate']) if tmp: bond['shutdown_required'] = {} # determine which members have been removed interfaces_removed = leaf_node_changed(conf, base + [ifname, 'member', 'interface']) # Reset config level to interfaces old_level = conf.get_level() conf.set_level(['interfaces']) if interfaces_removed: bond['shutdown_required'] = {} if 'member' not in bond: bond['member'] = {} tmp = {} for interface in interfaces_removed: # if member is deleted from bond, add dependencies to call # ethernet commit again in apply function # to apply options under ethernet section set_dependents('ethernet', conf, interface) section = Section.section(interface) # this will be 'ethernet' for 'eth0' if conf.exists([section, interface, 'disable']): tmp[interface] = {'disable': ''} else: tmp[interface] = {} # also present the interfaces to be removed from the bond as dictionary bond['member']['interface_remove'] = tmp # Restore existing config level conf.set_level(old_level) if dict_search('member.interface', bond): for interface, interface_config in bond['member']['interface'].items(): interface_ethernet_config = conf.get_config_dict( ['interfaces', 'ethernet', interface], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_defaults=False, with_recursive_defaults=False) interface_config['config_paths'] = dict_to_paths_values(interface_ethernet_config) # Check if member interface is a new member if not conf.exists_effective(base + [ifname, 'member', 'interface', interface]): bond['shutdown_required'] = {} interface_config['new_added'] = {} # Check if member interface is disabled conf.set_level(['interfaces']) section = Section.section(interface) # this will be 'ethernet' for 'eth0' if conf.exists([section, interface, 'disable']): interface_config['disable'] = '' conf.set_level(old_level) # Check if member interface is already member of another bridge tmp = is_member(conf, interface, 'bridge') if tmp: interface_config['is_bridge_member'] = tmp # Check if member interface is already member of a bond tmp = is_member(conf, interface, 'bonding') for tmp in is_member(conf, interface, 'bonding'): if bond['ifname'] == tmp: continue interface_config['is_bond_member'] = tmp # Check if member interface is used as source-interface on another interface tmp = is_source_interface(conf, interface) if tmp: interface_config['is_source_interface'] = tmp # bond members must not have an assigned address tmp = has_address_configured(conf, interface) if tmp: interface_config['has_address'] = {} # bond members must not have a VRF attached tmp = has_vrf_configured(conf, interface) if tmp: interface_config['has_vrf'] = {} return bond def verify(bond): if 'deleted' in bond: verify_bridge_delete(bond) return None if 'arp_monitor' in bond: if 
'target' in bond['arp_monitor'] and len(bond['arp_monitor']['target']) > 16: raise ConfigError('The maximum number of arp-monitor targets is 16') if 'interval' in bond['arp_monitor'] and int(bond['arp_monitor']['interval']) > 0: if bond['mode'] in ['802.3ad', 'balance-tlb', 'balance-alb']: raise ConfigError('ARP link monitoring does not work for mode 802.3ad, ' \ 'transmit-load-balance or adaptive-load-balance') if 'primary' in bond: if bond['mode'] not in ['active-backup', 'balance-tlb', 'balance-alb']: raise ConfigError('Option primary - mode dependency failed, not' 'supported in mode {mode}!'.format(**bond)) verify_mtu_ipv6(bond) verify_address(bond) verify_dhcpv6(bond) verify_vrf(bond) verify_mirror_redirect(bond) verify_eapol(bond) # use common function to verify VLAN configuration verify_vlan_config(bond) bond_name = bond['ifname'] if dict_search('member.interface', bond): for interface, interface_config in bond['member']['interface'].items(): error_msg = f'Can not add interface "{interface}" to bond, ' if interface == 'lo': raise ConfigError('Loopback interface "lo" can not be added to a bond') if not interface_exists(interface): raise ConfigError(error_msg + 'it does not exist!') if 'is_bridge_member' in interface_config: tmp = next(iter(interface_config['is_bridge_member'])) raise ConfigError(error_msg + f'it is already a member of bridge "{tmp}"!') if 'is_bond_member' in interface_config: tmp = next(iter(interface_config['is_bond_member'])) raise ConfigError(error_msg + f'it is already a member of bond "{tmp}"!') if 'is_source_interface' in interface_config: tmp = interface_config['is_source_interface'] raise ConfigError(error_msg + f'it is the source-interface of "{tmp}"!') if 'has_address' in interface_config: raise ConfigError(error_msg + 'it has an address assigned!') if 'has_vrf' in interface_config: raise ConfigError(error_msg + 'it has a VRF assigned!') if 'new_added' in interface_config and 'config_paths' in interface_config: for option_path, option_value in interface_config['config_paths'].items(): if option_path in EthernetIf.get_bond_member_allowed_options() : continue if option_path in BondIf.get_inherit_bond_options(): continue raise ConfigError(error_msg + f'it has a "{option_path.replace(".", " ")}" assigned!') if 'primary' in bond: if bond['primary'] not in bond['member']['interface']: raise ConfigError(f'Primary interface of bond "{bond_name}" must be a member interface') if bond['mode'] not in ['active-backup', 'balance-tlb', 'balance-alb']: raise ConfigError('primary interface only works for mode active-backup, ' \ 'transmit-load-balance or adaptive-load-balance') if 'system_mac' in bond: if bond['mode'] != '802.3ad': raise ConfigError('Actor MAC address only available in 802.3ad mode!') system_mac = bond['system_mac'] try: assert_mac(system_mac, test_all_zero=False) except: raise ConfigError(f'Cannot use a multicast MAC address "{system_mac}" as system-mac!') return None def generate(bond): bond['frr_zebra_config'] = '' if 'deleted' not in bond: bond['frr_zebra_config'] = render_to_string('frr/evpn.mh.frr.j2', bond) return None def apply(bond): ifname = bond['ifname'] b = BondIf(ifname) if 'deleted' in bond: # delete interface b.remove() else: b.update(bond) if dict_search('member.interface_remove', bond): try: call_dependents() except ConfigError: raise ConfigError('Error in updating ethernet interface ' 'after deleting it from bond') - zebra_daemon = 'zebra' # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() # The 
route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) + frr_cfg.load_configuration(frr.mgmt_daemon) frr_cfg.modify_section(f'^interface {ifname}', stop_pattern='^exit', remove_stop_mark=True) if 'frr_zebra_config' in bond: frr_cfg.add_before(frr.default_add_before, bond['frr_zebra_config']) - frr_cfg.commit_configuration(zebra_daemon) + frr_cfg.commit_configuration() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/interfaces_ethernet.py b/src/conf_mode/interfaces_ethernet.py index 34ce7bc47..edbbb00c9 100755 --- a/src/conf_mode/interfaces_ethernet.py +++ b/src/conf_mode/interfaces_ethernet.py @@ -1,360 +1,357 @@ #!/usr/bin/env python3 # # Copyright (C) 2019-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os from sys import exit from vyos.base import Warning from vyos.config import Config from vyos.configdict import get_interface_dict from vyos.configdict import is_node_changed from vyos.configverify import verify_address from vyos.configverify import verify_dhcpv6 from vyos.configverify import verify_interface_exists from vyos.configverify import verify_mirror_redirect from vyos.configverify import verify_mtu from vyos.configverify import verify_mtu_ipv6 from vyos.configverify import verify_vlan_config from vyos.configverify import verify_vrf from vyos.configverify import verify_bond_bridge_member from vyos.configverify import verify_eapol from vyos.ethtool import Ethtool from vyos.ifconfig import EthernetIf from vyos.ifconfig import BondIf from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.dict import dict_to_paths_values from vyos.utils.dict import dict_set from vyos.utils.dict import dict_delete from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def update_bond_options(conf: Config, eth_conf: dict) -> list: """ Return list of blocked options if interface is a bond member :param conf: Config object :type conf: Config :param eth_conf: Ethernet config dictionary :type eth_conf: dict :return: List of blocked options :rtype: list """ blocked_list = [] bond_name = list(eth_conf['is_bond_member'].keys())[0] config_without_defaults = conf.get_config_dict( ['interfaces', 'ethernet', eth_conf['ifname']], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_defaults=False, with_recursive_defaults=False) config_with_defaults = conf.get_config_dict( ['interfaces', 'ethernet', eth_conf['ifname']], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_defaults=True, with_recursive_defaults=True) bond_config_with_defaults = conf.get_config_dict( ['interfaces', 'bonding', bond_name], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_defaults=True, with_recursive_defaults=True) eth_dict_paths = 
dict_to_paths_values(config_without_defaults) eth_path_base = ['interfaces', 'ethernet', eth_conf['ifname']] #if option is configured under ethernet section for option_path, option_value in eth_dict_paths.items(): bond_option_value = dict_search(option_path, bond_config_with_defaults) #If option is allowed for changing then continue if option_path in EthernetIf.get_bond_member_allowed_options(): continue # if option is inherited from bond then set valued from bond interface if option_path in BondIf.get_inherit_bond_options(): # If option equals to bond option then do nothing if option_value == bond_option_value: continue else: # if ethernet has option and bond interface has # then copy it from bond if bond_option_value is not None: if is_node_changed(conf, eth_path_base + option_path.split('.')): Warning( f'Cannot apply "{option_path.replace(".", " ")}" to "{option_value}".' \ f' Interface "{eth_conf["ifname"]}" is a bond member.' \ f' Option is inherited from bond "{bond_name}"') dict_set(option_path, bond_option_value, eth_conf) continue # if ethernet has option and bond interface does not have # then delete it form dict and do not apply it else: if is_node_changed(conf, eth_path_base + option_path.split('.')): Warning( f'Cannot apply "{option_path.replace(".", " ")}".' \ f' Interface "{eth_conf["ifname"]}" is a bond member.' \ f' Option is inherited from bond "{bond_name}"') dict_delete(option_path, eth_conf) blocked_list.append(option_path) # if inherited option is not configured under ethernet section but configured under bond section for option_path in BondIf.get_inherit_bond_options(): bond_option_value = dict_search(option_path, bond_config_with_defaults) if bond_option_value is not None: if option_path not in eth_dict_paths: if is_node_changed(conf, eth_path_base + option_path.split('.')): Warning( f'Cannot apply "{option_path.replace(".", " ")}" to "{dict_search(option_path, config_with_defaults)}".' \ f' Interface "{eth_conf["ifname"]}" is a bond member. ' \ f'Option is inherited from bond "{bond_name}"') dict_set(option_path, bond_option_value, eth_conf) eth_conf['bond_blocked_changes'] = blocked_list return None def get_config(config=None): """ Retrive CLI config as dictionary. Dictionary can never be empty, as at least the interface name will be added or a deleted flag """ if config: conf = config else: conf = Config() base = ['interfaces', 'ethernet'] ifname, ethernet = get_interface_dict(conf, base, with_pki=True) # T5862 - default MTU is not acceptable in some environments # There are cloud environments available where the maximum supported # ethernet MTU is e.g. 
1450 bytes, thus we clamp this to the adapter's # maximum MTU value or 1500 bytes - whatever is lower if 'mtu' not in ethernet: try: ethernet['mtu'] = '1500' max_mtu = EthernetIf(ifname).get_max_mtu() if max_mtu < int(ethernet['mtu']): ethernet['mtu'] = str(max_mtu) except: pass if 'is_bond_member' in ethernet: update_bond_options(conf, ethernet) tmp = is_node_changed(conf, base + [ifname, 'speed']) if tmp: ethernet.update({'speed_duplex_changed': {}}) tmp = is_node_changed(conf, base + [ifname, 'duplex']) if tmp: ethernet.update({'speed_duplex_changed': {}}) return ethernet def verify_speed_duplex(ethernet: dict, ethtool: Ethtool): """ Verify speed and duplex :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict :param ethtool: Ethernet object :type ethtool: Ethtool """ if ((ethernet['speed'] == 'auto' and ethernet['duplex'] != 'auto') or (ethernet['speed'] != 'auto' and ethernet['duplex'] == 'auto')): raise ConfigError( 'Speed/Duplex mismatch. Must be both auto or manually configured') if ethernet['speed'] != 'auto' and ethernet['duplex'] != 'auto': # We need to verify if the requested speed and duplex setting is # supported by the underlying NIC. speed = ethernet['speed'] duplex = ethernet['duplex'] if not ethtool.check_speed_duplex(speed, duplex): raise ConfigError( f'Adapter does not support changing speed ' \ f'and duplex settings to: {speed}/{duplex}!') def verify_flow_control(ethernet: dict, ethtool: Ethtool): """ Verify flow control :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict :param ethtool: Ethernet object :type ethtool: Ethtool """ if 'disable_flow_control' in ethernet: if not ethtool.check_flow_control(): raise ConfigError( 'Adapter does not support changing flow-control settings!') def verify_ring_buffer(ethernet: dict, ethtool: Ethtool): """ Verify ring buffer :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict :param ethtool: Ethernet object :type ethtool: Ethtool """ if 'ring_buffer' in ethernet: max_rx = ethtool.get_ring_buffer_max('rx') if not max_rx: raise ConfigError( 'Driver does not support RX ring-buffer configuration!') max_tx = ethtool.get_ring_buffer_max('tx') if not max_tx: raise ConfigError( 'Driver does not support TX ring-buffer configuration!') rx = dict_search('ring_buffer.rx', ethernet) if rx and int(rx) > int(max_rx): raise ConfigError(f'Driver only supports a maximum RX ring-buffer ' \ f'size of "{max_rx}" bytes!') tx = dict_search('ring_buffer.tx', ethernet) if tx and int(tx) > int(max_tx): raise ConfigError(f'Driver only supports a maximum TX ring-buffer ' \ f'size of "{max_tx}" bytes!') def verify_offload(ethernet: dict, ethtool: Ethtool): """ Verify offloading capabilities :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict :param ethtool: Ethernet object :type ethtool: Ethtool """ if dict_search('offload.rps', ethernet) != None: if not os.path.exists(f'/sys/class/net/{ethernet["ifname"]}/queues/rx-0/rps_cpus'): raise ConfigError('Interface does not support RPS!') driver = ethtool.get_driver_name() # T3342 - Xen driver requires special treatment if driver == 'vif': if int(ethernet['mtu']) > 1500 and dict_search('offload.sg', ethernet) == None: raise ConfigError('Xen netback driver requires scatter-gather offloading '\ 'for MTU size larger than 1500 bytes') def verify_allowedbond_changes(ethernet: dict): """ Verify changed options if interface is in bonding :param ethernet: dictionary which
is received from get_interface_dict :type ethernet: dict """ if 'bond_blocked_changes' in ethernet: for option in ethernet['bond_blocked_changes']: raise ConfigError(f'Cannot configure "{option.replace(".", " ")}"' \ f' on interface "{ethernet["ifname"]}".' \ f' Interface is a bond member') def verify(ethernet): if 'deleted' in ethernet: return None if 'is_bond_member' in ethernet: verify_bond_member(ethernet) else: verify_ethernet(ethernet) def verify_bond_member(ethernet): """ Verification function for ethernet interface which is in bonding :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict """ ifname = ethernet['ifname'] verify_interface_exists(ethernet, ifname) verify_eapol(ethernet) verify_mirror_redirect(ethernet) ethtool = Ethtool(ifname) verify_speed_duplex(ethernet, ethtool) verify_flow_control(ethernet, ethtool) verify_ring_buffer(ethernet, ethtool) verify_offload(ethernet, ethtool) verify_allowedbond_changes(ethernet) def verify_ethernet(ethernet): """ Verification function for simple ethernet interface :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict """ ifname = ethernet['ifname'] verify_interface_exists(ethernet, ifname) verify_mtu(ethernet) verify_mtu_ipv6(ethernet) verify_dhcpv6(ethernet) verify_address(ethernet) verify_vrf(ethernet) verify_bond_bridge_member(ethernet) verify_eapol(ethernet) verify_mirror_redirect(ethernet) ethtool = Ethtool(ifname) # No need to check speed and duplex keys as both have default values. verify_speed_duplex(ethernet, ethtool) verify_flow_control(ethernet, ethtool) verify_ring_buffer(ethernet, ethtool) verify_offload(ethernet, ethtool) # use common function to verify VLAN configuration verify_vlan_config(ethernet) return None def generate(ethernet): if 'deleted' in ethernet: return None ethernet['frr_zebra_config'] = '' if 'deleted' not in ethernet: ethernet['frr_zebra_config'] = render_to_string('frr/evpn.mh.frr.j2', ethernet) return None def apply(ethernet): ifname = ethernet['ifname'] e = EthernetIf(ifname) if 'deleted' in ethernet: # delete interface e.remove() else: e.update(ethernet) - zebra_daemon = 'zebra' # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) + frr_cfg.load_configuration(frr.mgmt_daemon) frr_cfg.modify_section(f'^interface {ifname}', stop_pattern='^exit', remove_stop_mark=True) if 'frr_zebra_config' in ethernet: frr_cfg.add_before(frr.default_add_before, ethernet['frr_zebra_config']) - frr_cfg.commit_configuration(zebra_daemon) + frr_cfg.commit_configuration() if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/policy.py b/src/conf_mode/policy.py index a5963e72c..aef9b96c4 100755 --- a/src/conf_mode/policy.py +++ b/src/conf_mode/policy.py @@ -1,323 +1,320 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2022 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.config import Config from vyos.configdict import dict_merge from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def community_action_compatibility(actions: dict) -> bool: """ Check compatibility of values in community and large community sections :param actions: dictionary with community :type actions: dict :return: true if compatible, false if not :rtype: bool """ if ('none' in actions) and ('replace' in actions or 'add' in actions): return False if 'replace' in actions and 'add' in actions: return False if ('delete' in actions) and ('none' in actions or 'replace' in actions): return False return True def extcommunity_action_compatibility(actions: dict) -> bool: """ Check compatibility of values in extended community sections :param actions: dictionary with community :type actions: dict :return: true if compatible, false if not :rtype: bool """ if ('none' in actions) and ( 'rt' in actions or 'soo' in actions or 'bandwidth' in actions or 'bandwidth_non_transitive' in actions): return False if ('bandwidth_non_transitive' in actions) and ('bandwidth' not in actions): return False return True def routing_policy_find(key, dictionary): # Recursively traverse a dictionary and extract the value assigned to # a given key as generator object. This is made for routing policies, # thus also import/export is checked for k, v in dictionary.items(): if k == key: if isinstance(v, dict): for a, b in v.items(): if a in ['import', 'export']: yield b else: yield v elif isinstance(v, dict): for result in routing_policy_find(key, v): yield result elif isinstance(v, list): for d in v: if isinstance(d, dict): for result in routing_policy_find(key, d): yield result def get_config(config=None): if config: conf = config else: conf = Config() base = ['policy'] policy = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) # We also need some additional information from the config, prefix-lists # and route-maps for instance. They will be used in verify(). # # XXX: one MUST always call this without the key_mangling() option! See # vyos.configverify.verify_common_route_maps() for more information. tmp = conf.get_config_dict(['protocols'], key_mangling=('-', '_'), no_tag_node_value_mangle=True) # Merge policy dict into "regular" config dict policy = dict_merge(tmp, policy) return policy def verify(policy): if not policy: return None for policy_type in ['access_list', 'access_list6', 'as_path_list', 'community_list', 'extcommunity_list', 'large_community_list', 'prefix_list', 'prefix_list6', 'route_map']: # Bail out early and continue with next policy type if policy_type not in policy: continue # instance can be an ACL name/number, prefix-list name or route-map name for instance, instance_config in policy[policy_type].items(): # If no rule was found within the instance ... sad, but we can leave # early as nothing needs to be verified if 'rule' not in instance_config: continue # human readable instance name (hypen instead of underscore) policy_hr = policy_type.replace('_', '-') entries = [] for rule, rule_config in instance_config['rule'].items(): mandatory_error = f'must be specified for "{policy_hr} {instance} rule {rule}"!' 
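community_action_compatibility() encodes a small compatibility matrix that is easier to read as concrete cases. A standalone restatement mirroring the function above, with a few example inputs:

```python
def community_action_compatibility(actions: dict) -> bool:
    # 'none' clears the attribute, so it conflicts with add/replace/delete;
    # 'replace' and 'add' are likewise mutually exclusive.
    if 'none' in actions and ('replace' in actions or 'add' in actions):
        return False
    if 'replace' in actions and 'add' in actions:
        return False
    if 'delete' in actions and ('none' in actions or 'replace' in actions):
        return False
    return True

assert community_action_compatibility({'add': '65000:1'}) is True
assert community_action_compatibility({'add': '65000:1', 'delete': 'MY-LIST'}) is True
assert community_action_compatibility({'none': {}, 'add': '65000:1'}) is False
assert community_action_compatibility({'replace': '65000:1', 'add': '65000:2'}) is False
```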
if 'action' not in rule_config: raise ConfigError(f'Action {mandatory_error}') if policy_type == 'access_list': if 'source' not in rule_config: raise ConfigError(f'A source {mandatory_error}') if int(instance) in range(100, 200) or int( instance) in range(2000, 2700): if 'destination' not in rule_config: raise ConfigError( f'A destination {mandatory_error}') if policy_type == 'access_list6': if 'source' not in rule_config: raise ConfigError(f'A source {mandatory_error}') if policy_type in ['as_path_list', 'community_list', 'extcommunity_list', 'large_community_list']: if 'regex' not in rule_config: raise ConfigError(f'A regex {mandatory_error}') if policy_type in ['prefix_list', 'prefix_list6']: if 'prefix' not in rule_config: raise ConfigError(f'A prefix {mandatory_error}') if rule_config in entries: raise ConfigError( f'Rule "{rule}" contains a duplicate prefix definition!') entries.append(rule_config) # route-maps tend to be a bit more complex so they get their own verify() section if 'route_map' in policy: for route_map, route_map_config in policy['route_map'].items(): if 'rule' not in route_map_config: continue for rule, rule_config in route_map_config['rule'].items(): # Action 'deny' cannot be used with "continue" or "on-match" # FRR does not validate it T4827, T6676 if rule_config['action'] == 'deny' and ('continue' in rule_config or 'on_match' in rule_config): raise ConfigError(f'rule {rule} "continue" or "on-match" cannot be used with action deny!') # Specified community-list must exist tmp = dict_search('match.community.community_list', rule_config) if tmp and tmp not in policy.get('community_list', []): raise ConfigError(f'community-list {tmp} does not exist!') # Specified extended community-list must exist tmp = dict_search('match.extcommunity', rule_config) if tmp and tmp not in policy.get('extcommunity_list', []): raise ConfigError( f'extcommunity-list {tmp} does not exist!') # Specified large-community-list must exist tmp = dict_search('match.large_community.large_community_list', rule_config) if tmp and tmp not in policy.get('large_community_list', []): raise ConfigError( f'large-community-list {tmp} does not exist!') # Specified prefix-list must exist tmp = dict_search('match.ip.address.prefix_list', rule_config) if tmp and tmp not in policy.get('prefix_list', []): raise ConfigError(f'prefix-list {tmp} does not exist!') # Specified prefix-list must exist tmp = dict_search('match.ipv6.address.prefix_list', rule_config) if tmp and tmp not in policy.get('prefix_list6', []): raise ConfigError(f'prefix-list6 {tmp} does not exist!') # Specified access_list6 in nexthop must exist tmp = dict_search('match.ipv6.nexthop.access_list', rule_config) if tmp and tmp not in policy.get('access_list6', []): raise ConfigError(f'access_list6 {tmp} does not exist!') # Specified prefix-list6 in nexthop must exist tmp = dict_search('match.ipv6.nexthop.prefix_list', rule_config) if tmp and tmp not in policy.get('prefix_list6', []): raise ConfigError(f'prefix-list6 {tmp} does not exist!') tmp = dict_search('set.community.delete', rule_config) if tmp and tmp not in policy.get('community_list', []): raise ConfigError(f'community-list {tmp} does not exist!') tmp = dict_search('set.large_community.delete', rule_config) if tmp and tmp not in policy.get('large_community_list', []): raise ConfigError( f'large-community-list {tmp} does not exist!') if 'set' in rule_config: rule_action = rule_config['set'] if 'community' in rule_action: if not community_action_compatibility( rule_action['community']): 
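The numeric ranges in the access_list branch correspond to extended ACL numbers (100-199 and the expanded range 2000-2699), which match on source and destination, hence the extra mandatory field. A small standalone illustration with a hypothetical helper name:

```python
def acl_requires_destination(instance: str) -> bool:
    # Extended IPv4 access-lists also need a destination configured.
    number = int(instance)
    return number in range(100, 200) or number in range(2000, 2700)

assert acl_requires_destination('150') is True
assert acl_requires_destination('2100') is True
assert acl_requires_destination('99') is False   # standard ACL: source only
```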
raise ConfigError( f'Unexpected combination between action replace, add, delete or none in community') if 'large_community' in rule_action: if not community_action_compatibility( rule_action['large_community']): raise ConfigError( f'Unexpected combination between action replace, add, delete or none in large-community') if 'extcommunity' in rule_action: if not extcommunity_action_compatibility( rule_action['extcommunity']): raise ConfigError( f'Unexpected combination between none, rt, soo, bandwidth, bandwidth-non-transitive in extended-community') # When routing protocols are active some use prefix-lists, route-maps etc. # to apply the systems routing policy to the learned or redistributed routes. # When the "routing policy" changes and policies, route-maps etc. are deleted, # it is our responsibility to verify that the policy can not be deleted if it # is used by any routing protocol if 'protocols' in policy: for policy_type in ['access_list', 'access_list6', 'as_path_list', 'community_list', 'extcommunity_list', 'large_community_list', 'prefix_list', 'route_map']: if policy_type in policy: for policy_name in list(set(routing_policy_find(policy_type, policy[ 'protocols']))): found = False if policy_name in policy[policy_type]: found = True # BGP uses prefix-list for selecting both an IPv4 or IPv6 AFI related # list - we need to go the extra mile here and check both prefix-lists if policy_type == 'prefix_list' and 'prefix_list6' in policy and policy_name in \ policy['prefix_list6']: found = True if not found: tmp = policy_type.replace('_', '-') raise ConfigError( f'Can not delete {tmp} "{policy_name}", still in use!') return None def generate(policy): if not policy: return None policy['new_frr_config'] = render_to_string('frr/policy.frr.j2', policy) return None def apply(policy): - bgp_daemon = 'bgpd' - zebra_daemon = 'zebra' - # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(bgp_daemon) + frr_cfg.load_configuration(frr.bgp_daemon) frr_cfg.modify_section(r'^bgp as-path access-list .*') frr_cfg.modify_section(r'^bgp community-list .*') frr_cfg.modify_section(r'^bgp extcommunity-list .*') frr_cfg.modify_section(r'^bgp large-community-list .*') frr_cfg.modify_section(r'^route-map .*', stop_pattern='^exit', remove_stop_mark=True) if 'new_frr_config' in policy: frr_cfg.add_before(frr.default_add_before, policy['new_frr_config']) - frr_cfg.commit_configuration(bgp_daemon) + frr_cfg.commit_configuration(frr.bgp_daemon) # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) + frr_cfg.load_configuration(frr.zebra_daemon) frr_cfg.modify_section(r'^access-list .*') frr_cfg.modify_section(r'^ipv6 access-list .*') frr_cfg.modify_section(r'^ip prefix-list .*') frr_cfg.modify_section(r'^ipv6 prefix-list .*') frr_cfg.modify_section(r'^route-map .*', stop_pattern='^exit', remove_stop_mark=True) if 'new_frr_config' in policy: frr_cfg.add_before(frr.default_add_before, policy['new_frr_config']) - frr_cfg.commit_configuration(zebra_daemon) + frr_cfg.commit_configuration(frr.zebra_daemon) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_babel.py b/src/conf_mode/protocols_babel.py index 90b6e4a31..06fd9b9b6 100755 --- a/src/conf_mode/protocols_babel.py +++ b/src/conf_mode/protocols_babel.py @@ 
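The "still in use" protection walks the whole protocols tree with routing_policy_find() defined earlier in this file. A standalone sketch of what that lookup yields for a small fragment (the helper is restated in condensed form so the example runs on its own):

```python
def routing_policy_find(key, dictionary):
    # Condensed copy of the helper above: yield every value assigned to `key`
    # anywhere in the tree, unwrapping import/export sub-nodes.
    for k, v in dictionary.items():
        if k == key:
            if isinstance(v, dict):
                for a, b in v.items():
                    if a in ['import', 'export']:
                        yield b
            else:
                yield v
        elif isinstance(v, dict):
            yield from routing_policy_find(key, v)
        elif isinstance(v, list):
            for d in v:
                if isinstance(d, dict):
                    yield from routing_policy_find(key, d)

protocols = {
    'bgp': {'address_family': {'ipv4_unicast': {
        'route_map': {'import': 'RM-IN', 'export': 'RM-OUT'}}}},
    'ospf': {'redistribute': {'connected': {'route_map': 'RM-CONNECTED'}}},
}
print(sorted(set(routing_policy_find('route_map', protocols))))
# ['RM-CONNECTED', 'RM-IN', 'RM-OUT']
```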
-1,159 +1,157 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.config import Config from vyos.config import config_dict_merge from vyos.configdict import dict_merge from vyos.configdict import node_changed from vyos.configverify import verify_access_list from vyos.configverify import verify_prefix_list from vyos.utils.dict import dict_search from vyos.template import render_to_string from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() base = ['protocols', 'babel'] babel = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) # FRR has VRF support for different routing daemons. As interfaces belong # to VRFs - or the global VRF, we need to check for changed interfaces so # that they will be properly rendered for the FRR config. Also this eases # removal of interfaces from the running configuration. interfaces_removed = node_changed(conf, base + ['interface']) if interfaces_removed: babel['interface_removed'] = list(interfaces_removed) # Bail out early if configuration tree does not exist if not conf.exists(base): babel.update({'deleted' : ''}) return babel # We have gathered the dict representation of the CLI, but there are default # values which we need to update into the dictionary retrieved. default_values = conf.get_config_defaults(base, key_mangling=('-', '_'), get_first_key=True, recursive=True) # merge in default values babel = config_dict_merge(default_values, babel) # We also need some additional information from the config, prefix-lists # and route-maps for instance. They will be used in verify(). # # XXX: one MUST always call this without the key_mangling() option! See # vyos.configverify.verify_common_route_maps() for more information. 
tmp = conf.get_config_dict(['policy']) # Merge policy dict into "regular" config dict babel = dict_merge(tmp, babel) return babel def verify(babel): if not babel: return None # verify distribute_list if "distribute_list" in babel: acl_keys = { "ipv4": [ "distribute_list.ipv4.access_list.in", "distribute_list.ipv4.access_list.out", ], "ipv6": [ "distribute_list.ipv6.access_list.in", "distribute_list.ipv6.access_list.out", ] } prefix_list_keys = { "ipv4": [ "distribute_list.ipv4.prefix_list.in", "distribute_list.ipv4.prefix_list.out", ], "ipv6":[ "distribute_list.ipv6.prefix_list.in", "distribute_list.ipv6.prefix_list.out", ] } for address_family in ["ipv4", "ipv6"]: for iface_key in babel["distribute_list"].get(address_family, {}).get("interface", {}).keys(): acl_keys[address_family].extend([ f"distribute_list.{address_family}.interface.{iface_key}.access_list.in", f"distribute_list.{address_family}.interface.{iface_key}.access_list.out" ]) prefix_list_keys[address_family].extend([ f"distribute_list.{address_family}.interface.{iface_key}.prefix_list.in", f"distribute_list.{address_family}.interface.{iface_key}.prefix_list.out" ]) for address_family, keys in acl_keys.items(): for key in keys: acl = dict_search(key, babel) if acl: verify_access_list(acl, babel, version='6' if address_family == 'ipv6' else '') for address_family, keys in prefix_list_keys.items(): for key in keys: prefix_list = dict_search(key, babel) if prefix_list: verify_prefix_list(prefix_list, babel, version='6' if address_family == 'ipv6' else '') def generate(babel): if not babel or 'deleted' in babel: return None babel['new_frr_config'] = render_to_string('frr/babeld.frr.j2', babel) return None def apply(babel): - babel_daemon = 'babeld' - # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(babel_daemon) + frr_cfg.load_configuration(frr.babel_daemon) frr_cfg.modify_section('^router babel', stop_pattern='^exit', remove_stop_mark=True) for key in ['interface', 'interface_removed']: if key not in babel: continue for interface in babel[key]: frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) if 'new_frr_config' in babel: frr_cfg.add_before(frr.default_add_before, babel['new_frr_config']) - frr_cfg.commit_configuration(babel_daemon) + frr_cfg.commit_configuration(frr.babel_daemon) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_bfd.py b/src/conf_mode/protocols_bfd.py index 1361bb1a9..d94ec6a0d 100755 --- a/src/conf_mode/protocols_bfd.py +++ b/src/conf_mode/protocols_bfd.py @@ -1,112 +1,110 @@ #!/usr/bin/env python3 # # Copyright (C) 2019-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
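The distribute-list verification builds dotted lookup paths per interface and resolves them with dict_search(). A standalone sketch with a minimal stand-in for dict_search (the real helper lives in vyos.utils.dict) showing how the per-interface keys expand:

```python
def dict_search(path: str, data: dict):
    # Minimal stand-in: walk a dotted path, return None if any key is missing.
    node = data
    for part in path.split('.'):
        if not isinstance(node, dict) or part not in node:
            return None
        node = node[part]
    return node

babel = {'distribute_list': {'ipv4': {'interface': {
    'eth0': {'access_list': {'in': '100'}}}}}}

keys = []
for iface in babel['distribute_list']['ipv4'].get('interface', {}):
    keys.append(f'distribute_list.ipv4.interface.{iface}.access_list.in')
    keys.append(f'distribute_list.ipv4.interface.{iface}.access_list.out')

for key in keys:
    print(key, '->', dict_search(key, babel))
# distribute_list.ipv4.interface.eth0.access_list.in -> 100
# distribute_list.ipv4.interface.eth0.access_list.out -> None
```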
from vyos.config import Config from vyos.configverify import verify_vrf from vyos.template import is_ipv6 from vyos.template import render_to_string from vyos.utils.network import is_ipv6_link_local from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() base = ['protocols', 'bfd'] bfd = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) # Bail out early if configuration tree does not exist if not conf.exists(base): return bfd bfd = conf.merge_defaults(bfd, recursive=True) return bfd def verify(bfd): if not bfd: return None if 'peer' in bfd: for peer, peer_config in bfd['peer'].items(): # IPv6 link local peers require an explicit local address/interface if is_ipv6_link_local(peer): if 'source' not in peer_config or len(peer_config['source']) < 2: raise ConfigError('BFD IPv6 link-local peers require explicit local address and interface setting') # IPv6 peers require an explicit local address if is_ipv6(peer): if 'source' not in peer_config or 'address' not in peer_config['source']: raise ConfigError('BFD IPv6 peers require explicit local address setting') if 'multihop' in peer_config: # multihop require source address if 'source' not in peer_config or 'address' not in peer_config['source']: raise ConfigError('BFD multihop require source address') # multihop and echo-mode cannot be used together if 'echo_mode' in peer_config: raise ConfigError('BFD multihop and echo-mode cannot be used together') # multihop doesn't accept interface names if 'source' in peer_config and 'interface' in peer_config['source']: raise ConfigError('BFD multihop and source interface cannot be used together') if 'minimum_ttl' in peer_config and 'multihop' not in peer_config: raise ConfigError('Minimum TTL is only available for multihop BFD sessions!') if 'profile' in peer_config: profile_name = peer_config['profile'] if 'profile' not in bfd or profile_name not in bfd['profile']: raise ConfigError(f'BFD profile "{profile_name}" does not exist!') if 'vrf' in peer_config: verify_vrf(peer_config) return None def generate(bfd): if not bfd: return None bfd['new_frr_config'] = render_to_string('frr/bfdd.frr.j2', bfd) def apply(bfd): - bfd_daemon = 'bfdd' - # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(bfd_daemon) + frr_cfg.load_configuration(frr.bfd_daemon) frr_cfg.modify_section('^bfd', stop_pattern='^exit', remove_stop_mark=True) if 'new_frr_config' in bfd: frr_cfg.add_before(frr.default_add_before, bfd['new_frr_config']) - frr_cfg.commit_configuration(bfd_daemon) + frr_cfg.commit_configuration(frr.bfd_daemon) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_bgp.py b/src/conf_mode/protocols_bgp.py index 22f020099..e5c46aee6 100755 --- a/src/conf_mode/protocols_bgp.py +++ b/src/conf_mode/protocols_bgp.py @@ -1,655 +1,653 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. 
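The BFD peer rules that follow interact: link-local peers need both a local address and interface, IPv6 peers need at least a local address, and multihop forbids a source interface and echo-mode. A condensed standalone restatement of those conditions (simplified, plain dicts, not the full verify()):

```python
def check_bfd_peer(cfg: dict, *, link_local: bool = False, ipv6: bool = False) -> None:
    source = cfg.get('source', {})
    if link_local and ('address' not in source or 'interface' not in source):
        raise ValueError('link-local peers need an explicit local address and interface')
    if ipv6 and 'address' not in source:
        raise ValueError('IPv6 peers need an explicit local address')
    if 'multihop' in cfg:
        if 'address' not in source:
            raise ValueError('multihop requires a source address')
        if 'echo_mode' in cfg:
            raise ValueError('multihop and echo-mode cannot be used together')
        if 'interface' in source:
            raise ValueError('multihop and source interface cannot be used together')
    if 'minimum_ttl' in cfg and 'multihop' not in cfg:
        raise ValueError('minimum TTL is only available for multihop sessions')

# fe80:: peer: both source address and interface are present, so this passes
check_bfd_peer({'source': {'address': 'fe80::2', 'interface': 'eth0'}},
               link_local=True, ipv6=True)
```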
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from sys import argv from vyos.base import Warning from vyos.config import Config from vyos.configdict import dict_merge from vyos.configdict import node_changed from vyos.configverify import verify_prefix_list from vyos.configverify import verify_route_map from vyos.configverify import verify_vrf from vyos.template import is_ip from vyos.template import is_interface from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_vrf from vyos.utils.network import is_addr_assigned from vyos.utils.process import process_named_running from vyos.utils.process import call from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() vrf = None if len(argv) > 1: vrf = argv[1] base_path = ['protocols', 'bgp'] # eqivalent of the C foo ? 'a' : 'b' statement base = vrf and ['vrf', 'name', vrf, 'protocols', 'bgp'] or base_path bgp = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) bgp['dependent_vrfs'] = conf.get_config_dict(['vrf', 'name'], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) # Remove per interface MPLS configuration - get a list if changed # nodes under the interface tagNode interfaces_removed = node_changed(conf, base + ['interface']) if interfaces_removed: bgp['interface_removed'] = list(interfaces_removed) # Assign the name of our VRF context. This MUST be done before the return # statement below, else on deletion we will delete the default instance # instead of the VRF instance. if vrf: bgp.update({'vrf' : vrf}) # We can not delete the BGP VRF instance if there is a L3VNI configured # FRR L3VNI must be deleted first otherwise we will see error: # "FRR error: Please unconfigure l3vni 3000" tmp = ['vrf', 'name', vrf, 'vni'] if conf.exists_effective(tmp): bgp.update({'vni' : conf.return_effective_value(tmp)}) # We can safely delete ourself from the dependent vrf list if vrf in bgp['dependent_vrfs']: del bgp['dependent_vrfs'][vrf] bgp['dependent_vrfs'].update({'default': {'protocols': { 'bgp': conf.get_config_dict(base_path, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True)}}}) if not conf.exists(base): # If bgp instance is deleted then mark it bgp.update({'deleted' : ''}) return bgp # We have gathered the dict representation of the CLI, but there are default # options which we need to update into the dictionary retrived. bgp = conf.merge_defaults(bgp, recursive=True) # We also need some additional information from the config, prefix-lists # and route-maps for instance. They will be used in verify(). # # XXX: one MUST always call this without the key_mangling() option! See # vyos.configverify.verify_common_route_maps() for more information. 
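The base-path selection in get_config() uses the old and/or ternary idiom; it is safe here because the VRF path list is always truthy. A standalone illustration next to the equivalent modern conditional expression:

```python
base_path = ['protocols', 'bgp']

for vrf in (None, 'blue'):
    # and/or ternary: falls back to base_path when vrf is None or empty
    base = vrf and ['vrf', 'name', vrf, 'protocols', 'bgp'] or base_path
    # equivalent, and safer when the first branch could itself be falsy
    base_modern = ['vrf', 'name', vrf, 'protocols', 'bgp'] if vrf else base_path
    assert base == base_modern
    print(vrf, '->', base)
```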
tmp = conf.get_config_dict(['policy']) # Merge policy dict into "regular" config dict bgp = dict_merge(tmp, bgp) return bgp def verify_vrf_as_import(search_vrf_name: str, afi_name: str, vrfs_config: dict) -> bool: """ :param search_vrf_name: search vrf name in import list :type search_vrf_name: str :param afi_name: afi/safi name :type afi_name: str :param vrfs_config: configuration dependents vrfs :type vrfs_config: dict :return: if vrf in import list retrun true else false :rtype: bool """ for vrf_name, vrf_config in vrfs_config.items(): import_list = dict_search( f'protocols.bgp.address_family.{afi_name}.import.vrf', vrf_config) if import_list: if search_vrf_name in import_list: return True return False def verify_vrf_import_options(afi_config: dict) -> bool: """ Search if afi contains one of options :param afi_config: afi/safi :type afi_config: dict :return: if vrf contains rd and route-target options return true else false :rtype: bool """ options = [ f'rd.vpn.export', f'route_target.vpn.import', f'route_target.vpn.export', f'route_target.vpn.both' ] for option in options: if dict_search(option, afi_config): return True return False def verify_vrf_import(vrf_name: str, vrfs_config: dict, afi_name: str) -> bool: """ Verify if vrf exists and contain options :param vrf_name: name of VRF :type vrf_name: str :param vrfs_config: dependent vrfs config :type vrfs_config: dict :param afi_name: afi/safi name :type afi_name: str :return: if vrf contains rd and route-target options return true else false :rtype: bool """ if vrf_name != 'default': verify_vrf({'vrf': vrf_name}) if dict_search(f'{vrf_name}.protocols.bgp.address_family.{afi_name}', vrfs_config): afi_config = \ vrfs_config[vrf_name]['protocols']['bgp']['address_family'][ afi_name] if verify_vrf_import_options(afi_config): return True return False def verify_vrflist_import(afi_name: str, afi_config: dict, vrfs_config: dict) -> bool: """ Call function to verify if scpecific vrf contains rd and route-target options return true else false :param afi_name: afi/safi name :type afi_name: str :param afi_config: afi/safi configuration :type afi_config: dict :param vrfs_config: dependent vrfs config :type vrfs_config:dict :return: if vrf contains rd and route-target options return true else false :rtype: bool """ for vrf_name in afi_config['import']['vrf']: if verify_vrf_import(vrf_name, vrfs_config, afi_name): return True return False def verify_remote_as(peer_config, bgp_config): if 'remote_as' in peer_config: return peer_config['remote_as'] if 'peer_group' in peer_config: peer_group_name = peer_config['peer_group'] tmp = dict_search(f'peer_group.{peer_group_name}.remote_as', bgp_config) if tmp: return tmp if 'interface' in peer_config: if 'remote_as' in peer_config['interface']: return peer_config['interface']['remote_as'] if 'peer_group' in peer_config['interface']: peer_group_name = peer_config['interface']['peer_group'] tmp = dict_search(f'peer_group.{peer_group_name}.remote_as', bgp_config) if tmp: return tmp if 'v6only' in peer_config['interface']: if 'remote_as' in peer_config['interface']['v6only']: return peer_config['interface']['v6only']['remote_as'] if 'peer_group' in peer_config['interface']['v6only']: peer_group_name = peer_config['interface']['v6only']['peer_group'] tmp = dict_search(f'peer_group.{peer_group_name}.remote_as', bgp_config) if tmp: return tmp return None def verify_afi(peer_config, bgp_config): # If address_family configured under neighboor if 'address_family' in peer_config: return True # If address_family 
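verify_remote_as() resolves the effective remote-as by falling back from the neighbor itself to its peer-group and then to the interface/v6only sub-nodes. A condensed standalone illustration of that fallback order (it skips the peer-group lookups nested under interface/v6only that the real function also performs):

```python
def effective_remote_as(peer: dict, bgp: dict):
    # fallback chain: neighbor -> peer-group -> interface -> interface v6only
    if 'remote_as' in peer:
        return peer['remote_as']
    group = peer.get('peer_group')
    if group:
        group_as = bgp.get('peer_group', {}).get(group, {}).get('remote_as')
        if group_as:
            return group_as
    iface = peer.get('interface', {})
    if 'remote_as' in iface:
        return iface['remote_as']
    return iface.get('v6only', {}).get('remote_as')

bgp = {'peer_group': {'UPSTREAM': {'remote_as': '65010'}}}
print(effective_remote_as({'peer_group': 'UPSTREAM'}, bgp))   # 65010
print(effective_remote_as({'remote_as': 'internal'}, bgp))    # internal
```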
configured under peer-group # if neighbor interface configured peer_group_name = None if dict_search('interface.peer_group', peer_config): peer_group_name = peer_config['interface']['peer_group'] elif dict_search('interface.v6only.peer_group', peer_config): peer_group_name = peer_config['interface']['v6only']['peer_group'] # if neighbor IP configured. if 'peer_group' in peer_config: peer_group_name = peer_config['peer_group'] if peer_group_name: tmp = dict_search(f'peer_group.{peer_group_name}.address_family', bgp_config) if tmp: return True return False def verify(bgp): if 'deleted' in bgp: if 'vrf' in bgp: # Cannot delete vrf if it exists in import vrf list in other vrfs for tmp_afi in ['ipv4_unicast', 'ipv6_unicast']: if verify_vrf_as_import(bgp['vrf'], tmp_afi, bgp['dependent_vrfs']): raise ConfigError(f'Cannot delete VRF instance "{bgp["vrf"]}", ' \ 'unconfigure "import vrf" commands!') else: # We are running in the default VRF context, thus we can not delete # our main BGP instance if there are dependent BGP VRF instances. if 'dependent_vrfs' in bgp: for vrf, vrf_options in bgp['dependent_vrfs'].items(): if vrf != 'default': if dict_search('protocols.bgp', vrf_options): raise ConfigError('Cannot delete default BGP instance, ' \ 'dependent VRF instance(s) exist(s)!') if 'vni' in vrf_options: raise ConfigError('Cannot delete default BGP instance, ' \ 'dependent L3VNI exists!') return None if 'system_as' not in bgp: raise ConfigError('BGP system-as number must be defined!') # Verify BMP if 'bmp' in bgp: # check bmp flag "bgpd -d -F traditional --daemon -A 127.0.0.1 -M rpki -M bmp" if not process_named_running('bgpd', 'bmp'): raise ConfigError( f'"bmp" flag is not found in bgpd. Configure "set system frr bmp" and restart bgp process' ) # check bmp target if 'target' in bgp['bmp']: for target, target_config in bgp['bmp']['target'].items(): if 'address' not in target_config: raise ConfigError(f'BMP target "{target}" address must be defined!') # Verify vrf on interface and bgp section if 'interface' in bgp: for interface in bgp['interface']: error_msg = f'Interface "{interface}" belongs to different VRF instance' tmp = get_interface_vrf(interface) if 'vrf' in bgp: if bgp['vrf'] != tmp: vrf = bgp['vrf'] raise ConfigError(f'{error_msg} "{vrf}"!') elif tmp != 'default': raise ConfigError(f'{error_msg} "{tmp}"!') peer_groups_context = dict() # Common verification for both peer-group and neighbor statements for neighbor in ['neighbor', 'peer_group']: # bail out early if there is no neighbor or peer-group statement # this also saves one indention level if neighbor not in bgp: continue for peer, peer_config in bgp[neighbor].items(): # Only regular "neighbor" statement can have a peer-group set # Check if the configure peer-group exists if 'peer_group' in peer_config: peer_group = peer_config['peer_group'] if 'peer_group' not in bgp or peer_group not in bgp['peer_group']: raise ConfigError(f'Specified peer-group "{peer_group}" for '\ f'neighbor "{neighbor}" does not exist!') if 'remote_as' in peer_config: is_ibgp = True if peer_config['remote_as'] != 'internal' and \ peer_config['remote_as'] != bgp['system_as']: is_ibgp = False if peer_group not in peer_groups_context: peer_groups_context[peer_group] = is_ibgp elif peer_groups_context[peer_group] != is_ibgp: raise ConfigError(f'Peer-group members must be ' f'all internal or all external') if 'local_role' in peer_config: #Ensure Local Role has only one value. 
if len(peer_config['local_role']) > 1: raise ConfigError(f'Only one local role can be specified for peer "{peer}"!') if 'local_as' in peer_config: if len(peer_config['local_as']) > 1: raise ConfigError(f'Only one local-as number can be specified for peer "{peer}"!') # Neighbor local-as override can not be the same as the local-as # we use for this BGP instane! asn = list(peer_config['local_as'].keys())[0] if asn == bgp['system_as']: raise ConfigError('Cannot have local-as same as system-as number') # Neighbor AS specified for local-as and remote-as can not be the same if dict_search('remote_as', peer_config) == asn and neighbor != 'peer_group': raise ConfigError(f'Neighbor "{peer}" has local-as specified which is '\ 'the same as remote-as, this is not allowed!') # ttl-security and ebgp-multihop can't be used in the same configration if 'ebgp_multihop' in peer_config and 'ttl_security' in peer_config: raise ConfigError('You can not set both ebgp-multihop and ttl-security hops') # interface and ebgp-multihop can't be used in the same configration if 'ebgp_multihop' in peer_config and 'interface' in peer_config: raise ConfigError(f'Ebgp-multihop can not be used with directly connected '\ f'neighbor "{peer}"') # Check if neighbor has both override capability and strict capability match # configured at the same time. if 'override_capability' in peer_config and 'strict_capability_match' in peer_config: raise ConfigError(f'Neighbor "{peer}" cannot have both override-capability and '\ 'strict-capability-match configured at the same time!') # Check spaces in the password if 'password' in peer_config and ' ' in peer_config['password']: raise ConfigError('Whitespace is not allowed in passwords!') # Some checks can/must only be done on a neighbor and not a peer-group if neighbor == 'neighbor': # remote-as must be either set explicitly for the neighbor # or for the entire peer-group if not verify_remote_as(peer_config, bgp): raise ConfigError(f'Neighbor "{peer}" remote-as must be set!') if not verify_afi(peer_config, bgp): Warning(f'BGP neighbor "{peer}" requires address-family!') # Peer-group member cannot override remote-as of peer-group if 'peer_group' in peer_config: peer_group = peer_config['peer_group'] if 'remote_as' in peer_config and 'remote_as' in bgp['peer_group'][peer_group]: raise ConfigError(f'Peer-group member "{peer}" cannot override remote-as of peer-group "{peer_group}"!') if 'interface' in peer_config: if 'peer_group' in peer_config['interface']: peer_group = peer_config['interface']['peer_group'] if 'remote_as' in peer_config['interface'] and 'remote_as' in bgp['peer_group'][peer_group]: raise ConfigError(f'Peer-group member "{peer}" cannot override remote-as of peer-group "{peer_group}"!') if 'v6only' in peer_config['interface']: if 'peer_group' in peer_config['interface']['v6only']: peer_group = peer_config['interface']['v6only']['peer_group'] if 'remote_as' in peer_config['interface']['v6only'] and 'remote_as' in bgp['peer_group'][peer_group]: raise ConfigError(f'Peer-group member "{peer}" cannot override remote-as of peer-group "{peer_group}"!') # Only checks for ipv4 and ipv6 neighbors # Check if neighbor address is assigned as system interface address vrf = None vrf_error_msg = f' in default VRF!' if 'vrf' in bgp: vrf = bgp['vrf'] vrf_error_msg = f' in VRF "{vrf}"!' 
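Two of the local-as rules just above compare AS numbers as the plain strings that come out of the config dict. A standalone restatement (hypothetical helper, simplified arguments):

```python
def check_local_as(peer: str, local_as: str, remote_as: str, system_as: str) -> None:
    # local-as override must differ from our own system-as ...
    if local_as == system_as:
        raise ValueError('Cannot have local-as same as system-as number')
    # ... and from the effective remote-as of that neighbor
    if remote_as == local_as:
        raise ValueError(f'Neighbor "{peer}" local-as equals remote-as, not allowed')

check_local_as('192.0.2.1', local_as='65001', remote_as='65002', system_as='65000')  # ok
```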
if is_ip(peer) and is_addr_assigned(peer, vrf): raise ConfigError(f'Can not configure local address as neighbor "{peer}"{vrf_error_msg}') elif is_interface(peer): if 'peer_group' in peer_config: raise ConfigError(f'peer-group must be set under the interface node of "{peer}"') if 'remote_as' in peer_config: raise ConfigError(f'remote-as must be set under the interface node of "{peer}"') if 'source_interface' in peer_config['interface']: raise ConfigError(f'"source-interface" option not allowed for neighbor "{peer}"') # Local-AS allowed only for EBGP peers if 'local_as' in peer_config: remote_as = verify_remote_as(peer_config, bgp) if remote_as == bgp['system_as']: raise ConfigError(f'local-as configured for "{peer}", allowed only for eBGP peers!') for afi in ['ipv4_unicast', 'ipv4_multicast', 'ipv4_labeled_unicast', 'ipv4_flowspec', 'ipv6_unicast', 'ipv6_multicast', 'ipv6_labeled_unicast', 'ipv6_flowspec', 'l2vpn_evpn']: # Bail out early if address family is not configured if 'address_family' not in peer_config or afi not in peer_config['address_family']: continue # Check if neighbor has both ipv4 unicast and ipv4 labeled unicast configured at the same time. if 'ipv4_unicast' in peer_config['address_family'] and 'ipv4_labeled_unicast' in peer_config['address_family']: raise ConfigError(f'Neighbor "{peer}" cannot have both ipv4-unicast and ipv4-labeled-unicast configured at the same time!') # Check if neighbor has both ipv6 unicast and ipv6 labeled unicast configured at the same time. if 'ipv6_unicast' in peer_config['address_family'] and 'ipv6_labeled_unicast' in peer_config['address_family']: raise ConfigError(f'Neighbor "{peer}" cannot have both ipv6-unicast and ipv6-labeled-unicast configured at the same time!') afi_config = peer_config['address_family'][afi] if 'conditionally_advertise' in afi_config: if 'advertise_map' not in afi_config['conditionally_advertise']: raise ConfigError('Must speficy advertise-map when conditionally-advertise is in use!') # Verify advertise-map (which is a route-map) exists verify_route_map(afi_config['conditionally_advertise']['advertise_map'], bgp) if ('exist_map' not in afi_config['conditionally_advertise'] and 'non_exist_map' not in afi_config['conditionally_advertise']): raise ConfigError('Must either speficy exist-map or non-exist-map when ' \ 'conditionally-advertise is in use!') if {'exist_map', 'non_exist_map'} <= set(afi_config['conditionally_advertise']): raise ConfigError('Can not specify both exist-map and non-exist-map for ' \ 'conditionally-advertise!') if 'exist_map' in afi_config['conditionally_advertise']: verify_route_map(afi_config['conditionally_advertise']['exist_map'], bgp) if 'non_exist_map' in afi_config['conditionally_advertise']: verify_route_map(afi_config['conditionally_advertise']['non_exist_map'], bgp) # T4332: bgp deterministic-med cannot be disabled while addpath-tx-bestpath-per-AS is in use if 'addpath_tx_per_as' in afi_config: if dict_search('parameters.deterministic_med', bgp) == None: raise ConfigError('addpath-tx-per-as requires BGP deterministic-med paramtere to be set!') # Validate if configured Prefix list exists if 'prefix_list' in afi_config: for tmp in ['import', 'export']: if tmp not in afi_config['prefix_list']: # bail out early continue if afi == 'ipv4_unicast': verify_prefix_list(afi_config['prefix_list'][tmp], bgp) elif afi == 'ipv6_unicast': verify_prefix_list(afi_config['prefix_list'][tmp], bgp, version='6') if 'route_map' in afi_config: for tmp in ['import', 'export']: if tmp in afi_config['route_map']: 
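The conditionally-advertise checks lean on set comparisons over the config keys: an advertise-map is mandatory, at least one of exist-map/non-exist-map must be present, and both together are rejected. A standalone illustration of the subset idiom:

```python
def check_conditional_advertise(cond: dict) -> None:
    if 'advertise_map' not in cond:
        raise ValueError('Must specify advertise-map when conditionally-advertise is in use!')
    if 'exist_map' not in cond and 'non_exist_map' not in cond:
        raise ValueError('Must either specify exist-map or non-exist-map!')
    # subset test: True only when *both* keys are present
    if {'exist_map', 'non_exist_map'} <= set(cond):
        raise ValueError('Can not specify both exist-map and non-exist-map!')

check_conditional_advertise({'advertise_map': 'RM-ADV', 'exist_map': 'RM-EXIST'})  # ok
```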
verify_route_map(afi_config['route_map'][tmp], bgp) if 'route_reflector_client' in afi_config: peer_group_as = peer_config.get('remote_as') if peer_group_as is None or (peer_group_as != 'internal' and peer_group_as != bgp['system_as']): raise ConfigError('route-reflector-client only supported for iBGP peers') else: if 'peer_group' in peer_config: peer_group_as = dict_search(f'peer_group.{peer_group}.remote_as', bgp) if peer_group_as is None or (peer_group_as != 'internal' and peer_group_as != bgp['system_as']): raise ConfigError('route-reflector-client only supported for iBGP peers') # T5833 not all AFIs are supported for VRF if 'vrf' in bgp and 'address_family' in peer_config: unsupported_vrf_afi = { 'ipv4_flowspec', 'ipv6_flowspec', 'ipv4_labeled_unicast', 'ipv6_labeled_unicast', 'ipv4_vpn', 'ipv6_vpn', } for afi in peer_config['address_family']: if afi in unsupported_vrf_afi: raise ConfigError( f"VRF is not allowed for address-family '{afi.replace('_', '-')}'" ) # Throw an error if a peer group is not configured for allow range for prefix in dict_search('listen.range', bgp) or []: # we can not use dict_search() here as prefix contains dots ... if 'peer_group' not in bgp['listen']['range'][prefix]: raise ConfigError(f'Listen range for prefix "{prefix}" has no peer group configured.') peer_group = bgp['listen']['range'][prefix]['peer_group'] if 'peer_group' not in bgp or peer_group not in bgp['peer_group']: raise ConfigError(f'Peer-group "{peer_group}" for listen range "{prefix}" does not exist!') if not verify_remote_as(bgp['listen']['range'][prefix], bgp): raise ConfigError(f'Peer-group "{peer_group}" requires remote-as to be set!') # Throw an error if the global administrative distance parameters aren't all filled out. if dict_search('parameters.distance.global', bgp) != None: for key in ['external', 'internal', 'local']: if dict_search(f'parameters.distance.global.{key}', bgp) == None: raise ConfigError('Missing mandatory configuration option for '\ f'global administrative distance {key}!') # TCP keepalive requires all three parameters to be set if dict_search('parameters.tcp_keepalive', bgp) != None: if not {'idle', 'interval', 'probes'} <= set(bgp['parameters']['tcp_keepalive']): raise ConfigError('TCP keepalive incomplete - idle, keepalive and probes must be set') # Address Family specific validation if 'address_family' in bgp: for afi, afi_config in bgp['address_family'].items(): if 'distance' in afi_config: # Throw an error if the address family specific administrative # distance parameters aren't all filled out. 
for key in ['external', 'internal', 'local']: if key not in afi_config['distance']: raise ConfigError('Missing mandatory configuration option for '\ f'{afi} administrative distance {key}!') if afi in ['ipv4_unicast', 'ipv6_unicast']: vrf_name = bgp['vrf'] if dict_search('vrf', bgp) else 'default' # Verify if currant VRF contains rd and route-target options # and does not exist in import list in other VRFs if dict_search(f'rd.vpn.export', afi_config): if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']): raise ConfigError( 'Command "import vrf" conflicts with "rd vpn export" command!') if not dict_search('parameters.router_id', bgp): Warning(f'BGP "router-id" is required when using "rd" and "route-target"!') if dict_search('route_target.vpn.both', afi_config): if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']): raise ConfigError( 'Command "import vrf" conflicts with "route-target vpn both" command!') if dict_search('route_target.vpn.export', afi_config): raise ConfigError( 'Command "route-target vpn export" conflicts '\ 'with "route-target vpn both" command!') if dict_search('route_target.vpn.import', afi_config): raise ConfigError( 'Command "route-target vpn import" conflicts '\ 'with "route-target vpn both" command!') if dict_search('route_target.vpn.import', afi_config): if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']): raise ConfigError( 'Command "import vrf conflicts" with "route-target vpn import" command!') if dict_search('route_target.vpn.export', afi_config): if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']): raise ConfigError( 'Command "import vrf" conflicts with "route-target vpn export" command!') # Verify if VRFs in import do not contain rd # and route-target options if dict_search('import.vrf', afi_config) is not None: # Verify if VRF with import does not contain rd # and route-target options if verify_vrf_import_options(afi_config): raise ConfigError( 'Please unconfigure "import vrf" commands before using vpn commands in the same VRF!') # Verify if VRFs in import list do not contain rd # and route-target options if verify_vrflist_import(afi, afi_config, bgp['dependent_vrfs']): raise ConfigError( 'Please unconfigure import vrf commands before using vpn commands in dependent VRFs!') # FRR error: please unconfigure vpn to vrf commands before # using import vrf commands if 'vpn' in afi_config['import'] or dict_search('export.vpn', afi_config) != None: raise ConfigError('Please unconfigure VPN to VRF commands before '\ 'using "import vrf" commands!') # Verify that the export/import route-maps do exist for export_import in ['export', 'import']: tmp = dict_search(f'route_map.vpn.{export_import}', afi_config) if tmp: verify_route_map(tmp, bgp) # per-vrf sid and per-af sid are mutually exclusive if 'sid' in afi_config and 'sid' in bgp: raise ConfigError('SID per VRF and SID per address-family are mutually exclusive!') # Checks only required for L2VPN EVPN if afi in ['l2vpn_evpn']: if 'vni' in afi_config: for vni, vni_config in afi_config['vni'].items(): if 'rd' in vni_config and 'advertise_all_vni' not in afi_config: raise ConfigError('BGP EVPN "rd" requires "advertise-all-vni" to be set!') if 'route_target' in vni_config and 'advertise_all_vni' not in afi_config: raise ConfigError('BGP EVPN "route-target" requires "advertise-all-vni" to be set!') return None def generate(bgp): if not bgp or 'deleted' in bgp: return None bgp['frr_bgpd_config'] = render_to_string('frr/bgpd.frr.j2', bgp) return None def apply(bgp): if 'deleted' in bgp: # We 
need to ensure that the L3VNI is deleted first. # This is not possible with old config backend # priority bug if {'vrf', 'vni'} <= set(bgp): call('vtysh -c "conf t" -c "vrf {vrf}" -c "no vni {vni}"'.format(**bgp)) - bgp_daemon = 'bgpd' - # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() # Generate empty helper string which can be ammended to FRR commands, it # will be either empty (default VRF) or contain the "vrf <name" statement vrf = '' if 'vrf' in bgp: vrf = ' vrf ' + bgp['vrf'] - frr_cfg.load_configuration(bgp_daemon) + frr_cfg.load_configuration(frr.bgp_daemon) # Remove interface specific config for key in ['interface', 'interface_removed']: if key not in bgp: continue for interface in bgp[key]: frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) frr_cfg.modify_section(f'^router bgp \d+{vrf}', stop_pattern='^exit', remove_stop_mark=True) if 'frr_bgpd_config' in bgp: frr_cfg.add_before(frr.default_add_before, bgp['frr_bgpd_config']) - frr_cfg.commit_configuration(bgp_daemon) + frr_cfg.commit_configuration(frr.bgp_daemon) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_eigrp.py b/src/conf_mode/protocols_eigrp.py index c13e52a3d..a948b47da 100755 --- a/src/conf_mode/protocols_eigrp.py +++ b/src/conf_mode/protocols_eigrp.py @@ -1,119 +1,117 @@ #!/usr/bin/env python3 # -# Copyright (C) 2022 VyOS maintainers and contributors +# Copyright (C) 2022-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from sys import argv from vyos.config import Config from vyos.configdict import dict_merge from vyos.configverify import verify_vrf from vyos.template import render_to_string from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() vrf = None if len(argv) > 1: vrf = argv[1] base_path = ['protocols', 'eigrp'] # eqivalent of the C foo ? 'a' : 'b' statement base = vrf and ['vrf', 'name', vrf, 'protocols', 'eigrp'] or base_path eigrp = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) # Assign the name of our VRF context. This MUST be done before the return # statement below, else on deletion we will delete the default instance # instead of the VRF instance. if vrf: eigrp.update({'vrf' : vrf}) if not conf.exists(base): eigrp.update({'deleted' : ''}) if not vrf: # We are running in the default VRF context, thus we can not delete # our main EIGRP instance if there are dependent EIGRP VRF instances. eigrp['dependent_vrfs'] = conf.get_config_dict(['vrf', 'name'], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) return eigrp # We also need some additional information from the config, prefix-lists # and route-maps for instance. 
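The L3VNI cleanup at the top of apply() expands both keys straight from the config dict via str.format(**bgp), guarded by a subset test so it only runs when VRF and VNI are both present. A standalone illustration that prints the command instead of handing it to call():

```python
bgp = {'vrf': 'blue', 'vni': '3000', 'deleted': ''}

if {'vrf', 'vni'} <= set(bgp):
    # in the config script this string is passed to vyos.utils.process.call()
    command = 'vtysh -c "conf t" -c "vrf {vrf}" -c "no vni {vni}"'.format(**bgp)
    print(command)
# vtysh -c "conf t" -c "vrf blue" -c "no vni 3000"
```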
They will be used in verify(). # # XXX: one MUST always call this without the key_mangling() option! See # vyos.configverify.verify_common_route_maps() for more information. tmp = conf.get_config_dict(['policy']) # Merge policy dict into "regular" config dict eigrp = dict_merge(tmp, eigrp) return eigrp def verify(eigrp): if not eigrp or 'deleted' in eigrp: return if 'system_as' not in eigrp: raise ConfigError('EIGRP system-as must be defined!') if 'vrf' in eigrp: verify_vrf(eigrp) def generate(eigrp): if not eigrp or 'deleted' in eigrp: return None eigrp['frr_eigrpd_config'] = render_to_string('frr/eigrpd.frr.j2', eigrp) def apply(eigrp): - eigrp_daemon = 'eigrpd' - # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() # Generate empty helper string which can be ammended to FRR commands, it # will be either empty (default VRF) or contain the "vrf <name" statement vrf = '' if 'vrf' in eigrp: vrf = ' vrf ' + eigrp['vrf'] - frr_cfg.load_configuration(eigrp_daemon) + frr_cfg.load_configuration(frr.eigrp_daemon) frr_cfg.modify_section(f'^router eigrp \d+{vrf}', stop_pattern='^exit', remove_stop_mark=True) if 'frr_eigrpd_config' in eigrp: frr_cfg.add_before(frr.default_add_before, eigrp['frr_eigrpd_config']) - frr_cfg.commit_configuration(eigrp_daemon) + frr_cfg.commit_configuration(frr.eigrp_daemon) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_isis.py b/src/conf_mode/protocols_isis.py index ba2f3cf0d..9b70c7329 100755 --- a/src/conf_mode/protocols_isis.py +++ b/src/conf_mode/protocols_isis.py @@ -1,312 +1,310 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from sys import argv from vyos.config import Config from vyos.configdict import dict_merge from vyos.configdict import node_changed from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_interface_exists from vyos.ifconfig import Interface from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_config from vyos.template import render_to_string from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() vrf = None if len(argv) > 1: vrf = argv[1] base_path = ['protocols', 'isis'] # eqivalent of the C foo ? 'a' : 'b' statement base = vrf and ['vrf', 'name', vrf, 'protocols', 'isis'] or base_path isis = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) # Assign the name of our VRF context. This MUST be done before the return # statement below, else on deletion we will delete the default instance # instead of the VRF instance. if vrf: isis['vrf'] = vrf # FRR has VRF support for different routing daemons. 
As interfaces belong # to VRFs - or the global VRF, we need to check for changed interfaces so # that they will be properly rendered for the FRR config. Also this eases # removal of interfaces from the running configuration. interfaces_removed = node_changed(conf, base + ['interface']) if interfaces_removed: isis['interface_removed'] = list(interfaces_removed) # Bail out early if configuration tree does no longer exist. this must # be done after retrieving the list of interfaces to be removed. if not conf.exists(base): isis.update({'deleted' : ''}) return isis # merge in default values isis = conf.merge_defaults(isis, recursive=True) # We also need some additional information from the config, prefix-lists # and route-maps for instance. They will be used in verify(). # # XXX: one MUST always call this without the key_mangling() option! See # vyos.configverify.verify_common_route_maps() for more information. tmp = conf.get_config_dict(['policy']) # Merge policy dict into "regular" config dict isis = dict_merge(tmp, isis) return isis def verify(isis): # bail out early - looks like removal from running config if not isis or 'deleted' in isis: return None if 'net' not in isis: raise ConfigError('Network entity is mandatory!') # last byte in IS-IS area address must be 0 tmp = isis['net'].split('.') if int(tmp[-1]) != 0: raise ConfigError('Last byte of IS-IS network entity title must always be 0!') verify_common_route_maps(isis) # If interface not set if 'interface' not in isis: raise ConfigError('Interface used for routing updates is mandatory!') for interface in isis['interface']: verify_interface_exists(isis, interface) # Interface MTU must be >= configured lsp-mtu mtu = Interface(interface).get_mtu() area_mtu = isis['lsp_mtu'] # Recommended maximum PDU size = interface MTU - 3 bytes recom_area_mtu = mtu - 3 if mtu < int(area_mtu) or int(area_mtu) > recom_area_mtu: raise ConfigError(f'Interface {interface} has MTU {mtu}, ' \ f'current area MTU is {area_mtu}! \n' \ f'Recommended area lsp-mtu {recom_area_mtu} or less ' \ '(calculated on MTU size).') if 'vrf' in isis: # If interface specific options are set, we must ensure that the # interface is bound to our requesting VRF. Due to the VyOS # priorities the interface is bound to the VRF after creation of # the VRF itself, and before any routing protocol is configured. vrf = isis['vrf'] tmp = get_interface_config(interface) if 'master' not in tmp or tmp['master'] != vrf: raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!') # If md5 and plaintext-password set at the same time for password in ['area_password', 'domain_password']: if password in isis: if {'md5', 'plaintext_password'} <= set(isis[password]): tmp = password.replace('_', '-') raise ConfigError(f'Can use either md5 or plaintext-password for {tmp}!') # If one param from delay set, but not set others if 'spf_delay_ietf' in isis: required_timers = ['holddown', 'init_delay', 'long_delay', 'short_delay', 'time_to_learn'] exist_timers = [] for elm_timer in required_timers: if elm_timer in isis['spf_delay_ietf']: exist_timers.append(elm_timer) exist_timers = set(required_timers).difference(set(exist_timers)) if len(exist_timers) > 0: raise ConfigError('All types of spf-delay must be configured. 
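The lsp-mtu sanity check leaves 3 bytes of headroom below the interface MTU. A standalone restatement of the arithmetic (hypothetical helper name):

```python
def check_lsp_mtu(interface_mtu: int, lsp_mtu: int) -> None:
    # Recommended maximum PDU size = interface MTU - 3 bytes
    recommended = interface_mtu - 3
    if interface_mtu < lsp_mtu or lsp_mtu > recommended:
        raise ValueError(f'lsp-mtu {lsp_mtu} too large for interface MTU '
                         f'{interface_mtu}; use {recommended} or less')

check_lsp_mtu(1500, 1497)     # ok: exactly interface MTU - 3
# check_lsp_mtu(1500, 1498)   # would raise: only 2 bytes of headroom
```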
Missing: ' + ', '.join(exist_timers).replace('_', '-')) # If Redistribute set, but level don't set if 'redistribute' in isis: proc_level = isis.get('level','').replace('-','_') for afi in ['ipv4', 'ipv6']: if afi not in isis['redistribute']: continue for proto, proto_config in isis['redistribute'][afi].items(): if 'level_1' not in proto_config and 'level_2' not in proto_config: raise ConfigError(f'Redistribute level-1 or level-2 should be specified in ' \ f'"protocols isis redistribute {afi} {proto}"!') for redistr_level, redistr_config in proto_config.items(): if proc_level and proc_level != 'level_1_2' and proc_level != redistr_level: raise ConfigError(f'"protocols isis redistribute {afi} {proto} {redistr_level}" ' \ f'can not be used with \"protocols isis level {proc_level}\"!') # Segment routing checks if dict_search('segment_routing.global_block', isis): g_high_label_value = dict_search('segment_routing.global_block.high_label_value', isis) g_low_label_value = dict_search('segment_routing.global_block.low_label_value', isis) # If segment routing global block high or low value is blank, throw error if not (g_low_label_value or g_high_label_value): raise ConfigError('Segment routing global-block requires both low and high value!') # If segment routing global block low value is higher than the high value, throw error if int(g_low_label_value) > int(g_high_label_value): raise ConfigError('Segment routing global-block low value must be lower than high value') if dict_search('segment_routing.local_block', isis): if dict_search('segment_routing.global_block', isis) == None: raise ConfigError('Segment routing local-block requires global-block to be configured!') l_high_label_value = dict_search('segment_routing.local_block.high_label_value', isis) l_low_label_value = dict_search('segment_routing.local_block.low_label_value', isis) # If segment routing local-block high or low value is blank, throw error if not (l_low_label_value or l_high_label_value): raise ConfigError('Segment routing local-block requires both high and low value!') # If segment routing local-block low value is higher than the high value, throw error if int(l_low_label_value) > int(l_high_label_value): raise ConfigError('Segment routing local-block low value must be lower than high value') # local-block most live outside global block global_range = range(int(g_low_label_value), int(g_high_label_value) +1) local_range = range(int(l_low_label_value), int(l_high_label_value) +1) # Check for overlapping ranges if list(set(global_range) & set(local_range)): raise ConfigError(f'Segment-Routing Global Block ({g_low_label_value}/{g_high_label_value}) '\ f'conflicts with Local Block ({l_low_label_value}/{l_high_label_value})!') # Check for a blank or invalid value per prefix if dict_search('segment_routing.prefix', isis): for prefix, prefix_config in isis['segment_routing']['prefix'].items(): if 'absolute' in prefix_config: if prefix_config['absolute'].get('value') is None: raise ConfigError(f'Segment routing prefix {prefix} absolute value cannot be blank.') elif 'index' in prefix_config: if prefix_config['index'].get('value') is None: raise ConfigError(f'Segment routing prefix {prefix} index value cannot be blank.') # Check for explicit-null and no-php-flag configured at the same time per prefix if dict_search('segment_routing.prefix', isis): for prefix, prefix_config in isis['segment_routing']['prefix'].items(): if 'absolute' in prefix_config: if ("explicit_null" in prefix_config['absolute']) and ("no_php_flag" in 
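The SRGB/SRLB overlap test materialises both label ranges and intersects them as sets. A standalone sketch with example bounds (the values below are only illustrative defaults, not taken from this patch):

```python
g_low, g_high = 16000, 23999   # example global-block (SRGB) bounds
l_low, l_high = 15000, 15999   # example local-block (SRLB) bounds

global_range = range(g_low, g_high + 1)
local_range = range(l_low, l_high + 1)

# overlap exists when the intersection of both label sets is non-empty
if set(global_range) & set(local_range):
    raise ValueError(f'Global Block ({g_low}/{g_high}) conflicts '
                     f'with Local Block ({l_low}/{l_high})!')
print('label blocks do not overlap')
```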
prefix_config['absolute']): raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\ f'and no-php-flag configured at the same time.') elif 'index' in prefix_config: if ("explicit_null" in prefix_config['index']) and ("no_php_flag" in prefix_config['index']): raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\ f'and no-php-flag configured at the same time.') # Check for index ranges being larger than the segment routing global block if dict_search('segment_routing.global_block', isis): g_high_label_value = dict_search('segment_routing.global_block.high_label_value', isis) g_low_label_value = dict_search('segment_routing.global_block.low_label_value', isis) g_label_difference = int(g_high_label_value) - int(g_low_label_value) if dict_search('segment_routing.prefix', isis): for prefix, prefix_config in isis['segment_routing']['prefix'].items(): if 'index' in prefix_config: index_size = isis['segment_routing']['prefix'][prefix]['index']['value'] if int(index_size) > int(g_label_difference): raise ConfigError(f'Segment routing prefix {prefix} cannot have an '\ f'index base size larger than the SRGB label base.') # Check for LFA tiebreaker index duplication if dict_search('fast_reroute.lfa.local.tiebreaker', isis): comparison_dictionary = {} for item, item_options in isis['fast_reroute']['lfa']['local']['tiebreaker'].items(): for index, index_options in item_options.items(): for index_value, index_value_options in index_options.items(): if index_value not in comparison_dictionary.keys(): comparison_dictionary[index_value] = [item] else: comparison_dictionary[index_value].append(item) for index, index_length in comparison_dictionary.items(): if int(len(index_length)) > 1: raise ConfigError(f'LFA index {index} cannot have more than one tiebreaker configured.') # Check for LFA priority-limit configured multiple times per level if dict_search('fast_reroute.lfa.local.priority_limit', isis): comparison_dictionary = {} for priority, priority_options in isis['fast_reroute']['lfa']['local']['priority_limit'].items(): for level, level_options in priority_options.items(): if level not in comparison_dictionary.keys(): comparison_dictionary[level] = [priority] else: comparison_dictionary[level].append(priority) for level, level_length in comparison_dictionary.items(): if int(len(level_length)) > 1: raise ConfigError(f'LFA priority-limit on {level.replace("_", "-")} cannot have more than one priority configured.') # Check for LFA remote prefix list configured with more than one list if dict_search('fast_reroute.lfa.remote.prefix_list', isis): if int(len(isis['fast_reroute']['lfa']['remote']['prefix_list'].items())) > 1: raise ConfigError(f'LFA remote prefix-list has more than one configured. 
Cannot have more than one configured.') return None def generate(isis): if not isis or 'deleted' in isis: return None isis['frr_isisd_config'] = render_to_string('frr/isisd.frr.j2', isis) return None def apply(isis): - isis_daemon = 'isisd' - # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() # Generate empty helper string which can be ammended to FRR commands, it # will be either empty (default VRF) or contain the "vrf <name" statement vrf = '' if 'vrf' in isis: vrf = ' vrf ' + isis['vrf'] - frr_cfg.load_configuration(isis_daemon) + frr_cfg.load_configuration(frr.isis_daemon) frr_cfg.modify_section(f'^router isis VyOS{vrf}', stop_pattern='^exit', remove_stop_mark=True) for key in ['interface', 'interface_removed']: if key not in isis: continue for interface in isis[key]: frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) if 'frr_isisd_config' in isis: frr_cfg.add_before(frr.default_add_before, isis['frr_isisd_config']) - frr_cfg.commit_configuration(isis_daemon) + frr_cfg.commit_configuration(frr.isis_daemon) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_mpls.py b/src/conf_mode/protocols_mpls.py index ad164db9f..4a05b8044 100755 --- a/src/conf_mode/protocols_mpls.py +++ b/src/conf_mode/protocols_mpls.py @@ -1,148 +1,146 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2022 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os from sys import exit from glob import glob from vyos.config import Config from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.file import read_file from vyos.utils.system import sysctl_write from vyos.configverify import verify_interface_exists from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() config_file = r'/tmp/ldpd.frr' def get_config(config=None): if config: conf = config else: conf = Config() base = ['protocols', 'mpls'] mpls = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) return mpls def verify(mpls): # If no config, then just bail out early. if not mpls: return None if 'interface' in mpls: for interface in mpls['interface']: verify_interface_exists(mpls, interface) # Checks to see if LDP is properly configured if 'ldp' in mpls: # If router ID not defined if 'router_id' not in mpls['ldp']: raise ConfigError('Router ID missing. An LDP router id is mandatory!') # If interface not set if 'interface' not in mpls['ldp']: raise ConfigError('LDP interfaces are missing. 
An LDP interface is mandatory!') # If transport addresses are not set if not dict_search('ldp.discovery.transport_ipv4_address', mpls) and \ not dict_search('ldp.discovery.transport_ipv6_address', mpls): raise ConfigError('LDP transport address missing!') return None def generate(mpls): # If there's no MPLS config generated, create dictionary key with no value. if not mpls or 'deleted' in mpls: return None mpls['frr_ldpd_config'] = render_to_string('frr/ldpd.frr.j2', mpls) return None def apply(mpls): - ldpd_damon = 'ldpd' - # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(ldpd_damon) + frr_cfg.load_configuration(frr.ldpd_daemon) frr_cfg.modify_section(f'^mpls ldp', stop_pattern='^exit', remove_stop_mark=True) if 'frr_ldpd_config' in mpls: frr_cfg.add_before(frr.default_add_before, mpls['frr_ldpd_config']) - frr_cfg.commit_configuration(ldpd_damon) + frr_cfg.commit_configuration(frr.ldpd_daemon) # Set number of entries in the platform label tables labels = '0' if 'interface' in mpls: labels = '1048575' sysctl_write('net.mpls.platform_labels', labels) # Check for changes in global MPLS options if 'parameters' in mpls: # Choose whether to copy IP TTL to MPLS header TTL if 'no_propagate_ttl' in mpls['parameters']: sysctl_write('net.mpls.ip_ttl_propagate', 0) # Choose whether to limit maximum MPLS header TTL if 'maximum_ttl' in mpls['parameters']: ttl = mpls['parameters']['maximum_ttl'] sysctl_write('net.mpls.default_ttl', ttl) else: # Set default global MPLS options if not defined. sysctl_write('net.mpls.ip_ttl_propagate', 1) sysctl_write('net.mpls.default_ttl', 255) # Enable and disable MPLS processing on interfaces per configuration if 'interface' in mpls: system_interfaces = [] # Populate system interfaces list with local MPLS capable interfaces for interface in glob('/proc/sys/net/mpls/conf/*'): system_interfaces.append(os.path.basename(interface)) # This is where the comparison is done on if an interface needs to be enabled/disabled. for system_interface in system_interfaces: interface_state = read_file(f'/proc/sys/net/mpls/conf/{system_interface}/input') if '1' in interface_state: if system_interface not in mpls['interface']: system_interface = system_interface.replace('.', '/') sysctl_write(f'net.mpls.conf.{system_interface}.input', 0) elif '0' in interface_state: if system_interface in mpls['interface']: system_interface = system_interface.replace('.', '/') sysctl_write(f'net.mpls.conf.{system_interface}.input', 1) else: system_interfaces = [] # If MPLS interfaces are not configured, set MPLS processing disabled for interface in glob('/proc/sys/net/mpls/conf/*'): system_interfaces.append(os.path.basename(interface)) for system_interface in system_interfaces: system_interface = system_interface.replace('.', '/') sysctl_write(f'net.mpls.conf.{system_interface}.input', 0) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_openfabric.py b/src/conf_mode/protocols_openfabric.py index 8e8c50c06..b60117a02 100644 --- a/src/conf_mode/protocols_openfabric.py +++ b/src/conf_mode/protocols_openfabric.py @@ -1,145 +1,143 @@ #!/usr/bin/env python3 # # Copyright (C) 2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. 
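Note on the MPLS apply() above: everything is driven through sysctl - the platform label table size, TTL propagation, and the per-interface `input` flag, where VLAN interfaces such as `eth0.10` have the dot rewritten to `/` in sysctl key notation. A minimal standalone sketch of that per-interface decision; the helper name and its arguments are invented for illustration only:

```python
# Illustrative only: mirrors the enable/disable decision in the MPLS apply()
# above. 'mpls_input_changes' and its parameters are hypothetical names.
def mpls_input_changes(system_state: dict, configured: set) -> dict:
    """Return the sysctl keys that need to change, mapped to 0 or 1."""
    changes = {}
    for ifname, enabled in system_state.items():
        wanted = 1 if ifname in configured else 0
        if int(enabled) != wanted:
            key = ifname.replace('.', '/')   # VLAN "eth0.10" -> "eth0/10"
            changes[f'net.mpls.conf.{key}.input'] = wanted
    return changes

print(mpls_input_changes({'eth0': '1', 'eth0.10': '0'}, {'eth0.10'}))
# {'net.mpls.conf.eth0.input': 0, 'net.mpls.conf.eth0/10.input': 1}
```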
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.base import Warning from vyos.config import Config from vyos.configdict import node_changed from vyos.configverify import verify_interface_exists from vyos.template import render_to_string from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() base_path = ['protocols', 'openfabric'] openfabric = conf.get_config_dict(base_path, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) # Remove per domain MPLS configuration - get a list of all changed Openfabric domains # (removed and added) so that they will be properly rendered for the FRR config. openfabric['domains_all'] = list(conf.list_nodes(' '.join(base_path) + f' domain') + node_changed(conf, base_path + ['domain'])) # Get a list of all interfaces openfabric['interfaces_all'] = [] for domain in openfabric['domains_all']: interfaces_modified = list(node_changed(conf, base_path + ['domain', domain, 'interface']) + conf.list_nodes(' '.join(base_path) + f' domain {domain} interface')) openfabric['interfaces_all'].extend(interfaces_modified) if not conf.exists(base_path): openfabric.update({'deleted': ''}) return openfabric def verify(openfabric): # bail out early - looks like removal from running config if not openfabric or 'deleted' in openfabric: return None if 'net' not in openfabric: raise ConfigError('Network entity is mandatory!') # last byte in OpenFabric area address must be 0 tmp = openfabric['net'].split('.') if int(tmp[-1]) != 0: raise ConfigError('Last byte of OpenFabric network entity title must always be 0!') if 'domain' not in openfabric: raise ConfigError('OpenFabric domain name is mandatory!') interfaces_used = [] for domain, domain_config in openfabric['domain'].items(): # If interface not set if 'interface' not in domain_config: raise ConfigError(f'Interface used for routing updates in OpenFabric "{domain}" is mandatory!') for iface, iface_config in domain_config['interface'].items(): verify_interface_exists(openfabric, iface) # interface can be activated only on one OpenFabric instance if iface in interfaces_used: raise ConfigError(f'Interface {iface} is already used in different OpenFabric instance!') if 'address_family' not in iface_config or len(iface_config['address_family']) < 1: raise ConfigError(f'Need to specify address family for the interface "{iface}"!') # If md5 and plaintext-password set at the same time if 'password' in iface_config: if {'md5', 'plaintext_password'} <= set(iface_config['password']): raise ConfigError(f'Can use either md5 or plaintext-password for password for the interface!') if iface == 'lo' and 'passive' not in iface_config: Warning('For loopback interface passive mode is implied!') interfaces_used.append(iface) # If md5 and plaintext-password set at the same time password = 'domain_password' if password in domain_config: if {'md5', 'plaintext_password'} <= set(domain_config[password]): raise ConfigError(f'Can use either md5 or plaintext-password for domain-password!') return None def generate(openfabric): if not openfabric or 'deleted' in openfabric: 
return None openfabric['frr_fabricd_config'] = render_to_string('frr/fabricd.frr.j2', openfabric) return None def apply(openfabric): - openfabric_daemon = 'fabricd' - # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(openfabric_daemon) + frr_cfg.load_configuration(frr.openfabric_daemon) for domain in openfabric['domains_all']: frr_cfg.modify_section(f'^router openfabric {domain}', stop_pattern='^exit', remove_stop_mark=True) for interface in openfabric['interfaces_all']: frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) if 'frr_fabricd_config' in openfabric: frr_cfg.add_before(frr.default_add_before, openfabric['frr_fabricd_config']) - frr_cfg.commit_configuration(openfabric_daemon) + frr_cfg.commit_configuration(frr.openfabric_daemon) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_ospf.py b/src/conf_mode/protocols_ospf.py index 7347c4faa..44817b9b7 100755 --- a/src/conf_mode/protocols_ospf.py +++ b/src/conf_mode/protocols_ospf.py @@ -1,290 +1,288 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from sys import argv from vyos.config import Config from vyos.config import config_dict_merge from vyos.configdict import dict_merge from vyos.configdict import node_changed from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_route_map from vyos.configverify import verify_interface_exists from vyos.configverify import verify_access_list from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_config from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() vrf = None if len(argv) > 1: vrf = argv[1] base_path = ['protocols', 'ospf'] # eqivalent of the C foo ? 'a' : 'b' statement base = vrf and ['vrf', 'name', vrf, 'protocols', 'ospf'] or base_path ospf = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) # Assign the name of our VRF context. This MUST be done before the return # statement below, else on deletion we will delete the default instance # instead of the VRF instance. if vrf: ospf['vrf'] = vrf # FRR has VRF support for different routing daemons. As interfaces belong # to VRFs - or the global VRF, we need to check for changed interfaces so # that they will be properly rendered for the FRR config. Also this eases # removal of interfaces from the running configuration. interfaces_removed = node_changed(conf, base + ['interface']) if interfaces_removed: ospf['interface_removed'] = list(interfaces_removed) # Bail out early if configuration tree does no longer exist. 
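The recurring edit of this patch is visible again in the OpenFabric apply() above: each protocol script stops defining its own daemon-name literal and references the constants exported by vyos.frr instead. A condensed sketch of the resulting pattern, assuming a VyOS host with FRR running (it is not runnable elsewhere):

```python
from vyos import frr

def apply_sketch(rendered_config: str) -> None:
    frr_cfg = frr.FRRConfig()
    # was: frr_cfg.load_configuration('fabricd')
    frr_cfg.load_configuration(frr.openfabric_daemon)
    frr_cfg.add_before(frr.default_add_before, rendered_config)
    # was: frr_cfg.commit_configuration('fabricd')
    frr_cfg.commit_configuration(frr.openfabric_daemon)
```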
this must # be done after retrieving the list of interfaces to be removed. if not conf.exists(base): ospf.update({'deleted' : ''}) return ospf # We have gathered the dict representation of the CLI, but there are default # options which we need to update into the dictionary retrived. default_values = conf.get_config_defaults(**ospf.kwargs, recursive=True) # We have to cleanup the default dict, as default values could enable features # which are not explicitly enabled on the CLI. Example: default-information # originate comes with a default metric-type of 2, which will enable the # entire default-information originate tree, even when not set via CLI so we # need to check this first and probably drop that key. if dict_search('default_information.originate', ospf) is None: del default_values['default_information'] if 'mpls_te' not in ospf: del default_values['mpls_te'] if 'graceful_restart' not in ospf: del default_values['graceful_restart'] for area_num in default_values.get('area', []): if dict_search(f'area.{area_num}.area_type.nssa', ospf) is None: del default_values['area'][area_num]['area_type']['nssa'] for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'rip', 'static']: if dict_search(f'redistribute.{protocol}', ospf) is None: del default_values['redistribute'][protocol] if not bool(default_values['redistribute']): del default_values['redistribute'] for interface in ospf.get('interface', []): # We need to reload the defaults on every pass b/c of # hello-multiplier dependency on dead-interval # If hello-multiplier is set, we need to remove the default from # dead-interval. if 'hello_multiplier' in ospf['interface'][interface]: del default_values['interface'][interface]['dead_interval'] ospf = config_dict_merge(default_values, ospf) # We also need some additional information from the config, prefix-lists # and route-maps for instance. They will be used in verify(). # # XXX: one MUST always call this without the key_mangling() option! See # vyos.configverify.verify_common_route_maps() for more information. tmp = conf.get_config_dict(['policy']) # Merge policy dict into "regular" config dict ospf = dict_merge(tmp, ospf) return ospf def verify(ospf): if not ospf: return None verify_common_route_maps(ospf) # As we can have a default-information route-map, we need to validate it! route_map_name = dict_search('default_information.originate.route_map', ospf) if route_map_name: verify_route_map(route_map_name, ospf) # Validate if configured Access-list exists if 'area' in ospf: networks = [] for area, area_config in ospf['area'].items(): if 'import_list' in area_config: acl_import = area_config['import_list'] if acl_import: verify_access_list(acl_import, ospf) if 'export_list' in area_config: acl_export = area_config['export_list'] if acl_export: verify_access_list(acl_export, ospf) if 'network' in area_config: for network in area_config['network']: if network in networks: raise ConfigError(f'Network "{network}" already defined in different area!') networks.append(network) if 'interface' in ospf: for interface, interface_config in ospf['interface'].items(): verify_interface_exists(ospf, interface) # One can not use dead-interval and hello-multiplier at the same # time. FRR will only activate the last option set via CLI. 
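The default-handling block above prunes defaults before merging them: a feature's default values are kept only when the feature itself appears in the CLI dictionary, otherwise merging the defaults would silently enable it. A toy restatement of that idea with made-up keys:

```python
# Hypothetical data; only illustrates why defaults are dropped before merging.
def prune_defaults(defaults: dict, cli: dict, feature: str) -> dict:
    pruned = dict(defaults)
    if feature not in cli:
        pruned.pop(feature, None)
    return pruned

defaults = {'graceful_restart': {'grace_period': '120'}, 'mpls_te': {'enable': {}}}
cli = {'mpls_te': {}}                       # graceful-restart not configured
print(prune_defaults(defaults, cli, 'graceful_restart'))
# {'mpls_te': {'enable': {}}}
```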
if {'hello_multiplier', 'dead_interval'} <= set(interface_config): raise ConfigError(f'Can not use hello-multiplier and dead-interval ' \ f'concurrently for {interface}!') # One can not use the "network <prefix> area <id>" command and an # per interface area assignment at the same time. FRR will error # out using: "Please remove all network commands first." if 'area' in ospf and 'area' in interface_config: for area, area_config in ospf['area'].items(): if 'network' in area_config: raise ConfigError('Can not use OSPF interface area and area ' \ 'network configuration at the same time!') # If interface specific options are set, we must ensure that the # interface is bound to our requesting VRF. Due to the VyOS # priorities the interface is bound to the VRF after creation of # the VRF itself, and before any routing protocol is configured. if 'vrf' in ospf: vrf = ospf['vrf'] tmp = get_interface_config(interface) if 'master' not in tmp or tmp['master'] != vrf: raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!') # Segment routing checks if dict_search('segment_routing.global_block', ospf): g_high_label_value = dict_search('segment_routing.global_block.high_label_value', ospf) g_low_label_value = dict_search('segment_routing.global_block.low_label_value', ospf) # If segment routing global block high or low value is blank, throw error if not (g_low_label_value or g_high_label_value): raise ConfigError('Segment routing global-block requires both low and high value!') # If segment routing global block low value is higher than the high value, throw error if int(g_low_label_value) > int(g_high_label_value): raise ConfigError('Segment routing global-block low value must be lower than high value') if dict_search('segment_routing.local_block', ospf): if dict_search('segment_routing.global_block', ospf) == None: raise ConfigError('Segment routing local-block requires global-block to be configured!') l_high_label_value = dict_search('segment_routing.local_block.high_label_value', ospf) l_low_label_value = dict_search('segment_routing.local_block.low_label_value', ospf) # If segment routing local-block high or low value is blank, throw error if not (l_low_label_value or l_high_label_value): raise ConfigError('Segment routing local-block requires both high and low value!') # If segment routing local-block low value is higher than the high value, throw error if int(l_low_label_value) > int(l_high_label_value): raise ConfigError('Segment routing local-block low value must be lower than high value') # local-block most live outside global block global_range = range(int(g_low_label_value), int(g_high_label_value) +1) local_range = range(int(l_low_label_value), int(l_high_label_value) +1) # Check for overlapping ranges if list(set(global_range) & set(local_range)): raise ConfigError(f'Segment-Routing Global Block ({g_low_label_value}/{g_high_label_value}) '\ f'conflicts with Local Block ({l_low_label_value}/{l_high_label_value})!') # Check for a blank or invalid value per prefix if dict_search('segment_routing.prefix', ospf): for prefix, prefix_config in ospf['segment_routing']['prefix'].items(): if 'index' in prefix_config: if prefix_config['index'].get('value') is None: raise ConfigError(f'Segment routing prefix {prefix} index value cannot be blank.') # Check for explicit-null and no-php-flag configured at the same time per prefix if dict_search('segment_routing.prefix', ospf): for prefix, prefix_config in ospf['segment_routing']['prefix'].items(): if 'index' in prefix_config: if 
("explicit_null" in prefix_config['index']) and ("no_php_flag" in prefix_config['index']): raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\ f'and no-php-flag configured at the same time.') # Check for index ranges being larger than the segment routing global block if dict_search('segment_routing.global_block', ospf): g_high_label_value = dict_search('segment_routing.global_block.high_label_value', ospf) g_low_label_value = dict_search('segment_routing.global_block.low_label_value', ospf) g_label_difference = int(g_high_label_value) - int(g_low_label_value) if dict_search('segment_routing.prefix', ospf): for prefix, prefix_config in ospf['segment_routing']['prefix'].items(): if 'index' in prefix_config: index_size = ospf['segment_routing']['prefix'][prefix]['index']['value'] if int(index_size) > int(g_label_difference): raise ConfigError(f'Segment routing prefix {prefix} cannot have an '\ f'index base size larger than the SRGB label base.') # Check route summarisation if 'summary_address' in ospf: for prefix, prefix_options in ospf['summary_address'].items(): if {'tag', 'no_advertise'} <= set(prefix_options): raise ConfigError(f'Can not set both "tag" and "no-advertise" for Type-5 '\ f'and Type-7 route summarisation of "{prefix}"!') return None def generate(ospf): if not ospf or 'deleted' in ospf: return None ospf['frr_ospfd_config'] = render_to_string('frr/ospfd.frr.j2', ospf) return None def apply(ospf): - ospf_daemon = 'ospfd' - # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() # Generate empty helper string which can be ammended to FRR commands, it # will be either empty (default VRF) or contain the "vrf <name" statement vrf = '' if 'vrf' in ospf: vrf = ' vrf ' + ospf['vrf'] - frr_cfg.load_configuration(ospf_daemon) + frr_cfg.load_configuration(frr.ospf_daemon) frr_cfg.modify_section(f'^router ospf{vrf}', stop_pattern='^exit', remove_stop_mark=True) for key in ['interface', 'interface_removed']: if key not in ospf: continue for interface in ospf[key]: frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) if 'frr_ospfd_config' in ospf: frr_cfg.add_before(frr.default_add_before, ospf['frr_ospfd_config']) - frr_cfg.commit_configuration(ospf_daemon) + frr_cfg.commit_configuration(frr.ospf_daemon) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_ospfv3.py b/src/conf_mode/protocols_ospfv3.py index 60c2a9b16..7bdab3017 100755 --- a/src/conf_mode/protocols_ospfv3.py +++ b/src/conf_mode/protocols_ospfv3.py @@ -1,191 +1,189 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
from sys import exit from sys import argv from vyos.config import Config from vyos.config import config_dict_merge from vyos.configdict import dict_merge from vyos.configdict import node_changed from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_route_map from vyos.configverify import verify_interface_exists from vyos.template import render_to_string from vyos.ifconfig import Interface from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_config from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() vrf = None if len(argv) > 1: vrf = argv[1] base_path = ['protocols', 'ospfv3'] # eqivalent of the C foo ? 'a' : 'b' statement base = vrf and ['vrf', 'name', vrf, 'protocols', 'ospfv3'] or base_path ospfv3 = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) # Assign the name of our VRF context. This MUST be done before the return # statement below, else on deletion we will delete the default instance # instead of the VRF instance. if vrf: ospfv3['vrf'] = vrf # FRR has VRF support for different routing daemons. As interfaces belong # to VRFs - or the global VRF, we need to check for changed interfaces so # that they will be properly rendered for the FRR config. Also this eases # removal of interfaces from the running configuration. interfaces_removed = node_changed(conf, base + ['interface']) if interfaces_removed: ospfv3['interface_removed'] = list(interfaces_removed) # Bail out early if configuration tree does no longer exist. this must # be done after retrieving the list of interfaces to be removed. if not conf.exists(base): ospfv3.update({'deleted' : ''}) return ospfv3 # We have gathered the dict representation of the CLI, but there are default # options which we need to update into the dictionary retrived. default_values = conf.get_config_defaults(**ospfv3.kwargs, recursive=True) # We have to cleanup the default dict, as default values could enable features # which are not explicitly enabled on the CLI. Example: default-information # originate comes with a default metric-type of 2, which will enable the # entire default-information originate tree, even when not set via CLI so we # need to check this first and probably drop that key. if dict_search('default_information.originate', ospfv3) is None: del default_values['default_information'] if 'graceful_restart' not in ospfv3: del default_values['graceful_restart'] for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'ripng', 'static']: if dict_search(f'redistribute.{protocol}', ospfv3) is None: del default_values['redistribute'][protocol] if not bool(default_values['redistribute']): del default_values['redistribute'] default_values.pop('interface', {}) # merge in remaining default values ospfv3 = config_dict_merge(default_values, ospfv3) # We also need some additional information from the config, prefix-lists # and route-maps for instance. They will be used in verify(). # # XXX: one MUST always call this without the key_mangling() option! See # vyos.configverify.verify_common_route_maps() for more information. tmp = conf.get_config_dict(['policy']) # Merge policy dict into "regular" config dict ospfv3 = dict_merge(tmp, ospfv3) return ospfv3 def verify(ospfv3): if not ospfv3: return None verify_common_route_maps(ospfv3) # As we can have a default-information route-map, we need to validate it! 
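Like protocols_ospf.py, the OSPFv3 script builds an optional " vrf <name>" suffix and folds it into the regex handed to modify_section(), so that only the instance belonging to the requesting VRF is replaced. A small sketch of that string handling (the function name is made up):

```python
def router_section_regex(protocol, vrf=None):
    """Regex matching 'router <protocol>' in the default or a named VRF."""
    suffix = f' vrf {vrf}' if vrf else ''
    return f'^router {protocol}{suffix}'

print(router_section_regex('ospf6'))          # ^router ospf6
print(router_section_regex('ospf6', 'red'))   # ^router ospf6 vrf red
```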
route_map_name = dict_search('default_information.originate.route_map', ospfv3) if route_map_name: verify_route_map(route_map_name, ospfv3) if 'area' in ospfv3: for area, area_config in ospfv3['area'].items(): if 'area_type' in area_config: if len(area_config['area_type']) > 1: raise ConfigError(f'Can only configure one area-type for OSPFv3 area "{area}"!') if 'range' in area_config: for range, range_config in area_config['range'].items(): if {'not_advertise', 'advertise'} <= range_config.keys(): raise ConfigError(f'"not-advertise" and "advertise" for "range {range}" cannot be both configured at the same time!') if 'interface' in ospfv3: for interface, interface_config in ospfv3['interface'].items(): verify_interface_exists(ospfv3, interface) if 'ifmtu' in interface_config: mtu = Interface(interface).get_mtu() if int(interface_config['ifmtu']) > int(mtu): raise ConfigError(f'OSPFv3 ifmtu can not exceed physical MTU of "{mtu}"') # If interface specific options are set, we must ensure that the # interface is bound to our requesting VRF. Due to the VyOS # priorities the interface is bound to the VRF after creation of # the VRF itself, and before any routing protocol is configured. if 'vrf' in ospfv3: vrf = ospfv3['vrf'] tmp = get_interface_config(interface) if 'master' not in tmp or tmp['master'] != vrf: raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!') return None def generate(ospfv3): if not ospfv3 or 'deleted' in ospfv3: return None ospfv3['new_frr_config'] = render_to_string('frr/ospf6d.frr.j2', ospfv3) return None def apply(ospfv3): - ospf6_daemon = 'ospf6d' - # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() # Generate empty helper string which can be ammended to FRR commands, it # will be either empty (default VRF) or contain the "vrf <name" statement vrf = '' if 'vrf' in ospfv3: vrf = ' vrf ' + ospfv3['vrf'] - frr_cfg.load_configuration(ospf6_daemon) + frr_cfg.load_configuration(frr.ospf6_daemon) frr_cfg.modify_section(f'^router ospf6{vrf}', stop_pattern='^exit', remove_stop_mark=True) for key in ['interface', 'interface_removed']: if key not in ospfv3: continue for interface in ospfv3[key]: frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) if 'new_frr_config' in ospfv3: frr_cfg.add_before(frr.default_add_before, ospfv3['new_frr_config']) - frr_cfg.commit_configuration(ospf6_daemon) + frr_cfg.commit_configuration(frr.ospf6_daemon) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_pim.py b/src/conf_mode/protocols_pim.py index 79294a1f0..9ef734eff 100755 --- a/src/conf_mode/protocols_pim.py +++ b/src/conf_mode/protocols_pim.py @@ -1,172 +1,171 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
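A pattern worth calling out from the verify() functions above (and repeated for md5/plaintext-password, advertise/not-advertise, blackhole/reject and hello-multiplier/dead-interval): mutually exclusive options are detected by testing whether the set of conflicting keys is a subset of the configured keys. In isolation:

```python
def conflicting(options: dict, *keys) -> bool:
    """True when all of the mutually exclusive keys are present at once."""
    return set(keys) <= set(options)

iface = {'md5': {}, 'plaintext_password': 'secret'}
print(conflicting(iface, 'md5', 'plaintext_password'))        # True -> ConfigError
print(conflicting({'md5': {}}, 'md5', 'plaintext_password'))  # False
```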
import os from ipaddress import IPv4Address from ipaddress import IPv4Network from signal import SIGTERM from sys import exit from vyos.config import Config from vyos.config import config_dict_merge from vyos.configdict import node_changed from vyos.configverify import verify_interface_exists from vyos.utils.process import process_named_running from vyos.utils.process import call from vyos.template import render_to_string from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() RESERVED_MC_NET = '224.0.0.0/24' def get_config(config=None): if config: conf = config else: conf = Config() base = ['protocols', 'pim'] pim = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) # We can not run both IGMP proxy and PIM at the same time - get IGMP # proxy status if conf.exists(['protocols', 'igmp-proxy']): pim.update({'igmp_proxy_enabled' : {}}) # FRR has VRF support for different routing daemons. As interfaces belong # to VRFs - or the global VRF, we need to check for changed interfaces so # that they will be properly rendered for the FRR config. Also this eases # removal of interfaces from the running configuration. interfaces_removed = node_changed(conf, base + ['interface']) if interfaces_removed: pim['interface_removed'] = list(interfaces_removed) # Bail out early if configuration tree does no longer exist. this must # be done after retrieving the list of interfaces to be removed. if not conf.exists(base): pim.update({'deleted' : ''}) return pim # We have gathered the dict representation of the CLI, but there are default # options which we need to update into the dictionary retrived. default_values = conf.get_config_defaults(**pim.kwargs, recursive=True) # We have to cleanup the default dict, as default values could enable features # which are not explicitly enabled on the CLI. Example: default-information # originate comes with a default metric-type of 2, which will enable the # entire default-information originate tree, even when not set via CLI so we # need to check this first and probably drop that key. for interface in pim.get('interface', []): # We need to reload the defaults on every pass b/c of # hello-multiplier dependency on dead-interval # If hello-multiplier is set, we need to remove the default from # dead-interval. 
if 'igmp' not in pim['interface'][interface]: del default_values['interface'][interface]['igmp'] pim = config_dict_merge(default_values, pim) return pim def verify(pim): if not pim or 'deleted' in pim: return None if 'igmp_proxy_enabled' in pim: raise ConfigError('IGMP proxy and PIM cannot be configured at the same time!') if 'interface' not in pim: raise ConfigError('PIM require defined interfaces!') for interface, interface_config in pim['interface'].items(): verify_interface_exists(pim, interface) # Check join group in reserved net if 'igmp' in interface_config and 'join' in interface_config['igmp']: for join_addr in interface_config['igmp']['join']: if IPv4Address(join_addr) in IPv4Network(RESERVED_MC_NET): raise ConfigError(f'Groups within {RESERVED_MC_NET} are reserved and cannot be joined!') if 'rp' in pim: if 'address' not in pim['rp']: raise ConfigError('PIM rendezvous point needs to be defined!') # Check unique multicast groups unique = [] pim_base_error = 'PIM rendezvous point group' for address, address_config in pim['rp']['address'].items(): if 'group' not in address_config: raise ConfigError(f'{pim_base_error} should be defined for "{address}"!') # Check if it is a multicast group for gr_addr in address_config['group']: if not IPv4Network(gr_addr).is_multicast: raise ConfigError(f'{pim_base_error} "{gr_addr}" is not a multicast group!') if gr_addr in unique: raise ConfigError(f'{pim_base_error} must be unique!') unique.append(gr_addr) def generate(pim): if not pim or 'deleted' in pim: return None pim['frr_pimd_config'] = render_to_string('frr/pimd.frr.j2', pim) return None def apply(pim): - pim_daemon = 'pimd' - pim_pid = process_named_running(pim_daemon) + pim_pid = process_named_running(frr.pim_daemon) if not pim or 'deleted' in pim: if 'deleted' in pim: os.kill(int(pim_pid), SIGTERM) return None if not pim_pid: call('/usr/lib/frr/pimd -d -F traditional --daemon -A 127.0.0.1') # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(pim_daemon) + frr_cfg.load_configuration(frr.pim_daemon) frr_cfg.modify_section(f'^ip pim') frr_cfg.modify_section(f'^ip igmp') for key in ['interface', 'interface_removed']: if key not in pim: continue for interface in pim[key]: frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) if 'frr_pimd_config' in pim: frr_cfg.add_before(frr.default_add_before, pim['frr_pimd_config']) - frr_cfg.commit_configuration(pim_daemon) + frr_cfg.commit_configuration(frr.pim_daemon) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_pim6.py b/src/conf_mode/protocols_pim6.py index 581ffe238..1abc256fe 100755 --- a/src/conf_mode/protocols_pim6.py +++ b/src/conf_mode/protocols_pim6.py @@ -1,133 +1,130 @@ #!/usr/bin/env python3 # # Copyright (C) 2023 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. 
If not, see <http://www.gnu.org/licenses/>. from ipaddress import IPv6Address from ipaddress import IPv6Network from sys import exit from vyos.config import Config from vyos.config import config_dict_merge from vyos.configdict import node_changed from vyos.configverify import verify_interface_exists from vyos.template import render_to_string from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() base = ['protocols', 'pim6'] pim6 = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, with_recursive_defaults=True) # FRR has VRF support for different routing daemons. As interfaces belong # to VRFs - or the global VRF, we need to check for changed interfaces so # that they will be properly rendered for the FRR config. Also this eases # removal of interfaces from the running configuration. interfaces_removed = node_changed(conf, base + ['interface']) if interfaces_removed: pim6['interface_removed'] = list(interfaces_removed) # Bail out early if configuration tree does no longer exist. this must # be done after retrieving the list of interfaces to be removed. if not conf.exists(base): pim6.update({'deleted' : ''}) return pim6 # We have gathered the dict representation of the CLI, but there are default # options which we need to update into the dictionary retrived. default_values = conf.get_config_defaults(**pim6.kwargs, recursive=True) pim6 = config_dict_merge(default_values, pim6) return pim6 def verify(pim6): if not pim6 or 'deleted' in pim6: return for interface, interface_config in pim6.get('interface', {}).items(): verify_interface_exists(pim6, interface) if 'mld' in interface_config: mld = interface_config['mld'] for group in mld.get('join', {}).keys(): # Validate multicast group address if not IPv6Address(group).is_multicast: raise ConfigError(f"{group} is not a multicast group") if 'rp' in pim6: if 'address' not in pim6['rp']: raise ConfigError('PIM6 rendezvous point needs to be defined!') # Check unique multicast groups unique = [] pim_base_error = 'PIM6 rendezvous point group' if {'address', 'prefix-list6'} <= set(pim6['rp']): raise ConfigError(f'{pim_base_error} supports either address or a prefix-list!') for address, address_config in pim6['rp']['address'].items(): if 'group' not in address_config: raise ConfigError(f'{pim_base_error} should be defined for "{address}"!') # Check if it is a multicast group for gr_addr in address_config['group']: if not IPv6Network(gr_addr).is_multicast: raise ConfigError(f'{pim_base_error} "{gr_addr}" is not a multicast group!') if gr_addr in unique: raise ConfigError(f'{pim_base_error} must be unique!') unique.append(gr_addr) def generate(pim6): if not pim6 or 'deleted' in pim6: return pim6['new_frr_config'] = render_to_string('frr/pim6d.frr.j2', pim6) return None def apply(pim6): if pim6 is None: return - pim6_daemon = 'pim6d' - # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - - frr_cfg.load_configuration(pim6_daemon) + frr_cfg.load_configuration(frr.pim6_daemon) for key in ['interface', 'interface_removed']: if key not in pim6: continue for interface in pim6[key]: frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) if 'new_frr_config' in pim6: frr_cfg.add_before(frr.default_add_before, pim6['new_frr_config']) - frr_cfg.commit_configuration(pim6_daemon) + frr_cfg.commit_configuration(frr.pim6_daemon) return None if __name__ == 
'__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_rip.py b/src/conf_mode/protocols_rip.py index 9afac544d..994007137 100755 --- a/src/conf_mode/protocols_rip.py +++ b/src/conf_mode/protocols_rip.py @@ -1,139 +1,133 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.config import Config from vyos.configdict import dict_merge from vyos.configdict import node_changed from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_access_list from vyos.configverify import verify_prefix_list from vyos.utils.dict import dict_search from vyos.template import render_to_string from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() base = ['protocols', 'rip'] rip = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) # FRR has VRF support for different routing daemons. As interfaces belong # to VRFs - or the global VRF, we need to check for changed interfaces so # that they will be properly rendered for the FRR config. Also this eases # removal of interfaces from the running configuration. interfaces_removed = node_changed(conf, base + ['interface']) if interfaces_removed: rip['interface_removed'] = list(interfaces_removed) # Bail out early if configuration tree does not exist if not conf.exists(base): rip.update({'deleted' : ''}) return rip # We have gathered the dict representation of the CLI, but there are default # options which we need to update into the dictionary retrived. rip = conf.merge_defaults(rip, recursive=True) # We also need some additional information from the config, prefix-lists # and route-maps for instance. They will be used in verify(). # # XXX: one MUST always call this without the key_mangling() option! See # vyos.configverify.verify_common_route_maps() for more information. 
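The PIM and PIM6 verify() functions above apply the same two rules to rendezvous-point groups: each group must be a multicast prefix, and a group may be bound to only one RP address. A standalone IPv6 version of that check, with illustrative names:

```python
from ipaddress import IPv6Network

def check_rp_groups(rp_address: dict) -> None:
    seen = []
    for address, cfg in rp_address.items():
        for group in cfg.get('group', []):
            if not IPv6Network(group).is_multicast:
                raise ValueError(f'"{group}" is not a multicast group')
            if group in seen:
                raise ValueError(f'group "{group}" bound to more than one RP')
            seen.append(group)

check_rp_groups({'2001:db8::1': {'group': ['ff05::1/128']}})   # passes
```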
tmp = conf.get_config_dict(['policy']) # Merge policy dict into "regular" config dict rip = dict_merge(tmp, rip) return rip def verify(rip): if not rip: return None verify_common_route_maps(rip) acl_in = dict_search('distribute_list.access_list.in', rip) if acl_in: verify_access_list(acl_in, rip) acl_out = dict_search('distribute_list.access_list.out', rip) if acl_out: verify_access_list(acl_out, rip) prefix_list_in = dict_search('distribute_list.prefix-list.in', rip) if prefix_list_in: verify_prefix_list(prefix_list_in, rip) prefix_list_out = dict_search('distribute_list.prefix_list.out', rip) if prefix_list_out: verify_prefix_list(prefix_list_out, rip) if 'interface' in rip: for interface, interface_options in rip['interface'].items(): if 'authentication' in interface_options: if {'md5', 'plaintext_password'} <= set(interface_options['authentication']): raise ConfigError('Can not use both md5 and plaintext-password at the same time!') if 'split_horizon' in interface_options: if {'disable', 'poison_reverse'} <= set(interface_options['split_horizon']): raise ConfigError(f'You can not have "split-horizon poison-reverse" enabled ' \ f'with "split-horizon disable" for "{interface}"!') def generate(rip): if not rip or 'deleted' in rip: return None rip['new_frr_config'] = render_to_string('frr/ripd.frr.j2', rip) return None def apply(rip): - rip_daemon = 'ripd' - zebra_daemon = 'zebra' - # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) + # The route-map used for the FIB (zebra) is part of the mgmt daemon as of FRR 10.1 + frr_cfg.load_configuration(frr.mgmt_daemon) frr_cfg.modify_section('^ip protocol rip route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') - frr_cfg.commit_configuration(zebra_daemon) - - frr_cfg.load_configuration(rip_daemon) frr_cfg.modify_section('^key chain \S+', stop_pattern='^exit', remove_stop_mark=True) frr_cfg.modify_section('^router rip', stop_pattern='^exit', remove_stop_mark=True) for key in ['interface', 'interface_removed']: if key not in rip: continue for interface in rip[key]: frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) if 'new_frr_config' in rip: frr_cfg.add_before(frr.default_add_before, rip['new_frr_config']) - frr_cfg.commit_configuration(rip_daemon) + frr_cfg.commit_configuration() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_ripng.py b/src/conf_mode/protocols_ripng.py index 23416ff96..9d4447d1f 100755 --- a/src/conf_mode/protocols_ripng.py +++ b/src/conf_mode/protocols_ripng.py @@ -1,124 +1,115 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
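The RIP apply() above also captures the second theme of this patch: with FRR 10.1 the "ip protocol ... route-map" stanza is owned by mgmtd rather than zebra, so the script loads frr.mgmt_daemon, edits all relevant sections in one pass, and finishes with a single commit_configuration() call that takes no daemon argument. Condensed below; this assumes a VyOS host and is not runnable elsewhere:

```python
from vyos import frr

def apply_rip_sketch(rendered_config: str) -> None:
    frr_cfg = frr.FRRConfig()
    frr_cfg.load_configuration(frr.mgmt_daemon)          # was: 'zebra' then 'ripd'
    frr_cfg.modify_section(r'^ip protocol rip route-map [-a-zA-Z0-9.]+',
                           stop_pattern=r'(\s|!)')
    frr_cfg.modify_section(r'^router rip', stop_pattern='^exit',
                           remove_stop_mark=True)
    frr_cfg.add_before(frr.default_add_before, rendered_config)
    frr_cfg.commit_configuration()                       # single combined commit
```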
from sys import exit from vyos.config import Config from vyos.configdict import dict_merge from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_access_list from vyos.configverify import verify_prefix_list from vyos.utils.dict import dict_search from vyos.template import render_to_string from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() base = ['protocols', 'ripng'] ripng = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) # Bail out early if configuration tree does not exist if not conf.exists(base): return ripng # We have gathered the dict representation of the CLI, but there are default # options which we need to update into the dictionary retrived. ripng = conf.merge_defaults(ripng, recursive=True) # We also need some additional information from the config, prefix-lists # and route-maps for instance. They will be used in verify(). # # XXX: one MUST always call this without the key_mangling() option! See # vyos.configverify.verify_common_route_maps() for more information. tmp = conf.get_config_dict(['policy']) # Merge policy dict into "regular" config dict ripng = dict_merge(tmp, ripng) return ripng def verify(ripng): if not ripng: return None verify_common_route_maps(ripng) acl_in = dict_search('distribute_list.access_list.in', ripng) if acl_in: verify_access_list(acl_in, ripng, version='6') acl_out = dict_search('distribute_list.access_list.out', ripng) if acl_out: verify_access_list(acl_out, ripng, version='6') prefix_list_in = dict_search('distribute_list.prefix_list.in', ripng) if prefix_list_in: verify_prefix_list(prefix_list_in, ripng, version='6') prefix_list_out = dict_search('distribute_list.prefix_list.out', ripng) if prefix_list_out: verify_prefix_list(prefix_list_out, ripng, version='6') if 'interface' in ripng: for interface, interface_options in ripng['interface'].items(): if 'authentication' in interface_options: if {'md5', 'plaintext_password'} <= set(interface_options['authentication']): raise ConfigError('Can not use both md5 and plaintext-password at the same time!') if 'split_horizon' in interface_options: if {'disable', 'poison_reverse'} <= set(interface_options['split_horizon']): raise ConfigError(f'You can not have "split-horizon poison-reverse" enabled ' \ f'with "split-horizon disable" for "{interface}"!') def generate(ripng): if not ripng: - ripng['new_frr_config'] = '' return None - ripng['new_frr_config'] = render_to_string('frr/ripngd.frr.j2', ripng) - return None def apply(ripng): - ripng_daemon = 'ripngd' - zebra_daemon = 'zebra' - # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) + frr_cfg.load_configuration(frr.mgmt_daemon) frr_cfg.modify_section('^ipv6 protocol ripng route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') - frr_cfg.commit_configuration(zebra_daemon) - - frr_cfg.load_configuration(ripng_daemon) frr_cfg.modify_section('key chain \S+', stop_pattern='^exit', remove_stop_mark=True) frr_cfg.modify_section('interface \S+', stop_pattern='^exit', remove_stop_mark=True) frr_cfg.modify_section('^router ripng', stop_pattern='^exit', remove_stop_mark=True) if 'new_frr_config' in ripng: frr_cfg.add_before(frr.default_add_before, ripng['new_frr_config']) - frr_cfg.commit_configuration(ripng_daemon) + frr_cfg.commit_configuration() 
return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_rpki.py b/src/conf_mode/protocols_rpki.py index a59ecf3e4..bec0cda91 100755 --- a/src/conf_mode/protocols_rpki.py +++ b/src/conf_mode/protocols_rpki.py @@ -1,130 +1,128 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os from glob import glob from sys import exit from vyos.config import Config from vyos.pki import wrap_openssh_public_key from vyos.pki import wrap_openssh_private_key from vyos.template import render_to_string from vyos.utils.dict import dict_search_args from vyos.utils.file import write_file from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() rpki_ssh_key_base = '/run/frr/id_rpki' def get_config(config=None): if config: conf = config else: conf = Config() base = ['protocols', 'rpki'] rpki = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, with_pki=True) # Bail out early if configuration tree does not exist if not conf.exists(base): rpki.update({'deleted' : ''}) return rpki # We have gathered the dict representation of the CLI, but there are default # options which we need to update into the dictionary retrived. 
rpki = conf.merge_defaults(rpki, recursive=True) return rpki def verify(rpki): if not rpki: return None if 'cache' in rpki: preferences = [] for peer, peer_config in rpki['cache'].items(): for mandatory in ['port', 'preference']: if mandatory not in peer_config: raise ConfigError(f'RPKI cache "{peer}" {mandatory} must be defined!') if 'preference' in peer_config: preference = peer_config['preference'] if preference in preferences: raise ConfigError(f'RPKI cache with preference {preference} already configured!') preferences.append(preference) if 'ssh' in peer_config: if 'username' not in peer_config['ssh']: raise ConfigError('RPKI+SSH requires username to be defined!') if 'key' not in peer_config['ssh'] or 'openssh' not in rpki['pki']: raise ConfigError('RPKI+SSH requires key to be defined!') if peer_config['ssh']['key'] not in rpki['pki']['openssh']: raise ConfigError('RPKI+SSH key not found on PKI subsystem!') return None def generate(rpki): for key in glob(f'{rpki_ssh_key_base}*'): os.unlink(key) if not rpki: return if 'cache' in rpki: for cache, cache_config in rpki['cache'].items(): if 'ssh' in cache_config: key_name = cache_config['ssh']['key'] public_key_data = dict_search_args(rpki['pki'], 'openssh', key_name, 'public', 'key') public_key_type = dict_search_args(rpki['pki'], 'openssh', key_name, 'public', 'type') private_key_data = dict_search_args(rpki['pki'], 'openssh', key_name, 'private', 'key') cache_config['ssh']['public_key_file'] = f'{rpki_ssh_key_base}_{cache}.pub' cache_config['ssh']['private_key_file'] = f'{rpki_ssh_key_base}_{cache}' write_file(cache_config['ssh']['public_key_file'], wrap_openssh_public_key(public_key_data, public_key_type)) write_file(cache_config['ssh']['private_key_file'], wrap_openssh_private_key(private_key_data)) rpki['new_frr_config'] = render_to_string('frr/rpki.frr.j2', rpki) return None def apply(rpki): - bgp_daemon = 'bgpd' - # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(bgp_daemon) + frr_cfg.load_configuration(frr.bgp_daemon) frr_cfg.modify_section('^rpki', stop_pattern='^exit', remove_stop_mark=True) if 'new_frr_config' in rpki: frr_cfg.add_before(frr.default_add_before, rpki['new_frr_config']) - frr_cfg.commit_configuration(bgp_daemon) + frr_cfg.commit_configuration(frr.bgp_daemon) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_segment-routing.py b/src/conf_mode/protocols_segment-routing.py index b36c2ca11..67f8005ef 100755 --- a/src/conf_mode/protocols_segment-routing.py +++ b/src/conf_mode/protocols_segment-routing.py @@ -1,116 +1,114 @@ #!/usr/bin/env python3 # # Copyright (C) 2023-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
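The RPKI verify() above enforces that every cache defines a port and a preference and that preferences are unique across caches. The same logic, stripped of the VyOS plumbing:

```python
def verify_rpki_caches(caches: dict) -> None:
    seen = set()
    for name, cfg in caches.items():
        for mandatory in ('port', 'preference'):
            if mandatory not in cfg:
                raise ValueError(f'RPKI cache "{name}": {mandatory} must be defined')
        if cfg['preference'] in seen:
            raise ValueError(f'RPKI preference {cfg["preference"]} already configured')
        seen.add(cfg['preference'])

verify_rpki_caches({'validator1': {'port': '3323', 'preference': '1'},
                    'validator2': {'port': '8282', 'preference': '2'}})   # passes
```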
from sys import exit from vyos.config import Config from vyos.configdict import node_changed from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.system import sysctl_write from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() base = ['protocols', 'segment-routing'] sr = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_recursive_defaults=True) # FRR has VRF support for different routing daemons. As interfaces belong # to VRFs - or the global VRF, we need to check for changed interfaces so # that they will be properly rendered for the FRR config. Also this eases # removal of interfaces from the running configuration. interfaces_removed = node_changed(conf, base + ['interface']) if interfaces_removed: sr['interface_removed'] = list(interfaces_removed) import pprint pprint.pprint(sr) return sr def verify(sr): if 'srv6' in sr: srv6_enable = False if 'interface' in sr: for interface, interface_config in sr['interface'].items(): if 'srv6' in interface_config: srv6_enable = True break if not srv6_enable: raise ConfigError('SRv6 should be enabled on at least one interface!') return None def generate(sr): if not sr: return None sr['new_frr_config'] = render_to_string('frr/zebra.segment_routing.frr.j2', sr) return None def apply(sr): - zebra_daemon = 'zebra' - if 'interface_removed' in sr: for interface in sr['interface_removed']: # Disable processing of IPv6-SR packets sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0') if 'interface' in sr: for interface, interface_config in sr['interface'].items(): # Accept or drop SR-enabled IPv6 packets on this interface if 'srv6' in interface_config: sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '1') # Define HMAC policy for ingress SR-enabled packets on this interface # It's a redundant check as HMAC has a default value - but better safe # then sorry tmp = dict_search('srv6.hmac', interface_config) if tmp == 'accept': sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '0') elif tmp == 'drop': sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '1') elif tmp == 'ignore': sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '-1') else: sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0') # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(zebra_daemon) + frr_cfg.load_configuration(frr.zebra_daemon) frr_cfg.modify_section(r'^segment-routing') if 'new_frr_config' in sr: frr_cfg.add_before(frr.default_add_before, sr['new_frr_config']) - frr_cfg.commit_configuration(zebra_daemon) + frr_cfg.commit_configuration(frr.zebra_daemon) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_static.py b/src/conf_mode/protocols_static.py index 430cc69d4..c5dc77fd2 100755 --- a/src/conf_mode/protocols_static.py +++ b/src/conf_mode/protocols_static.py @@ -1,132 +1,130 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from sys import argv from vyos.config import Config from vyos.configdict import dict_merge from vyos.configdict import get_dhcp_interfaces from vyos.configdict import get_pppoe_interfaces from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_vrf from vyos.template import render from vyos.template import render_to_string from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() config_file = '/etc/iproute2/rt_tables.d/vyos-static.conf' def get_config(config=None): if config: conf = config else: conf = Config() vrf = None if len(argv) > 1: vrf = argv[1] base_path = ['protocols', 'static'] # eqivalent of the C foo ? 'a' : 'b' statement base = vrf and ['vrf', 'name', vrf, 'protocols', 'static'] or base_path static = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) # Assign the name of our VRF context if vrf: static['vrf'] = vrf # We also need some additional information from the config, prefix-lists # and route-maps for instance. They will be used in verify(). # # XXX: one MUST always call this without the key_mangling() option! See # vyos.configverify.verify_common_route_maps() for more information. tmp = conf.get_config_dict(['policy']) # Merge policy dict into "regular" config dict static = dict_merge(tmp, static) # T3680 - get a list of all interfaces currently configured to use DHCP tmp = get_dhcp_interfaces(conf, vrf) if tmp: static.update({'dhcp' : tmp}) tmp = get_pppoe_interfaces(conf, vrf) if tmp: static.update({'pppoe' : tmp}) return static def verify(static): verify_common_route_maps(static) for route in ['route', 'route6']: # if there is no route(6) key in the dictionary we can immediately # bail out early if route not in static: continue # When leaking routes to other VRFs we must ensure that the destination # VRF exists for prefix, prefix_options in static[route].items(): # both the interface and next-hop CLI node can have a VRF subnode, # thus we check this using a for loop for type in ['interface', 'next_hop']: if type in prefix_options: for interface, interface_config in prefix_options[type].items(): verify_vrf(interface_config) if {'blackhole', 'reject'} <= set(prefix_options): raise ConfigError(f'Can not use both blackhole and reject for '\ f'prefix "{prefix}"!') return None def generate(static): if not static: return None # Put routing table names in /etc/iproute2/rt_tables render(config_file, 'iproute2/static.conf.j2', static) static['new_frr_config'] = render_to_string('frr/staticd.frr.j2', static) return None def apply(static): - static_daemon = 'staticd' - # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(static_daemon) + frr_cfg.load_configuration(frr.mgmt_daemon) if 'vrf' in static: vrf = static['vrf'] frr_cfg.modify_section(f'^vrf {vrf}', stop_pattern='^exit-vrf', remove_stop_mark=True) else: frr_cfg.modify_section(r'^ip route .*') frr_cfg.modify_section(r'^ipv6 route .*') if 'new_frr_config' in static: frr_cfg.add_before(frr.default_add_before, static['new_frr_config']) - 
frr_cfg.commit_configuration(static_daemon) + frr_cfg.commit_configuration() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_static_multicast.py b/src/conf_mode/protocols_static_multicast.py index c8894fd41..4393f3ed3 100755 --- a/src/conf_mode/protocols_static_multicast.py +++ b/src/conf_mode/protocols_static_multicast.py @@ -1,135 +1,133 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from ipaddress import IPv4Address from sys import exit from vyos import ConfigError from vyos import frr from vyos.config import Config from vyos.template import render_to_string from vyos import airbag airbag.enable() config_file = r'/tmp/static_mcast.frr' # Get configuration for static multicast route def get_config(config=None): if config: conf = config else: conf = Config() mroute = { 'old_mroute' : {}, 'mroute' : {} } base_path = "protocols static multicast" if not (conf.exists(base_path) or conf.exists_effective(base_path)): return None conf.set_level(base_path) # Get multicast effective routes for route in conf.list_effective_nodes('route'): mroute['old_mroute'][route] = {} for next_hop in conf.list_effective_nodes('route {0} next-hop'.format(route)): mroute['old_mroute'][route].update({ next_hop : conf.return_value('route {0} next-hop {1} distance'.format(route, next_hop)) }) # Get multicast effective interface-routes for route in conf.list_effective_nodes('interface-route'): if not route in mroute['old_mroute']: mroute['old_mroute'][route] = {} for next_hop in conf.list_effective_nodes('interface-route {0} next-hop-interface'.format(route)): mroute['old_mroute'][route].update({ next_hop : conf.return_value('interface-route {0} next-hop-interface {1} distance'.format(route, next_hop)) }) # Get multicast routes for route in conf.list_nodes('route'): mroute['mroute'][route] = {} for next_hop in conf.list_nodes('route {0} next-hop'.format(route)): mroute['mroute'][route].update({ next_hop : conf.return_value('route {0} next-hop {1} distance'.format(route, next_hop)) }) # Get multicast interface-routes for route in conf.list_nodes('interface-route'): if not route in mroute['mroute']: mroute['mroute'][route] = {} for next_hop in conf.list_nodes('interface-route {0} next-hop-interface'.format(route)): mroute['mroute'][route].update({ next_hop : conf.return_value('interface-route {0} next-hop-interface {1} distance'.format(route, next_hop)) }) return mroute def verify(mroute): if mroute is None: return None for mcast_route in mroute['mroute']: route = mcast_route.split('/') if IPv4Address(route[0]) < IPv4Address('224.0.0.0'): raise ConfigError(f'{mcast_route} not a multicast network') def generate(mroute): if mroute is None: return None mroute['new_frr_config'] = render_to_string('frr/static_mcast.frr.j2', mroute) return None def apply(mroute): if mroute is None: return None - 
static_daemon = 'staticd' frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(static_daemon) + frr_cfg.load_configuration(frr.mgmt_daemon) if 'old_mroute' in mroute: for route_gr in mroute['old_mroute']: for nh in mroute['old_mroute'][route_gr]: if mroute['old_mroute'][route_gr][nh]: frr_cfg.modify_section(f'^ip mroute {route_gr} {nh} {mroute["old_mroute"][route_gr][nh]}') else: frr_cfg.modify_section(f'^ip mroute {route_gr} {nh}') if 'new_frr_config' in mroute: frr_cfg.add_before(frr.default_add_before, mroute['new_frr_config']) - frr_cfg.commit_configuration(static_daemon) - + frr_cfg.commit_configuration() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/service_snmp.py b/src/conf_mode/service_snmp.py index c9c0ed9a0..134662f85 100755 --- a/src/conf_mode/service_snmp.py +++ b/src/conf_mode/service_snmp.py @@ -1,282 +1,284 @@ #!/usr/bin/env python3 # # Copyright (C) 2018-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os from sys import exit from vyos.base import Warning from vyos.config import Config from vyos.configdict import dict_merge from vyos.configverify import verify_vrf from vyos.snmpv3_hashgen import plaintext_to_md5 from vyos.snmpv3_hashgen import plaintext_to_sha1 from vyos.snmpv3_hashgen import random from vyos.template import render from vyos.utils.configfs import delete_cli_node from vyos.utils.configfs import add_cli_node from vyos.utils.dict import dict_search from vyos.utils.network import is_addr_assigned from vyos.utils.process import call from vyos.utils.permission import chmod_755 from vyos.version import get_version_data from vyos import ConfigError +from vyos import frr from vyos import airbag airbag.enable() config_file_client = r'/etc/snmp/snmp.conf' config_file_daemon = r'/etc/snmp/snmpd.conf' config_file_access = r'/usr/share/snmp/snmpd.conf' config_file_user = r'/var/lib/snmp/snmpd.conf' default_script_dir = r'/config/user-data/' systemd_override = r'/run/systemd/system/snmpd.service.d/override.conf' systemd_service = 'snmpd.service' def get_config(config=None): if config: conf = config else: conf = Config() base = ['service', 'snmp'] snmp = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) if not conf.exists(base): snmp.update({'deleted' : ''}) if conf.exists(['service', 'lldp', 'snmp']): snmp.update({'lldp_snmp' : ''}) if 'deleted' in snmp: return snmp version_data = get_version_data() snmp['version'] = version_data['version'] # create an internal snmpv3 user of the form 'vyosxxxxxxxxxxxxxxxx' snmp['vyos_user'] = 'vyos' + random(8) snmp['vyos_user_pass'] = random(16) # We have gathered the dict representation of the CLI, but there are default # options which we need to update into the dictionary retrived. 
snmp = conf.merge_defaults(snmp, recursive=True) if 'listen_address' in snmp: # Always listen on localhost if an explicit address has been configured # This is a safety measure to not end up with invalid listen addresses # that are not configured on this system. See https://vyos.dev/T850 if '127.0.0.1' not in snmp['listen_address']: tmp = {'127.0.0.1': {'port': '161'}} snmp['listen_address'] = dict_merge(tmp, snmp['listen_address']) if '::1' not in snmp['listen_address']: tmp = {'::1': {'port': '161'}} snmp['listen_address'] = dict_merge(tmp, snmp['listen_address']) if 'script_extensions' in snmp and 'extension_name' in snmp['script_extensions']: for key, val in snmp['script_extensions']['extension_name'].items(): if 'script' not in val: continue script_path = val['script'] # if the script does not have an absolute path, use the pre-configured path if not os.path.isabs(script_path): script_path = os.path.join(default_script_dir, script_path) snmp['script_extensions']['extension_name'][key]['script'] = script_path return snmp def verify(snmp): if 'deleted' in snmp: return None if {'deleted', 'lldp_snmp'} <= set(snmp): raise ConfigError('Can not delete SNMP service, as LLDP still uses SNMP!') ### check if the configured script actually exists if 'script_extensions' in snmp and 'extension_name' in snmp['script_extensions']: for extension, extension_opt in snmp['script_extensions']['extension_name'].items(): if 'script' not in extension_opt: raise ConfigError(f'Script extension "{extension}" requires an actual script to be configured!') tmp = extension_opt['script'] if not os.path.isfile(tmp): Warning(f'script "{tmp}" does not exist!') else: chmod_755(extension_opt['script']) if 'listen_address' in snmp: for address in snmp['listen_address']: # We only want to configure addresses that exist on the system.
# Hint the user if they don't exist if 'vrf' in snmp: vrf_name = snmp['vrf'] if not is_addr_assigned(address, vrf_name) and address not in ['::1','127.0.0.1']: raise ConfigError(f'SNMP listen address "{address}" not configured in vrf "{vrf_name}"!') elif not is_addr_assigned(address): raise ConfigError(f'SNMP listen address "{address}" not configured in default vrf!') if 'trap_target' in snmp: for trap, trap_config in snmp['trap_target'].items(): if 'community' not in trap_config: raise ConfigError(f'Trap target "{trap}" requires a community to be set!') if 'oid_enable' in snmp: Warning(f'Custom OIDs are enabled and may lead to system instability and high resource consumption') verify_vrf(snmp) # bail out early if SNMP v3 is not configured if 'v3' not in snmp: return None if 'user' in snmp['v3']: for user, user_config in snmp['v3']['user'].items(): if 'group' not in user_config: raise ConfigError(f'Group membership required for user "{user}"!') if 'plaintext_password' not in user_config['auth'] and 'encrypted_password' not in user_config['auth']: raise ConfigError(f'Must specify authentication encrypted-password or plaintext-password for user "{user}"!') if 'plaintext_password' not in user_config['privacy'] and 'encrypted_password' not in user_config['privacy']: raise ConfigError(f'Must specify privacy encrypted-password or plaintext-password for user "{user}"!') if 'group' in snmp['v3']: for group, group_config in snmp['v3']['group'].items(): if 'seclevel' not in group_config: raise ConfigError(f'Must configure "seclevel" for group "{group}"!') if 'view' not in group_config: raise ConfigError(f'Must configure "view" for group "{group}"!') # Check if 'view' exists view = group_config['view'] if 'view' not in snmp['v3'] or view not in snmp['v3']['view']: raise ConfigError(f'You must create view "{view}" first!') if 'view' in snmp['v3']: for view, view_config in snmp['v3']['view'].items(): if 'oid' not in view_config: raise ConfigError(f'Must configure an "oid" for view "{view}"!') if 'trap_target' in snmp['v3']: for trap, trap_config in snmp['v3']['trap_target'].items(): if 'plaintext_password' not in trap_config['auth'] and 'encrypted_password' not in trap_config['auth']: raise ConfigError(f'Must specify one of authentication encrypted-password or plaintext-password for trap "{trap}"!') if {'plaintext_password', 'encrypted_password'} <= set(trap_config['auth']): raise ConfigError(f'Can not specify both authentication encrypted-password and plaintext-password for trap "{trap}"!') if 'plaintext_password' not in trap_config['privacy'] and 'encrypted_password' not in trap_config['privacy']: raise ConfigError(f'Must specify one of privacy encrypted-password or plaintext-password for trap "{trap}"!') if {'plaintext_password', 'encrypted_password'} <= set(trap_config['privacy']): raise ConfigError(f'Can not specify both privacy encrypted-password and plaintext-password for trap "{trap}"!') if 'type' not in trap_config: raise ConfigError('SNMP v3 trap "type" must be specified!') return None def generate(snmp): # As we are manipulating the snmpd user database we have to stop it first! # This is even safe if the service is going to be removed call(f'systemctl stop {systemd_service}') # Clean config files config_files = [config_file_client, config_file_daemon, config_file_access, config_file_user, systemd_override] for file in config_files: if os.path.isfile(file): os.unlink(file) if 'deleted' in snmp: return None if 'v3' in snmp: # SNMPv3 uses a hashed password.
If CLI defines a plaintext password, # we will hash it in the background and replace the CLI node! if 'user' in snmp['v3']: for user, user_config in snmp['v3']['user'].items(): if dict_search('auth.type', user_config) == 'sha': hash = plaintext_to_sha1 else: hash = plaintext_to_md5 if dict_search('auth.plaintext_password', user_config) is not None: tmp = hash(dict_search('auth.plaintext_password', user_config), dict_search('v3.engineid', snmp)) snmp['v3']['user'][user]['auth']['encrypted_password'] = tmp del snmp['v3']['user'][user]['auth']['plaintext_password'] cli_base = ['service', 'snmp', 'v3', 'user', user, 'auth'] delete_cli_node(cli_base + ['plaintext-password']) add_cli_node(cli_base + ['encrypted-password'], value=tmp) if dict_search('privacy.plaintext_password', user_config) is not None: tmp = hash(dict_search('privacy.plaintext_password', user_config), dict_search('v3.engineid', snmp)) snmp['v3']['user'][user]['privacy']['encrypted_password'] = tmp del snmp['v3']['user'][user]['privacy']['plaintext_password'] cli_base = ['service', 'snmp', 'v3', 'user', user, 'privacy'] delete_cli_node(cli_base + ['plaintext-password']) add_cli_node(cli_base + ['encrypted-password'], value=tmp) # Write client config file render(config_file_client, 'snmp/etc.snmp.conf.j2', snmp) # Write server config file render(config_file_daemon, 'snmp/etc.snmpd.conf.j2', snmp) # Write access rights config file render(config_file_access, 'snmp/usr.snmpd.conf.j2', snmp) # Write access rights config file render(config_file_user, 'snmp/var.snmpd.conf.j2', snmp) # Write daemon configuration file render(systemd_override, 'snmp/override.conf.j2', snmp) return None def apply(snmp): # Always reload systemd manager configuration call('systemctl daemon-reload') if 'deleted' in snmp: return None # start SNMP daemon call(f'systemctl reload-or-restart {systemd_service}') # Enable AgentX in FRR # This should be done for each daemon individually because common command # works only if all the daemons started with SNMP support # Following daemons from FRR 9.0/stable have SNMP module compiled in VyOS - frr_daemons_list = ['zebra', 'bgpd', 'ospf6d', 'ospfd', 'ripd', 'isisd', 'ldpd'] + frr_daemons_list = [frr.zebra_daemon, frr.bgp_daemon, frr.ospf_daemon, frr.ospf6_daemon, + frr.rip_daemon, frr.isis_daemon, frr.ldpd_daemon] for frr_daemon in frr_daemons_list: call(f'vtysh -c "configure terminal" -d {frr_daemon} -c "agentx" >/dev/null') return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/system_ip.py b/src/conf_mode/system_ip.py index c8a91fd2f..5afb57404 100755 --- a/src/conf_mode/system_ip.py +++ b/src/conf_mode/system_ip.py @@ -1,146 +1,145 @@ #!/usr/bin/env python3 # # Copyright (C) 2019-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
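For reference, the service_snmp.py hunk above now imports `vyos.frr` only to reuse the exported daemon-name constants when enabling the AgentX module. A hedged sketch of that loop in isolation; the wrapper function name is hypothetical, while the vtysh invocation and the daemon list are taken from the hunk:

```
# Sketch only: the AgentX enable loop from apply() in service_snmp.py above.
from vyos import frr
from vyos.utils.process import call

def enable_frr_agentx():
    # Daemons from FRR 9.0/stable that are built with the SNMP module in VyOS.
    frr_daemons_list = [frr.zebra_daemon, frr.bgp_daemon, frr.ospf_daemon,
                        frr.ospf6_daemon, frr.rip_daemon, frr.isis_daemon,
                        frr.ldpd_daemon]
    for frr_daemon in frr_daemons_list:
        # AgentX has to be enabled per daemon; a single common command only
        # works when every daemon was started with SNMP support.
        call(f'vtysh -c "configure terminal" -d {frr_daemon} -c "agentx" >/dev/null')
```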
from sys import exit from vyos.config import Config from vyos.configdict import dict_merge from vyos.configverify import verify_route_map from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.file import write_file from vyos.utils.process import is_systemd_service_active from vyos.utils.system import sysctl_write from vyos.configdep import set_dependents from vyos.configdep import call_dependents from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() base = ['system', 'ip'] opt = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, with_recursive_defaults=True) # When working with FRR we need to know the corresponding address-family opt['afi'] = 'ip' # We also need the route-map information from the config # # XXX: one MUST always call this without the key_mangling() option! See # vyos.configverify.verify_common_route_maps() for more information. tmp = {'policy' : {'route-map' : conf.get_config_dict(['policy', 'route-map'], get_first_key=True)}} # Merge policy dict into "regular" config dict opt = dict_merge(tmp, opt) # If IPv4 ARP table size is set here and also manually in sysctl, the more # fine grained value from sysctl must win set_dependents('sysctl', conf) return opt def verify(opt): if 'protocol' in opt: for protocol, protocol_options in opt['protocol'].items(): if 'route_map' in protocol_options: verify_route_map(protocol_options['route_map'], opt) return def generate(opt): opt['frr_zebra_config'] = render_to_string('frr/zebra.route-map.frr.j2', opt) return def apply(opt): # Apply ARP threshold values # table_size has a default value - thus the key always exists size = int(dict_search('arp.table_size', opt)) # Amount upon reaching which the records begin to be cleared immediately sysctl_write('net.ipv4.neigh.default.gc_thresh3', size) # Amount after which the records begin to be cleaned after 5 seconds sysctl_write('net.ipv4.neigh.default.gc_thresh2', size // 2) # Minimum number of stored records is indicated which is not cleared sysctl_write('net.ipv4.neigh.default.gc_thresh1', size // 8) # enable/disable IPv4 forwarding tmp = dict_search('disable_forwarding', opt) value = '0' if (tmp != None) else '1' write_file('/proc/sys/net/ipv4/conf/all/forwarding', value) # configure multipath tmp = dict_search('multipath.ignore_unreachable_nexthops', opt) value = '1' if (tmp != None) else '0' sysctl_write('net.ipv4.fib_multipath_use_neigh', value) tmp = dict_search('multipath.layer4_hashing', opt) value = '1' if (tmp != None) else '0' sysctl_write('net.ipv4.fib_multipath_hash_policy', value) # configure TCP options (defaults as of Linux 6.4) tmp = dict_search('tcp.mss.probing', opt) if tmp is None: value = 0 elif tmp == 'on-icmp-black-hole': value = 1 elif tmp == 'force': value = 2 else: # Shouldn't happen raise ValueError("TCP MSS probing is neither 'on-icmp-black-hole' nor 'force'!") sysctl_write('net.ipv4.tcp_mtu_probing', value) tmp = dict_search('tcp.mss.base', opt) value = '1024' if (tmp is None) else tmp sysctl_write('net.ipv4.tcp_base_mss', value) tmp = dict_search('tcp.mss.floor', opt) value = '48' if (tmp is None) else tmp sysctl_write('net.ipv4.tcp_mtu_probe_floor', value) # During startup of vyos-router that brings up FRR, the service is not yet # running when this script is called first. 
Skip this part and wait for initial # commit of the configuration to trigger this statement if is_systemd_service_active('frr.service'): - zebra_daemon = 'zebra' # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) + frr_cfg.load_configuration(frr.mgmt_daemon) frr_cfg.modify_section(r'no ip nht resolve-via-default') frr_cfg.modify_section(r'ip protocol \w+ route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') if 'frr_zebra_config' in opt: frr_cfg.add_before(frr.default_add_before, opt['frr_zebra_config']) - frr_cfg.commit_configuration(zebra_daemon) + frr_cfg.commit_configuration() call_dependents() if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/system_ipv6.py b/src/conf_mode/system_ipv6.py index a2442d009..90d5100d7 100755 --- a/src/conf_mode/system_ipv6.py +++ b/src/conf_mode/system_ipv6.py @@ -1,130 +1,127 @@ #!/usr/bin/env python3 # -# Copyright (C) 2019-2023 VyOS maintainers and contributors +# Copyright (C) 2019-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os from sys import exit from vyos.config import Config from vyos.configdict import dict_merge from vyos.configverify import verify_route_map from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.file import write_file from vyos.utils.process import is_systemd_service_active from vyos.utils.system import sysctl_write from vyos.configdep import set_dependents from vyos.configdep import call_dependents from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() def get_config(config=None): if config: conf = config else: conf = Config() base = ['system', 'ipv6'] opt = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, with_recursive_defaults=True) # When working with FRR we need to know the corresponding address-family opt['afi'] = 'ipv6' # We also need the route-map information from the config # # XXX: one MUST always call this without the key_mangling() option! See # vyos.configverify.verify_common_route_maps() for more information. 
tmp = {'policy' : {'route-map' : conf.get_config_dict(['policy', 'route-map'], get_first_key=True)}} # Merge policy dict into "regular" config dict opt = dict_merge(tmp, opt) # If IPv6 neighbor table size is set here and also manually in sysctl, the more # fine grained value from sysctl must win set_dependents('sysctl', conf) return opt def verify(opt): if 'protocol' in opt: for protocol, protocol_options in opt['protocol'].items(): if 'route_map' in protocol_options: verify_route_map(protocol_options['route_map'], opt) return def generate(opt): opt['frr_zebra_config'] = render_to_string('frr/zebra.route-map.frr.j2', opt) return def apply(opt): # configure multipath tmp = dict_search('multipath.layer4_hashing', opt) value = '1' if (tmp != None) else '0' sysctl_write('net.ipv6.fib_multipath_hash_policy', value) # Apply ND threshold values # table_size has a default value - thus the key always exists size = int(dict_search('neighbor.table_size', opt)) # Amount upon reaching which the records begin to be cleared immediately sysctl_write('net.ipv6.neigh.default.gc_thresh3', size) # Amount after which the records begin to be cleaned after 5 seconds sysctl_write('net.ipv6.neigh.default.gc_thresh2', size // 2) # Minimum number of stored records is indicated which is not cleared sysctl_write('net.ipv6.neigh.default.gc_thresh1', size // 8) # enable/disable IPv6 forwarding tmp = dict_search('disable_forwarding', opt) value = '0' if (tmp != None) else '1' write_file('/proc/sys/net/ipv6/conf/all/forwarding', value) # configure IPv6 strict-dad tmp = dict_search('strict_dad', opt) value = '2' if (tmp != None) else '1' for root, dirs, files in os.walk('/proc/sys/net/ipv6/conf'): for name in files: if name == 'accept_dad': write_file(os.path.join(root, name), value) # During startup of vyos-router that brings up FRR, the service is not yet # running when this script is called first. Skip this part and wait for initial # commit of the configuration to trigger this statement if is_systemd_service_active('frr.service'): - zebra_daemon = 'zebra' # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) + frr_cfg.load_configuration(frr.mgmt_daemon) frr_cfg.modify_section(r'no ipv6 nht resolve-via-default') frr_cfg.modify_section(r'ipv6 protocol \w+ route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') if 'frr_zebra_config' in opt: frr_cfg.add_before(frr.default_add_before, opt['frr_zebra_config']) - frr_cfg.commit_configuration(zebra_daemon) + frr_cfg.commit_configuration() call_dependents() if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/vrf.py b/src/conf_mode/vrf.py index 72b178c89..6d17c192c 100755 --- a/src/conf_mode/vrf.py +++ b/src/conf_mode/vrf.py @@ -1,364 +1,360 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from jmespath import search from json import loads from vyos.config import Config from vyos.configdict import dict_merge from vyos.configdict import node_changed from vyos.configverify import verify_route_map from vyos.firewall import conntrack_required from vyos.ifconfig import Interface from vyos.template import render from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.network import get_vrf_tableid from vyos.utils.network import get_vrf_members from vyos.utils.network import interface_exists from vyos.utils.process import call from vyos.utils.process import cmd from vyos.utils.process import popen from vyos.utils.system import sysctl_write from vyos import ConfigError from vyos import frr from vyos import airbag airbag.enable() config_file = '/etc/iproute2/rt_tables.d/vyos-vrf.conf' k_mod = ['vrf'] nftables_table = 'inet vrf_zones' nftables_rules = { 'vrf_zones_ct_in': 'counter ct original zone set iifname map @ct_iface_map', 'vrf_zones_ct_out': 'counter ct original zone set oifname map @ct_iface_map' } def has_rule(af : str, priority : int, table : str=None): """ Check if a given ip rule exists $ ip --json -4 rule show [{'l3mdev': None, 'priority': 1000, 'src': 'all'}, {'action': 'unreachable', 'l3mdev': None, 'priority': 2000, 'src': 'all'}, {'priority': 32765, 'src': 'all', 'table': 'local'}, {'priority': 32766, 'src': 'all', 'table': 'main'}, {'priority': 32767, 'src': 'all', 'table': 'default'}] """ if af not in ['-4', '-6']: raise ValueError() command = f'ip --detail --json {af} rule show' for tmp in loads(cmd(command)): if 'priority' in tmp and 'table' in tmp: if tmp['priority'] == priority and tmp['table'] == table: return True elif 'priority' in tmp and table in tmp: # l3mdev table has a different layout if tmp['priority'] == priority: return True return False def is_nft_vrf_zone_rule_setup() -> bool: """ Check if an nftables connection tracking rule already exists """ tmp = loads(cmd('sudo nft -j list table inet vrf_zones')) num_rules = len(search("nftables[].rule[].chain", tmp)) return bool(num_rules) def vrf_interfaces(c, match): matched = [] old_level = c.get_level() c.set_level(['interfaces']) section = c.get_config_dict([], get_first_key=True) for type in section: interfaces = section[type] for name in interfaces: interface = interfaces[name] if 'vrf' in interface: v = interface.get('vrf', '') if v == match: matched.append(name) c.set_level(old_level) return matched def vrf_routing(c, match): matched = [] old_level = c.get_level() c.set_level(['protocols', 'vrf']) if match in c.list_nodes([]): matched.append(match) c.set_level(old_level) return matched def get_config(config=None): if config: conf = config else: conf = Config() base = ['vrf'] vrf = conf.get_config_dict(base, key_mangling=('-', '_'), no_tag_node_value_mangle=True, get_first_key=True) # determine which VRF has been removed for name in node_changed(conf, base + ['name']): if 'vrf_remove' not in vrf: vrf.update({'vrf_remove' : {}}) vrf['vrf_remove'][name] = {} # get VRF bound interfaces interfaces = vrf_interfaces(conf, name) if interfaces: vrf['vrf_remove'][name]['interface'] = interfaces # get VRF bound routing instances routes = vrf_routing(conf, name) if routes: vrf['vrf_remove'][name]['route'] = routes if 'name' in vrf: vrf['conntrack'] = conntrack_required(conf) # We also need the route-map 
information from the config # # XXX: one MUST always call this without the key_mangling() option! See # vyos.configverify.verify_common_route_maps() for more information. tmp = {'policy' : {'route-map' : conf.get_config_dict(['policy', 'route-map'], get_first_key=True)}} # Merge policy dict into "regular" config dict vrf = dict_merge(tmp, vrf) return vrf def verify(vrf): # ensure VRF is not assigned to any interface if 'vrf_remove' in vrf: for name, config in vrf['vrf_remove'].items(): if 'interface' in config: raise ConfigError(f'Can not remove VRF "{name}", it still has '\ f'member interfaces!') if 'route' in config: raise ConfigError(f'Can not remove VRF "{name}", it still has '\ f'static routes installed!') if 'name' in vrf: reserved_names = ["add", "all", "broadcast", "default", "delete", "dev", "get", "inet", "mtu", "link", "type", "vrf"] table_ids = [] for name, vrf_config in vrf['name'].items(): # Reserved VRF names if name in reserved_names: raise ConfigError(f'VRF name "{name}" is reserved and cannot be used!') # table id is mandatory if 'table' not in vrf_config: raise ConfigError(f'VRF "{name}" table id is mandatory!') # routing table id can't be changed - OS restriction if interface_exists(name): tmp = get_vrf_tableid(name) if tmp and tmp != int(vrf_config['table']): raise ConfigError(f'VRF "{name}" table id modification not possible!') # VRF routing table ID must be unique on the system if 'table' in vrf_config and vrf_config['table'] in table_ids: raise ConfigError(f'VRF "{name}" table id is not unique!') table_ids.append(vrf_config['table']) tmp = dict_search('ip.protocol', vrf_config) if tmp != None: for protocol, protocol_options in tmp.items(): if 'route_map' in protocol_options: verify_route_map(protocol_options['route_map'], vrf) tmp = dict_search('ipv6.protocol', vrf_config) if tmp != None: for protocol, protocol_options in tmp.items(): if 'route_map' in protocol_options: verify_route_map(protocol_options['route_map'], vrf) return None def generate(vrf): # Render iproute2 VR helper names render(config_file, 'iproute2/vrf.conf.j2', vrf) # Render VRF Kernel/Zebra route-map filters vrf['frr_zebra_config'] = render_to_string('frr/zebra.vrf.route-map.frr.j2', vrf) return None def apply(vrf): # Documentation # # - https://github.com/torvalds/linux/blob/master/Documentation/networking/vrf.txt # - https://github.com/Mellanox/mlxsw/wiki/Virtual-Routing-and-Forwarding-(VRF) # - https://github.com/Mellanox/mlxsw/wiki/L3-Tunneling # - https://netdevconf.info/1.1/proceedings/slides/ahern-vrf-tutorial.pdf # - https://netdevconf.info/1.2/slides/oct6/02_ahern_what_is_l3mdev_slides.pdf # set the default VRF global behaviour bind_all = '0' if 'bind_to_all' in vrf: bind_all = '1' sysctl_write('net.ipv4.tcp_l3mdev_accept', bind_all) sysctl_write('net.ipv4.udp_l3mdev_accept', bind_all) for tmp in (dict_search('vrf_remove', vrf) or []): if interface_exists(tmp): # T5492: deleting a VRF instance may leave processes running # (e.g. dhclient) as there is a dependency ordering issue in the CLI.
# We need to ensure that we stop the dhclient processes first so # a proper DHCP RELEASE message is sent for interface in get_vrf_members(tmp): vrf_iface = Interface(interface) vrf_iface.set_dhcp(False) vrf_iface.set_dhcpv6(False) # Remove nftables conntrack zone map item nft_del_element = f'delete element inet vrf_zones ct_iface_map {{ "{tmp}" }}' # Check if deleting is possible first to avoid raising errors _, err = popen(f'nft --check {nft_del_element}') if not err: # Remove map element cmd(f'nft {nft_del_element}') # Delete the VRF Kernel interface call(f'ip link delete dev {tmp}') if 'name' in vrf: # Linux routing uses rules to find tables - routing targets are then # looked up in those tables. If the lookup got a matching route, the # process ends. # # TL;DR; first table with a matching entry wins! # # You can see your routing table lookup rules using "ip rule", sadly the # local lookup is hit before any VRF lookup. Pinging an address from the # VRF will usually find a hit in the local table, and never reach the VRF # routing table - this is usually not what you want. Thus we will # re-arrange the tables and move the local lookup further down once VRFs # are enabled. # # Thanks to https://stbuehler.de/blog/article/2020/02/29/using_vrf__virtual_routing_and_forwarding__on_linux.html for afi in ['-4', '-6']: # move lookup local to pref 32765 (from 0) if not has_rule(afi, 32765, 'local'): call(f'ip {afi} rule add pref 32765 table local') if has_rule(afi, 0, 'local'): call(f'ip {afi} rule del pref 0') # make sure that in VRFs after failed lookup in the VRF specific table # nothing else is reached if not has_rule(afi, 1000, 'l3mdev'): # this should be added by the kernel when a VRF is created # add it here for completeness call(f'ip {afi} rule add pref 1000 l3mdev protocol kernel') # add another rule with an unreachable target which only triggers in VRF context # if a route could not be reached if not has_rule(afi, 2000, 'l3mdev'): call(f'ip {afi} rule add pref 2000 l3mdev unreachable') nft_vrf_zone_rule_setup = False for name, config in vrf['name'].items(): table = config['table'] if not interface_exists(name): # For each VRF apart from your default context create a VRF # interface with a separate routing table call(f'ip link add {name} type vrf table {table}') # set VRF description for e.g. SNMP monitoring vrf_if = Interface(name) # We also should add proper loopback IP addresses to the newly added # VRF for services bound to the loopback address (SNMP, NTP) vrf_if.add_addr('127.0.0.1/8') vrf_if.add_addr('::1/128') # add VRF description if available vrf_if.set_alias(config.get('description', '')) # Enable/Disable IPv4 forwarding tmp = dict_search('ip.disable_forwarding', config) value = '0' if (tmp != None) else '1' vrf_if.set_ipv4_forwarding(value) # Enable/Disable IPv6 forwarding tmp = dict_search('ipv6.disable_forwarding', config) value = '0' if (tmp != None) else '1' vrf_if.set_ipv6_forwarding(value) # Enable/Disable of an interface must always be done at the end of the # derived class to make use of the ref-counting set_admin_state() # function. We will only enable the interface if 'up' was called as # often as 'down'. This is required by some interface implementations # as certain parameters can only be changed when the interface is # in admin-down state. This ensures the link does not flap during # reconfiguration.
state = 'down' if 'disable' in config else 'up' vrf_if.set_admin_state(state) # Add nftables conntrack zone map item nft_add_element = f'add element inet vrf_zones ct_iface_map {{ "{name}" : {table} }}' cmd(f'nft {nft_add_element}') # Only call into nftables as long as there is nothing set up, to avoid wasting # CPU time and thus lengthening the commit process if not nft_vrf_zone_rule_setup: nft_vrf_zone_rule_setup = is_nft_vrf_zone_rule_setup() # Install nftables conntrack rules only once if vrf['conntrack'] and not nft_vrf_zone_rule_setup: for chain, rule in nftables_rules.items(): cmd(f'nft add rule inet vrf_zones {chain} {rule}') if 'name' not in vrf or not vrf['conntrack']: for chain, rule in nftables_rules.items(): cmd(f'nft flush chain inet vrf_zones {chain}') # Return default ip rule values if 'name' not in vrf: for afi in ['-4', '-6']: # move lookup local to pref 0 (from 32765) if not has_rule(afi, 0, 'local'): call(f'ip {afi} rule add pref 0 from all lookup local') if has_rule(afi, 32765, 'local'): call(f'ip {afi} rule del pref 32765 table local') if has_rule(afi, 1000, 'l3mdev'): call(f'ip {afi} rule del pref 1000 l3mdev protocol kernel') if has_rule(afi, 2000, 'l3mdev'): call(f'ip {afi} rule del pref 2000 l3mdev unreachable') - # Apply FRR filters - zebra_daemon = 'zebra' # Save original configuration prior to starting any commit actions frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(zebra_daemon) + frr_cfg.load_configuration(frr.mgmt_daemon) frr_cfg.modify_section(f'^vrf .+', stop_pattern='^exit-vrf', remove_stop_mark=True) if 'frr_zebra_config' in vrf: frr_cfg.add_before(frr.default_add_before, vrf['frr_zebra_config']) - frr_cfg.commit_configuration(zebra_daemon) + frr_cfg.commit_configuration() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1)
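Taken together, the protocols_static.py, protocols_static_multicast.py, system_ip.py, system_ipv6.py and vrf.py hunks all switch from loading and committing against staticd or zebra to loading via `frr.mgmt_daemon` and calling `commit_configuration()` without a daemon argument. A minimal sketch of that shared flow, assuming the FRRConfig API behaves as used in the hunks above; the helper name and its parameters are illustrative only:

```
# Sketch only: the mgmtd-based commit flow shared by the scripts in this diff.
from vyos import frr

def commit_section_via_mgmtd(section_re, rendered_config=None,
                             stop_pattern=None, remove_stop_mark=False):
    frr_cfg = frr.FRRConfig()
    # All of the touched scripts now read the candidate configuration via mgmtd.
    frr_cfg.load_configuration(frr.mgmt_daemon)
    if stop_pattern:
        frr_cfg.modify_section(section_re, stop_pattern=stop_pattern,
                               remove_stop_mark=remove_stop_mark)
    else:
        frr_cfg.modify_section(section_re)
    if rendered_config:
        frr_cfg.add_before(frr.default_add_before, rendered_config)
    # No daemon argument any more, matching the '+' lines above.
    frr_cfg.commit_configuration()
```

As an illustrative usage, the vrf.py hunk above corresponds to calling this helper with `section_re=r'^vrf .+'`, `stop_pattern='^exit-vrf'`, `remove_stop_mark=True` and the rendered `frr_zebra_config` string.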