diff --git a/data/templates/frr/bgpd.frr.j2 b/data/templates/frr/bgpd.frr.j2 index 71716250d..51a3f2564 100644 --- a/data/templates/frr/bgpd.frr.j2 +++ b/data/templates/frr/bgpd.frr.j2 @@ -1,668 +1,674 @@ {### MACRO definition for recurring peer patter, this can be either fed by a ###} {### peer-group or an individual BGP neighbor ###} {% macro bgp_neighbor(neighbor, config, peer_group=false) %} +{# BGP order of peer-group and remote-as placement must be honored #} {% if peer_group == true %} neighbor {{ neighbor }} peer-group -{% elif config.peer_group is vyos_defined %} - neighbor {{ neighbor }} peer-group {{ config.peer_group }} -{% endif %} -{% if config.remote_as is vyos_defined %} +{% if config.remote_as is vyos_defined %} + neighbor {{ neighbor }} remote-as {{ config.remote_as }} +{% endif %} +{% else %} +{% if config.remote_as is vyos_defined %} neighbor {{ neighbor }} remote-as {{ config.remote_as }} +{% endif %} +{% if config.peer_group is vyos_defined %} + neighbor {{ neighbor }} peer-group {{ config.peer_group }} +{% endif %} {% endif %} {% if config.local_role is vyos_defined %} {% for role, strict in config.local_role.items() %} neighbor {{ neighbor }} local-role {{ role }} {{ 'strict-mode' if strict }} {% endfor %} {% endif %} {% if config.interface.remote_as is vyos_defined %} neighbor {{ neighbor }} interface remote-as {{ config.interface.remote_as }} {% endif %} {% if config.advertisement_interval is vyos_defined %} neighbor {{ neighbor }} advertisement-interval {{ config.advertisement_interval }} {% endif %} {% if config.bfd is vyos_defined %} neighbor {{ neighbor }} bfd {% if config.bfd.check_control_plane_failure is vyos_defined %} neighbor {{ neighbor }} bfd check-control-plane-failure {% endif %} {% if config.bfd.profile is vyos_defined %} neighbor {{ neighbor }} bfd profile {{ config.bfd.profile }} {% endif %} {% endif %} {% if config.capability.dynamic is vyos_defined %} neighbor {{ neighbor }} capability dynamic {% endif %} {% if config.capability.extended_nexthop is vyos_defined %} neighbor {{ neighbor }} capability extended-nexthop {% endif %} {% if config.capability.software_version is vyos_defined %} neighbor {{ neighbor }} capability software-version {% endif %} {% if config.description is vyos_defined %} neighbor {{ neighbor }} description {{ config.description }} {% endif %} {% if config.disable_capability_negotiation is vyos_defined %} neighbor {{ neighbor }} dont-capability-negotiate {% endif %} {% if config.disable_connected_check is vyos_defined %} neighbor {{ neighbor }} disable-connected-check {% endif %} {% if config.ebgp_multihop is vyos_defined %} neighbor {{ neighbor }} ebgp-multihop {{ config.ebgp_multihop }} {% endif %} {% if config.graceful_restart is vyos_defined %} {% if config.graceful_restart is vyos_defined('enable') %} {% set graceful_restart = 'graceful-restart' %} {% elif config.graceful_restart is vyos_defined('disable') %} {% set graceful_restart = 'graceful-restart-disable' %} {% elif config.graceful_restart is vyos_defined('restart-helper') %} {% set graceful_restart = 'graceful-restart-helper' %} {% endif %} neighbor {{ neighbor }} {{ graceful_restart }} {% endif %} {% if config.local_as is vyos_defined %} {% for local_as, local_as_config in config.local_as.items() %} {# There can be only one local-as value, this is checked in the Python code #} neighbor {{ neighbor }} local-as {{ local_as }} {{ 'no-prepend' if local_as_config.no_prepend is vyos_defined }} {{ 'replace-as' if local_as_config.no_prepend is vyos_defined and 
local_as_config.no_prepend.replace_as is vyos_defined }} {% endfor %} {% endif %} {% if config.override_capability is vyos_defined %} neighbor {{ neighbor }} override-capability {% endif %} {% if config.passive is vyos_defined %} neighbor {{ neighbor }} passive {% endif %} {% if config.password is vyos_defined %} neighbor {{ neighbor }} password {{ config.password }} {% endif %} {% if config.path_attribute.discard is vyos_defined %} neighbor {{ neighbor }} path-attribute discard {{ config.path_attribute.discard | join(' ') }} {% endif %} {% if config.path_attribute.treat_as_withdraw is vyos_defined %} neighbor {{ neighbor }} path-attribute treat-as-withdraw {{ config.path_attribute.treat_as_withdraw }} {% endif %} {% if config.port is vyos_defined %} neighbor {{ neighbor }} port {{ config.port }} {% endif %} {% if config.shutdown is vyos_defined %} neighbor {{ neighbor }} shutdown {% endif %} {% if config.solo is vyos_defined %} neighbor {{ neighbor }} solo {% endif %} {% if config.enforce_first_as is vyos_defined %} neighbor {{ neighbor }} enforce-first-as {% endif %} {% if config.strict_capability_match is vyos_defined %} neighbor {{ neighbor }} strict-capability-match {% endif %} {% if config.ttl_security.hops is vyos_defined %} neighbor {{ neighbor }} ttl-security hops {{ config.ttl_security.hops }} {% endif %} {% if config.timers.connect is vyos_defined %} neighbor {{ neighbor }} timers connect {{ config.timers.connect }} {% endif %} {% if config.timers.keepalive is vyos_defined and config.timers.holdtime is vyos_defined %} neighbor {{ neighbor }} timers {{ config.timers.keepalive }} {{ config.timers.holdtime }} {% endif %} {% if config.update_source is vyos_defined %} neighbor {{ neighbor }} update-source {{ config.update_source }} {% endif %} {% if config.interface is vyos_defined %} {% if config.interface.peer_group is vyos_defined %} neighbor {{ neighbor }} interface peer-group {{ config.interface.peer_group }} {% endif %} {% if config.interface.source_interface is vyos_defined %} neighbor {{ neighbor }} interface {{ config.interface.source_interface }} {% endif %} {% if config.interface.v6only is vyos_defined %} {% if config.interface.v6only.peer_group is vyos_defined %} neighbor {{ neighbor }} interface v6only peer-group {{ config.interface.v6only.peer_group }} {% endif %} {% if config.interface.v6only.remote_as is vyos_defined %} neighbor {{ neighbor }} interface v6only remote-as {{ config.interface.v6only.remote_as }} {% endif %} {% endif %} {% endif %} ! 
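The reordered branches above emit remote-as after the 'neighbor X peer-group' definition when rendering a peer-group, but before the peer-group binding when rendering an individual neighbor. A minimal sketch of the resulting FRR output, assuming an illustrative peer-group PG-AS65010 and neighbor 192.0.2.1 (values not taken from this patch):

neighbor PG-AS65010 peer-group
neighbor PG-AS65010 remote-as 65010
!
neighbor 192.0.2.1 remote-as 65010
neighbor 192.0.2.1 peer-group PG-AS65010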
{% if config.address_family is vyos_defined %} {% for afi, afi_config in config.address_family.items() %} {% if afi == 'ipv4_unicast' %} address-family ipv4 unicast {% elif afi == 'ipv4_multicast' %} address-family ipv4 multicast {% elif afi == 'ipv4_labeled_unicast' %} address-family ipv4 labeled-unicast {% elif afi == 'ipv4_vpn' %} address-family ipv4 vpn {% elif afi == 'ipv4_flowspec' %} address-family ipv4 flowspec {% elif afi == 'ipv6_unicast' %} address-family ipv6 unicast {% elif afi == 'ipv6_multicast' %} address-family ipv6 multicast {% elif afi == 'ipv6_labeled_unicast' %} address-family ipv6 labeled-unicast {% elif afi == 'ipv6_vpn' %} address-family ipv6 vpn {% elif afi == 'ipv6_flowspec' %} address-family ipv6 flowspec {% elif afi == 'l2vpn_evpn' %} address-family l2vpn evpn {% endif %} {% if afi_config.addpath_tx_all is vyos_defined %} neighbor {{ neighbor }} addpath-tx-all-paths {% endif %} {% if afi_config.addpath_tx_per_as is vyos_defined %} neighbor {{ neighbor }} addpath-tx-bestpath-per-AS {% endif %} {% if afi_config.allowas_in is vyos_defined %} neighbor {{ neighbor }} allowas-in {{ afi_config.allowas_in.number if afi_config.allowas_in.number is vyos_defined }} {% endif %} {% if afi_config.as_override is vyos_defined %} neighbor {{ neighbor }} as-override {% endif %} {% if afi_config.conditionally_advertise is vyos_defined %} {% if afi_config.conditionally_advertise.advertise_map is vyos_defined %} {% set exist_non_exist_map = 'exist-map' %} {% if afi_config.conditionally_advertise.exist_map is vyos_defined %} {% set exist_non_exist_map = 'exist-map ' ~ afi_config.conditionally_advertise.exist_map %} {% elif afi_config.conditionally_advertise.non_exist_map is vyos_defined %} {% set exist_non_exist_map = 'non-exist-map ' ~ afi_config.conditionally_advertise.non_exist_map %} {% endif %} neighbor {{ neighbor }} advertise-map {{ afi_config.conditionally_advertise.advertise_map }} {{ exist_non_exist_map }} {% endif %} {% endif %} {% if afi_config.remove_private_as is vyos_defined %} neighbor {{ neighbor }} remove-private-AS {{ 'all' if afi_config.remove_private_as.all is vyos_defined }} {% endif %} {% if afi_config.route_reflector_client is vyos_defined %} neighbor {{ neighbor }} route-reflector-client {% endif %} {% if afi_config.weight is vyos_defined %} neighbor {{ neighbor }} weight {{ afi_config.weight }} {% endif %} {% if afi_config.attribute_unchanged is vyos_defined %} neighbor {{ neighbor }} attribute-unchanged {{ 'as-path ' if afi_config.attribute_unchanged.as_path is vyos_defined }}{{ 'med ' if afi_config.attribute_unchanged.med is vyos_defined }}{{ 'next-hop ' if afi_config.attribute_unchanged.next_hop is vyos_defined }} {% endif %} {% if afi_config.capability.orf.prefix_list.send is vyos_defined %} neighbor {{ neighbor }} capability orf prefix-list send {% endif %} {% if afi_config.capability.orf.prefix_list.receive is vyos_defined %} neighbor {{ neighbor }} capability orf prefix-list receive {% endif %} {% if afi_config.default_originate is vyos_defined %} neighbor {{ neighbor }} default-originate {{ 'route-map ' ~ afi_config.default_originate.route_map if afi_config.default_originate.route_map is vyos_defined }} {% endif %} {% if afi_config.distribute_list.export is vyos_defined %} neighbor {{ neighbor }} distribute-list {{ afi_config.distribute_list.export }} out {% endif %} {% if afi_config.distribute_list.import is vyos_defined %} neighbor {{ neighbor }} distribute-list {{ afi_config.distribute_list.import }} in {% endif %} {% if 
afi_config.filter_list.export is vyos_defined %} neighbor {{ neighbor }} filter-list {{ afi_config.filter_list.export }} out {% endif %} {% if afi_config.filter_list.import is vyos_defined %} neighbor {{ neighbor }} filter-list {{ afi_config.filter_list.import }} in {% endif %} {% if afi_config.maximum_prefix is vyos_defined %} neighbor {{ neighbor }} maximum-prefix {{ afi_config.maximum_prefix }} {% endif %} {% if afi_config.maximum_prefix_out is vyos_defined %} neighbor {{ neighbor }} maximum-prefix-out {{ afi_config.maximum_prefix_out }} {% endif %} {% if afi_config.nexthop_self is vyos_defined %} neighbor {{ neighbor }} next-hop-self {{ 'force' if afi_config.nexthop_self.force is vyos_defined }} {% endif %} {% if afi_config.route_server_client is vyos_defined %} neighbor {{ neighbor }} route-server-client {% endif %} {% if afi_config.route_map.export is vyos_defined %} neighbor {{ neighbor }} route-map {{ afi_config.route_map.export }} out {% endif %} {% if afi_config.route_map.import is vyos_defined %} neighbor {{ neighbor }} route-map {{ afi_config.route_map.import }} in {% endif %} {% if afi_config.prefix_list.export is vyos_defined %} neighbor {{ neighbor }} prefix-list {{ afi_config.prefix_list.export }} out {% endif %} {% if afi_config.prefix_list.import is vyos_defined %} neighbor {{ neighbor }} prefix-list {{ afi_config.prefix_list.import }} in {% endif %} {% if afi_config.soft_reconfiguration.inbound is vyos_defined %} neighbor {{ neighbor }} soft-reconfiguration inbound {% endif %} {% if afi_config.unsuppress_map is vyos_defined %} neighbor {{ neighbor }} unsuppress-map {{ afi_config.unsuppress_map }} {% endif %} {% if afi_config.disable_send_community.extended is vyos_defined %} no neighbor {{ neighbor }} send-community extended {% endif %} {% if afi_config.disable_send_community.standard is vyos_defined %} no neighbor {{ neighbor }} send-community standard {% endif %} neighbor {{ neighbor }} activate exit-address-family ! {# j2lint: disable=jinja-statements-delimeter #} {% endfor %} {% endif %} {# j2lint: disable=jinja-statements-delimeter #} {%- endmacro -%} ! router bgp {{ system_as }} {{ 'vrf ' ~ vrf if vrf is vyos_defined }} {% if parameters.ebgp_requires_policy is vyos_defined %} bgp ebgp-requires-policy {% else %} no bgp ebgp-requires-policy {% endif %} {# Option must be set before any neighbor - see https://vyos.dev/T3463 #} no bgp default ipv4-unicast {# Workaround for T2100 until we have decided about a migration script #} no bgp network import-check {% if address_family is vyos_defined %} {% for afi, afi_config in address_family.items() %} ! 
{% if afi == 'ipv4_unicast' %} address-family ipv4 unicast {% elif afi == 'ipv4_multicast' %} address-family ipv4 multicast {% elif afi == 'ipv4_labeled_unicast' %} address-family ipv4 labeled-unicast {% elif afi == 'ipv4_vpn' %} address-family ipv4 vpn {% elif afi == 'ipv4_flowspec' %} address-family ipv4 flowspec {% elif afi == 'ipv6_unicast' %} address-family ipv6 unicast {% elif afi == 'ipv6_multicast' %} address-family ipv6 multicast {% elif afi == 'ipv6_labeled_unicast' %} address-family ipv6 labeled-unicast {% elif afi == 'ipv6_vpn' %} address-family ipv6 vpn {% elif afi == 'ipv6_flowspec' %} address-family ipv6 flowspec {% elif afi == 'l2vpn_evpn' %} address-family l2vpn evpn {% if afi_config.rd is vyos_defined %} rd {{ afi_config.rd }} {% endif %} {% endif %} {% if afi_config.aggregate_address is vyos_defined %} {% for aggregate, aggregate_config in afi_config.aggregate_address.items() %} aggregate-address {{ aggregate }} {{ 'as-set' if aggregate_config.as_set is vyos_defined }} {{ 'summary-only' if aggregate_config.summary_only is vyos_defined }} {{ 'route-map ' ~ aggregate_config.route_map if aggregate_config.route_map is vyos_defined }} {% endfor %} {% endif %} {% if afi_config.maximum_paths.ebgp is vyos_defined %} maximum-paths {{ afi_config.maximum_paths.ebgp }} {% endif %} {% if afi_config.maximum_paths.ibgp is vyos_defined %} maximum-paths ibgp {{ afi_config.maximum_paths.ibgp }} {% endif %} {% if afi_config.redistribute is vyos_defined %} {% for protocol, protocol_config in afi_config.redistribute.items() %} {% if protocol == 'table' %} redistribute table {{ protocol_config.table }} {% else %} {% set redistribution_protocol = protocol %} {% if protocol == 'ospfv3' %} {% set redistribution_protocol = 'ospf6' %} {% endif %} redistribute {{ redistribution_protocol }} {{ 'metric ' ~ protocol_config.metric if protocol_config.metric is vyos_defined }} {{ 'route-map ' ~ protocol_config.route_map if protocol_config.route_map is vyos_defined }} {####### we need this blank line!! #######} {% endif %} {% endfor %} {% endif %} {% if afi_config.network is vyos_defined %} {% for network, network_config in afi_config.network.items() %} network {{ network }} {{ 'route-map ' ~ network_config.route_map if network_config.route_map is vyos_defined }} {{ 'backdoor' if network_config.backdoor is vyos_defined }} {{ 'rd ' ~ network_config.rd if network_config.rd is vyos_defined }} {{ 'label ' ~ network_config.label if network_config.label is vyos_defined }} {####### we need this blank line!! 
#######} {% endfor %} {% endif %} {% if afi_config.advertise is vyos_defined %} {% for adv_afi, adv_afi_config in afi_config.advertise.items() %} {% if adv_afi_config.unicast is vyos_defined %} advertise {{ adv_afi }} unicast {{ 'route-map ' ~ adv_afi_config.unicast.route_map if adv_afi_config.unicast.route_map is vyos_defined }} {% endif %} {% endfor %} {% endif %} {% if afi_config.distance.external is vyos_defined and afi_config.distance.internal is vyos_defined and afi_config.distance.local is vyos_defined %} distance bgp {{ afi_config.distance.external }} {{ afi_config.distance.internal }} {{ afi_config.distance.local }} {% endif %} {% if afi_config.distance.prefix is vyos_defined %} {% for prefix in afi_config.distance.prefix %} distance {{ afi_config.distance.prefix[prefix].distance }} {{ prefix }} {% endfor %} {% endif %} {% if afi_config.export.vpn is vyos_defined %} export vpn {% endif %} {% if afi_config.import.vpn is vyos_defined %} import vpn {% endif %} {% if afi_config.import.vrf is vyos_defined %} {% for vrf in afi_config.import.vrf %} import vrf {{ vrf }} {% endfor %} {% endif %} {% if afi_config.label.vpn.export is vyos_defined %} label vpn export {{ afi_config.label.vpn.export }} {% endif %} {% if afi_config.label.vpn.allocation_mode.per_nexthop is vyos_defined %} label vpn export allocation-mode per-nexthop {% endif %} {% if afi_config.local_install is vyos_defined %} {% for interface in afi_config.local_install.interface %} local-install {{ interface }} {% endfor %} {% endif %} {% if afi_config.advertise_all_vni is vyos_defined %} advertise-all-vni {% endif %} {% if afi_config.advertise_default_gw is vyos_defined %} advertise-default-gw {% endif %} {% if afi_config.advertise_pip is vyos_defined %} advertise-pip ip {{ afi_config.advertise_pip }} {% endif %} {% if afi_config.advertise_svi_ip is vyos_defined %} advertise-svi-ip {% endif %} {% if afi_config.default_originate.ipv4 is vyos_defined %} default-originate ipv4 {% endif %} {% if afi_config.default_originate.ipv6 is vyos_defined %} default-originate ipv6 {% endif %} {% if afi_config.disable_ead_evi_rx is vyos_defined %} disable-ead-evi-rx {% endif %} {% if afi_config.disable_ead_evi_tx is vyos_defined %} disable-ead-evi-tx {% endif %} {% if afi_config.ead_es_frag.evi_limit is vyos_defined %} ead-es-frag evi-limit {{ afi_config.ead_es_frag.evi_limit }} {% endif %} {% if afi_config.ead_es_route_target.export is vyos_defined %} {% for route_target in afi_config.ead_es_route_target.export %} ead-es-route-target export {{ route_target }} {% endfor %} {% endif %} {% if afi_config.rt_auto_derive is vyos_defined %} autort rfc8365-compatible {% endif %} {% if afi_config.flooding.disable is vyos_defined %} flooding disable {% endif %} {% if afi_config.flooding.head_end_replication is vyos_defined %} flooding head-end-replication {% endif %} {% if afi_config.mac_vrf.soo is vyos_defined %} mac-vrf soo {{ afi_config.mac_vrf.soo }} {% endif %} {% if afi_config.nexthop.vpn.export is vyos_defined %} nexthop vpn export {{ afi_config.nexthop.vpn.export }} {% endif %} {% if afi_config.rd.vpn.export is vyos_defined %} rd vpn export {{ afi_config.rd.vpn.export }} {% endif %} {% if afi_config.route_target.vpn.both is vyos_defined %} route-target vpn both {{ afi_config.route_target.vpn.both }} {% else %} {% if afi_config.route_target.vpn.export is vyos_defined %} route-target vpn export {{ afi_config.route_target.vpn.export }} {% endif %} {% if afi_config.route_target.vpn.import is vyos_defined %} route-target vpn import {{ 
afi_config.route_target.vpn.import }} {% endif %} {% endif %} {% if afi_config.route_target.both is vyos_defined %} {% for route_target in afi_config.route_target.both %} route-target both {{ route_target }} {% endfor %} {% endif %} {% if afi_config.route_target.export is vyos_defined %} {% for route_target in afi_config.route_target.export %} route-target export {{ route_target }} {% endfor %} {% endif %} {% if afi_config.route_target.import is vyos_defined %} {% for route_target in afi_config.route_target.import %} route-target import {{ route_target }} {% endfor %} {% endif %} {% if afi_config.route_map.vpn.export is vyos_defined %} route-map vpn export {{ afi_config.route_map.vpn.export }} {% endif %} {% if afi_config.route_map.vpn.import is vyos_defined %} route-map vpn import {{ afi_config.route_map.vpn.import }} {% endif %} {% if afi_config.sid.vpn.export is vyos_defined %} sid vpn export {{ afi_config.sid.vpn.export }} {% endif %} {% if afi_config.vni is vyos_defined %} {% for vni, vni_config in afi_config.vni.items() %} vni {{ vni }} {% if vni_config.advertise_default_gw is vyos_defined %} advertise-default-gw {% endif %} {% if vni_config.advertise_svi_ip is vyos_defined %} advertise-svi-ip {% endif %} {% if vni_config.rd is vyos_defined %} rd {{ vni_config.rd }} {% endif %} {% if vni_config.route_target.both is vyos_defined %} {% for route_target in vni_config.route_target.both %} route-target both {{ route_target }} {% endfor %} {% endif %} {% if vni_config.route_target.export is vyos_defined %} {% for route_target in vni_config.route_target.export %} route-target export {{ route_target }} {% endfor %} {% endif %} {% if vni_config.route_target.import is vyos_defined %} {% for route_target in vni_config.route_target.import %} route-target import {{ route_target }} {% endfor %} {% endif %} exit-vni {% endfor %} {% endif %} exit-address-family ! {% endfor %} {% endif %} ! {% if bmp is vyos_defined %} {% if bmp.mirror_buffer_limit is vyos_defined %} bmp mirror buffer-limit {{ bmp.mirror_buffer_limit }} ! {% endif %} {% if bmp.target is vyos_defined %} {% for bmp, bmp_config in bmp.target.items() %} bmp targets {{ bmp }} {% if bmp_config.mirror is vyos_defined %} bmp mirror {% endif %} {% if bmp_config.monitor is vyos_defined %} {% if bmp_config.monitor.ipv4_unicast.pre_policy is vyos_defined %} bmp monitor ipv4 unicast pre-policy {% endif %} {% if bmp_config.monitor.ipv4_unicast.post_policy is vyos_defined %} bmp monitor ipv4 unicast post-policy {% endif %} {% if bmp_config.monitor.ipv6_unicast.pre_policy is vyos_defined %} bmp monitor ipv6 unicast pre-policy {% endif %} {% if bmp_config.monitor.ipv6_unicast.post_policy is vyos_defined %} bmp monitor ipv6 unicast post-policy {% endif %} {% endif %} {% if bmp_config.address is vyos_defined %} bmp connect {{ bmp_config.address }} port {{ bmp_config.port }} min-retry {{ bmp_config.min_retry }} max-retry {{ bmp_config.max_retry }} {% endif %} {% endfor %} exit {% endif %} {% endif %} {% if peer_group is vyos_defined %} {% for peer, config in peer_group.items() %} {{ bgp_neighbor(peer, config, true) }} {# j2lint: disable=jinja-statements-delimeter #} {%- endfor %} {% endif %} ! {% if neighbor is vyos_defined %} {% for peer, config in neighbor.items() %} {{ bgp_neighbor(peer, config) }} {# j2lint: disable=jinja-statements-delimeter #} {%- endfor %} {% endif %} ! 
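For orientation, a hedged sketch of what the BMP target block above can render to; the target name 'monitoring-station' and the collector address, port and retry timers are illustrative assumptions only, not values from this patch:

bmp mirror buffer-limit 4294967214
!
bmp targets monitoring-station
 bmp monitor ipv4 unicast pre-policy
 bmp monitor ipv6 unicast post-policy
 bmp connect 192.0.2.10 port 5000 min-retry 1000 max-retry 2000
exit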
{% if listen.limit is vyos_defined %} bgp listen limit {{ listen.limit }} {% endif %} {% if listen.range is vyos_defined %} {% for prefix, options in listen.range.items() %} {% if options.peer_group is vyos_defined %} bgp listen range {{ prefix }} peer-group {{ options.peer_group }} {% endif %} {% endfor %} {% endif %} {% if parameters.allow_martian_nexthop is vyos_defined %} bgp allow-martian-nexthop {% endif %} {% if parameters.disable_ebgp_connected_route_check is vyos_defined %} bgp disable-ebgp-connected-route-check {% endif %} {% if parameters.always_compare_med is vyos_defined %} bgp always-compare-med {% endif %} {% if parameters.bestpath.as_path is vyos_defined %} {% for option in parameters.bestpath.as_path %} {# replace is required for multipath-relax option #} bgp bestpath as-path {{ option | replace('_', '-') }} {% endfor %} {% endif %} {% if parameters.bestpath.bandwidth is vyos_defined %} bgp bestpath bandwidth {{ parameters.bestpath.bandwidth }} {% endif %} {% if parameters.bestpath.compare_routerid is vyos_defined %} bgp bestpath compare-routerid {% endif %} {% if parameters.bestpath.med is vyos_defined %} bgp bestpath med {{ parameters.bestpath.med | join(' ') | replace('_', '-') }} {% endif %} {% if parameters.bestpath.peer_type is vyos_defined %} bgp bestpath peer-type {{ 'multipath-relax' if parameters.bestpath.peer_type.multipath_relax is vyos_defined }} {% endif %} {% if parameters.cluster_id is vyos_defined %} bgp cluster-id {{ parameters.cluster_id }} {% endif %} {% if parameters.conditional_advertisement.timer is vyos_defined %} bgp conditional-advertisement timer {{ parameters.conditional_advertisement.timer }} {% endif %} {% if parameters.confederation.identifier is vyos_defined %} bgp confederation identifier {{ parameters.confederation.identifier }} {% endif %} {% if parameters.confederation.peers is vyos_defined %} bgp confederation peers {{ parameters.confederation.peers | join(' ') }} {% endif %} {% if parameters.dampening.half_life is vyos_defined %} {# Doesn't work in current FRR configuration; vtysh (bgp dampening 16 751 2001 61) #} bgp dampening {{ parameters.dampening.half_life }} {{ parameters.dampening.re_use if parameters.dampening.re_use is vyos_defined }} {{ parameters.dampening.start_suppress_time if parameters.dampening.start_suppress_time is vyos_defined }} {{ parameters.dampening.max_suppress_time if parameters.dampening.max_suppress_time is vyos_defined }} {% endif %} {% if parameters.default.local_pref is vyos_defined %} bgp default local-preference {{ parameters.default.local_pref }} {% endif %} {% if parameters.deterministic_med is vyos_defined %} bgp deterministic-med {% endif %} {% if parameters.distance.global.external is vyos_defined and parameters.distance.global.internal is vyos_defined and parameters.distance.global.local is vyos_defined %} distance bgp {{ parameters.distance.global.external }} {{ parameters.distance.global.internal }} {{ parameters.distance.global.local }} {% endif %} {% if parameters.distance.prefix is vyos_defined %} {% for prefix in parameters.distance.prefix %} distance {{ parameters.distance.prefix[prefix].distance }} {{ prefix }} {% endfor %} {% endif %} {% if parameters.fast_convergence is vyos_defined %} bgp fast-convergence {% endif %} {% if parameters.graceful_restart is vyos_defined %} bgp graceful-restart {{ 'stalepath-time ' ~ parameters.graceful_restart.stalepath_time if parameters.graceful_restart.stalepath_time is vyos_defined }} {% endif %} {% if parameters.graceful_shutdown is vyos_defined %} bgp 
graceful-shutdown {% endif %} {% if parameters.no_hard_administrative_reset is vyos_defined %} no bgp hard-administrative-reset {% endif %} {% if parameters.labeled_unicast is vyos_defined %} bgp labeled-unicast {{ parameters.labeled_unicast }} {% endif %} {% if parameters.log_neighbor_changes is vyos_defined %} bgp log-neighbor-changes {% endif %} {% if parameters.minimum_holdtime is vyos_defined %} bgp minimum-holdtime {{ parameters.minimum_holdtime }} {% endif %} {% if parameters.network_import_check is vyos_defined %} bgp network import-check {% endif %} {% if parameters.route_reflector_allow_outbound_policy is vyos_defined %} bgp route-reflector allow-outbound-policy {% endif %} {% if parameters.no_client_to_client_reflection is vyos_defined %} no bgp client-to-client reflection {% endif %} {% if parameters.no_fast_external_failover is vyos_defined %} no bgp fast-external-failover {% endif %} {% if parameters.no_suppress_duplicates is vyos_defined %} no bgp suppress-duplicates {% endif %} {% if parameters.reject_as_sets is vyos_defined %} bgp reject-as-sets {% endif %} {% if parameters.router_id is vyos_defined and parameters.router_id is not none %} bgp router-id {{ parameters.router_id }} {% endif %} {% if parameters.shutdown is vyos_defined %} bgp shutdown {% endif %} {% if parameters.suppress_fib_pending is vyos_defined %} bgp suppress-fib-pending {% endif %} {% if parameters.tcp_keepalive.idle is vyos_defined and parameters.tcp_keepalive.interval is vyos_defined and parameters.tcp_keepalive.probes is vyos_defined %} bgp tcp-keepalive {{ parameters.tcp_keepalive.idle }} {{ parameters.tcp_keepalive.interval }} {{ parameters.tcp_keepalive.probes }} {% endif %} {% if srv6.locator is vyos_defined %} segment-routing srv6 locator {{ srv6.locator }} exit {% endif %} {% if sid.vpn.per_vrf.export is vyos_defined %} sid vpn per-vrf export {{ sid.vpn.per_vrf.export }} {% endif %} {% if timers.keepalive is vyos_defined and timers.holdtime is vyos_defined %} timers bgp {{ timers.keepalive }} {{ timers.holdtime }} {% endif %} exit ! {% if interface is vyos_defined %} {% for iface, iface_config in interface.items() %} interface {{ iface }} {% if iface_config.mpls.forwarding is vyos_defined %} mpls bgp forwarding {% endif %} exit ! {% endfor %} {% endif %} diff --git a/data/templates/frr/evpn.mh.frr.j2 b/data/templates/frr/evpn.mh.frr.j2 index 03aaac44b..2fd7b7c09 100644 --- a/data/templates/frr/evpn.mh.frr.j2 +++ b/data/templates/frr/evpn.mh.frr.j2 @@ -1,16 +1,20 @@ ! -interface {{ ifname }} -{% if evpn.es_df_pref is vyos_defined %} - evpn mh es-df-pref {{ evpn.es_df_pref }} -{% endif %} -{% if evpn.es_id is vyos_defined %} - evpn mh es-id {{ evpn.es_id }} -{% endif %} -{% if evpn.es_sys_mac is vyos_defined %} - evpn mh es-sys-mac {{ evpn.es_sys_mac }} -{% endif %} -{% if evpn.uplink is vyos_defined %} +{% if interfaces is vyos_defined %} +{% for if_name, if_config in interfaces.items() %} +interface {{ if_name }} +{% if if_config.evpn.es_df_pref is vyos_defined %} + evpn mh es-df-pref {{ if_config.evpn.es_df_pref }} +{% endif %} +{% if if_config.evpn.es_id is vyos_defined %} + evpn mh es-id {{ if_config.evpn.es_id }} +{% endif %} +{% if if_config.evpn.es_sys_mac is vyos_defined %} + evpn mh es-sys-mac {{ if_config.evpn.es_sys_mac }} +{% endif %} +{% if if_config.evpn.uplink is vyos_defined %} evpn mh uplink -{% endif %} +{% endif %} exit ! 
+{% endfor %} +{% endif %} diff --git a/data/templates/frr/fabricd.frr.j2 b/data/templates/frr/fabricd.frr.j2 index 8f2ae6466..3a0646eb8 100644 --- a/data/templates/frr/fabricd.frr.j2 +++ b/data/templates/frr/fabricd.frr.j2 @@ -1,72 +1,73 @@ ! {% for name, router_config in domain.items() %} {% if router_config.interface is vyos_defined %} {% for iface, iface_config in router_config.interface.items() %} interface {{ iface }} {% if iface_config.address_family.ipv4 is vyos_defined %} ip router openfabric {{ name }} {% endif %} {% if iface_config.address_family.ipv6 is vyos_defined %} ipv6 router openfabric {{ name }} {% endif %} {% if iface_config.csnp_interval is vyos_defined %} openfabric csnp-interval {{ iface_config.csnp_interval }} {% endif %} {% if iface_config.hello_interval is vyos_defined %} openfabric hello-interval {{ iface_config.hello_interval }} {% endif %} {% if iface_config.hello_multiplier is vyos_defined %} openfabric hello-multiplier {{ iface_config.hello_multiplier }} {% endif %} {% if iface_config.metric is vyos_defined %} openfabric metric {{ iface_config.metric }} {% endif %} {% if iface_config.passive is vyos_defined or iface == 'lo' %} openfabric passive {% endif %} {% if iface_config.password.md5 is vyos_defined %} openfabric password md5 {{ iface_config.password.md5 }} {% elif iface_config.password.plaintext_password is vyos_defined %} openfabric password clear {{ iface_config.password.plaintext_password }} {% endif %} {% if iface_config.psnp_interval is vyos_defined %} openfabric psnp-interval {{ iface_config.psnp_interval }} {% endif %} exit ! {% endfor %} {% endif %} router openfabric {{ name }} net {{ net }} {% if router_config.domain_password.md5 is vyos_defined %} domain-password md5 {{ router_config.domain_password.plaintext_password }} {% elif router_config.domain_password.plaintext_password is vyos_defined %} domain-password clear {{ router_config.domain_password.plaintext_password }} {% endif %} {% if router_config.log_adjacency_changes is vyos_defined %} log-adjacency-changes {% endif %} {% if router_config.set_overload_bit is vyos_defined %} set-overload-bit {% endif %} {% if router_config.purge_originator is vyos_defined %} purge-originator {% endif %} {% if router_config.fabric_tier is vyos_defined %} fabric-tier {{ router_config.fabric_tier }} {% endif %} {% if router_config.lsp_gen_interval is vyos_defined %} lsp-gen-interval {{ router_config.lsp_gen_interval }} {% endif %} {% if router_config.lsp_refresh_interval is vyos_defined %} lsp-refresh-interval {{ router_config.lsp_refresh_interval }} {% endif %} {% if router_config.max_lsp_lifetime is vyos_defined %} max-lsp-lifetime {{ router_config.max_lsp_lifetime }} {% endif %} {% if router_config.spf_interval is vyos_defined %} spf-interval {{ router_config.spf_interval }} {% endif %} exit ! {% endfor %} +! diff --git a/data/templates/frr/static_mcast.frr.j2 b/data/templates/frr/static_mcast.frr.j2 deleted file mode 100644 index 54b2790b0..000000000 --- a/data/templates/frr/static_mcast.frr.j2 +++ /dev/null @@ -1,11 +0,0 @@ -! -{% for route_gr in mroute %} -{% for nh in mroute[route_gr] %} -{% if mroute[route_gr][nh] %} -ip mroute {{ route_gr }} {{ nh }} {{ mroute[route_gr][nh] }} -{% else %} -ip mroute {{ route_gr }} {{ nh }} -{% endif %} -{% endfor %} -{% endfor %} -! 
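The deleted static_mcast.frr.j2 template above rendered 'ip mroute' lines on its own; after this patch the equivalent lines come from the shared static_routes macro in staticd.frr.j2 further down, invoked with the prefix 'ip m' so that 'route ...' renders as 'ip mroute ...'. A minimal sketch of the expected output for an illustrative group prefix, next-hop and distance (values not from this patch):

ip mroute 224.0.0.0/4 192.0.2.1 25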
diff --git a/data/templates/frr/static_routes_macro.j2 b/data/templates/frr/static_routes_macro.j2 deleted file mode 100644 index db31700c5..000000000 --- a/data/templates/frr/static_routes_macro.j2 +++ /dev/null @@ -1,31 +0,0 @@ -{% macro static_routes(ip_ipv6, prefix, prefix_config, table=None) %} -{% if prefix_config.blackhole is vyos_defined %} -{{ ip_ipv6 }} route {{ prefix }} blackhole {{ prefix_config.blackhole.distance if prefix_config.blackhole.distance is vyos_defined }} {{ 'tag ' ~ prefix_config.blackhole.tag if prefix_config.blackhole.tag is vyos_defined }} {{ 'table ' ~ table if table is vyos_defined and table is not none }} -{% endif %} -{% if prefix_config.reject is vyos_defined %} -{{ ip_ipv6 }} route {{ prefix }} reject {{ prefix_config.reject.distance if prefix_config.reject.distance is vyos_defined }} {{ 'tag ' ~ prefix_config.reject.tag if prefix_config.reject.tag is vyos_defined }} {{ 'table ' ~ table if table is vyos_defined }} -{% endif %} -{% if prefix_config.dhcp_interface is vyos_defined %} -{% for dhcp_interface in prefix_config.dhcp_interface %} -{% set next_hop = dhcp_interface | get_dhcp_router %} -{% if next_hop is vyos_defined %} -{{ ip_ipv6 }} route {{ prefix }} {{ next_hop }} {{ dhcp_interface }} {{ 'table ' ~ table if table is vyos_defined }} -{% endif %} -{% endfor %} -{% endif %} -{% if prefix_config.interface is vyos_defined %} -{% for interface, interface_config in prefix_config.interface.items() if interface_config.disable is not defined %} -{{ ip_ipv6 }} route {{ prefix }} {{ interface }} {{ interface_config.distance if interface_config.distance is vyos_defined }} {{ 'nexthop-vrf ' ~ interface_config.vrf if interface_config.vrf is vyos_defined }} {{ 'segments ' ~ interface_config.segments if interface_config.segments is vyos_defined }} {{ 'table ' ~ table if table is vyos_defined }} -{% endfor %} -{% endif %} -{% if prefix_config.next_hop is vyos_defined and prefix_config.next_hop is not none %} -{% for next_hop, next_hop_config in prefix_config.next_hop.items() if next_hop_config.disable is not defined %} -{{ ip_ipv6 }} route {{ prefix }} {{ next_hop }} {{ next_hop_config.interface if next_hop_config.interface is vyos_defined }} {{ next_hop_config.distance if next_hop_config.distance is vyos_defined }} {{ 'nexthop-vrf ' ~ next_hop_config.vrf if next_hop_config.vrf is vyos_defined }} {{ 'bfd profile ' ~ next_hop_config.bfd.profile if next_hop_config.bfd.profile is vyos_defined }} {{ 'segments ' ~ next_hop_config.segments if next_hop_config.segments is vyos_defined }} {{ 'table ' ~ table if table is vyos_defined }} -{% if next_hop_config.bfd.multi_hop.source is vyos_defined %} -{% for source, source_config in next_hop_config.bfd.multi_hop.source.items() %} -{{ ip_ipv6 }} route {{ prefix }} {{ next_hop }} bfd multi-hop source {{ source }} profile {{ source_config.profile }} -{% endfor %} -{% endif %} -{% endfor %} -{% endif %} -{% endmacro %} diff --git a/data/templates/frr/staticd.frr.j2 b/data/templates/frr/staticd.frr.j2 index 68e3f21f9..c662b7650 100644 --- a/data/templates/frr/staticd.frr.j2 +++ b/data/templates/frr/staticd.frr.j2 @@ -1,66 +1,145 @@ -{% from 'frr/static_routes_macro.j2' import static_routes %} +{# Common macro for recurring options for a static route #} +{% macro route_options(route, interface_or_next_hop, config, table) %} +{# j2lint: disable=jinja-statements-delimeter #} +{% set ip_route = route ~ ' ' ~ interface_or_next_hop %} +{% if config.interface is vyos_defined %} +{% set ip_route = ip_route ~ ' ' ~ config.interface %}
+{% endif %} +{% if config.tag is vyos_defined %} +{% set ip_route = ip_route ~ ' tag ' ~ config.tag %} +{% endif %} +{% if config.distance is vyos_defined %} +{% set ip_route = ip_route ~ ' ' ~ config.distance %} +{% endif %} +{% if config.bfd is vyos_defined %} +{% set ip_route = ip_route ~ ' bfd' %} +{% if config.bfd.multi_hop is vyos_defined %} +{% set ip_route = ip_route ~ ' multi-hop' %} +{% if config.bfd.source_address is vyos_defined %} +{% set ip_route = ip_route ~ ' source ' ~ config.bfd.source_address %} +{% endif %} +{% endif %} +{% if config.bfd.profile is vyos_defined %} +{% set ip_route = ip_route ~ ' profile ' ~ config.bfd.profile %} +{% endif %} +{% endif %} +{% if config.vrf is vyos_defined %} +{% set ip_route = ip_route ~ ' nexthop-vrf ' ~ config.vrf %} +{% endif %} +{% if config.segments is vyos_defined %} +{# Segments used in/for SRv6 #} +{% set ip_route = ip_route ~ ' segments ' ~ config.segments %} +{% endif %} +{# Routing table to configure #} +{% if table is vyos_defined %} +{% set ip_route = ip_route ~ ' table ' ~ table %} +{% endif %} +{{ ip_route }} +{%- endmacro -%} +{# Build static IPv4/IPv6 route #} +{% macro static_routes(ip_ipv6, prefix, prefix_config, table=None) %} +{% set route = ip_ipv6 ~ 'route ' ~ prefix %} +{% if prefix_config.interface is vyos_defined %} +{% for interface, interface_config in prefix_config.interface.items() if interface_config.disable is not defined %} +{{ route_options(route, interface, interface_config, table) }} +{% endfor %} +{% endif %} +{% if prefix_config.next_hop is vyos_defined and prefix_config.next_hop is not none %} +{% for next_hop, next_hop_config in prefix_config.next_hop.items() if next_hop_config.disable is not defined %} +{{ route_options(route, next_hop, next_hop_config, table) }} +{% endfor %} +{% endif %} +{% if prefix_config.dhcp_interface is vyos_defined %} +{% for dhcp_interface in prefix_config.dhcp_interface %} +{% set next_hop = dhcp_interface | get_dhcp_router %} +{% if next_hop is vyos_defined %} +{{ ip_ipv6 }} route {{ prefix }} {{ next_hop }} {{ dhcp_interface }} {{ 'table ' ~ table if table is vyos_defined }} +{% endif %} +{% endfor %} +{% endif %} +{% if prefix_config.blackhole is vyos_defined %} +{{ route_options(route, 'blackhole', prefix_config.blackhole, table) }} +{% elif prefix_config.reject is vyos_defined %} +{{ route_options(route, 'reject', prefix_config.reject, table) }} +{% endif %} +{# j2lint: disable=jinja-statements-delimeter #} +{%- endmacro -%} ! 
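To make the macro plumbing above easier to follow, a hedged sketch of what static_routes()/route_options() can emit for one IPv4 prefix with a BFD-monitored next-hop and a multi-hop BFD next-hop; the prefix, addresses and profile name LOW-LATENCY are illustrative assumptions, not values from this patch:

ip route 198.51.100.0/24 192.0.2.1 bfd profile LOW-LATENCY
ip route 198.51.100.0/24 203.0.113.254 bfd multi-hop source 192.0.2.14 profile LOW-LATENCY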
-{% set ip_prefix = 'ip' %} -{% set ipv6_prefix = 'ipv6' %} +{% set ip_prefix = 'ip ' %} +{% set ipv6_prefix = 'ipv6 ' %} {% if vrf is vyos_defined %} {# We need to add an additional whitespace in front of the prefix #} {# when VRFs are in use, thus we use a variable for prefix handling #} -{% set ip_prefix = ' ip' %} -{% set ipv6_prefix = ' ipv6' %} +{% set ip_prefix = ' ip ' %} +{% set ipv6_prefix = ' ipv6 ' %} vrf {{ vrf }} {% endif %} {# IPv4 routing #} {% if route is vyos_defined %} {% for prefix, prefix_config in route.items() %} {{ static_routes(ip_prefix, prefix, prefix_config) }} {# j2lint: disable=jinja-statements-delimeter #} {%- endfor %} {% endif %} {# IPv4 default routes from DHCP interfaces #} {% if dhcp is vyos_defined %} {% for interface, interface_config in dhcp.items() if interface_config.dhcp_options.no_default_route is not vyos_defined %} {% set next_hop = interface | get_dhcp_router %} {% if next_hop is vyos_defined %} {{ ip_prefix }} route 0.0.0.0/0 {{ next_hop }} {{ interface }} tag 210 {{ interface_config.dhcp_options.default_route_distance if interface_config.dhcp_options.default_route_distance is vyos_defined }} {% endif %} {% endfor %} {% endif %} {# IPv4 default routes from PPPoE interfaces #} {% if pppoe is vyos_defined %} {% for interface, interface_config in pppoe.items() if interface_config.no_default_route is not vyos_defined %} {{ ip_prefix }} route 0.0.0.0/0 {{ interface }} tag 210 {{ interface_config.default_route_distance if interface_config.default_route_distance is vyos_defined }} {%- endfor %} {% endif %} {# IPv6 routing #} {% if route6 is vyos_defined %} {% for prefix, prefix_config in route6.items() %} {{ static_routes(ipv6_prefix, prefix, prefix_config) }} {# j2lint: disable=jinja-statements-delimeter #} {%- endfor %} {% endif %} {% if vrf is vyos_defined %} exit-vrf {% endif %} ! {# Policy route tables #} {% if table is vyos_defined %} {% for table_id, table_config in table.items() %} {% if table_config.route is vyos_defined %} {% for prefix, prefix_config in table_config.route.items() %} -{{ static_routes('ip', prefix, prefix_config, table_id) }} -{% endfor %} +{{ static_routes('ip ', prefix, prefix_config, table_id) }} +{# j2lint: disable=jinja-statements-delimeter #} +{%- endfor %} {% endif %} ! {% if table_config.route6 is vyos_defined %} {% for prefix, prefix_config in table_config.route6.items() %} -{{ static_routes('ipv6', prefix, prefix_config, table_id) }} -{% endfor %} +{{ static_routes('ipv6 ', prefix, prefix_config, table_id) }} +{# j2lint: disable=jinja-statements-delimeter #} +{%- endfor %} {% endif %} ! {% endfor %} {% endif %} ! +{# Multicast route #} +{% if multicast is vyos_defined %} +{% set ip_prefix = 'ip m' %} +{# IPv4 multicast routing #} +{% if multicast.route is vyos_defined %} +{% for prefix, prefix_config in multicast.route.items() %} +{{ static_routes(ip_prefix, prefix, prefix_config) }} +{# j2lint: disable=jinja-statements-delimeter #} +{%- endfor %} +{% endif %} +{% endif %} +! {% if route_map is vyos_defined %} ip protocol static route-map {{ route_map }} ! {% endif %} diff --git a/data/templates/frr/zebra.vrf.route-map.frr.j2 b/data/templates/frr/zebra.vrf.route-map.frr.j2 index 8ebb82511..656b31deb 100644 --- a/data/templates/frr/zebra.vrf.route-map.frr.j2 +++ b/data/templates/frr/zebra.vrf.route-map.frr.j2 @@ -1,30 +1,30 @@ ! 
{% if name is vyos_defined %} {% for vrf, vrf_config in name.items() %} vrf {{ vrf }} {% if vrf_config.ip.nht.no_resolve_via_default is vyos_defined %} no ip nht resolve-via-default {% endif %} {% if vrf_config.ipv6.nht.no_resolve_via_default is vyos_defined %} no ipv6 nht resolve-via-default {% endif %} {% if vrf_config.ip.protocol is vyos_defined %} {% for protocol_name, protocol_config in vrf_config.ip.protocol.items() %} ip protocol {{ protocol_name }} route-map {{ protocol_config.route_map }} {% endfor %} {% endif %} {% if vrf_config.ipv6.protocol is vyos_defined %} {% for protocol_name, protocol_config in vrf_config.ipv6.protocol.items() %} {% if protocol_name is vyos_defined('ospfv3') %} {% set protocol_name = 'ospf6' %} {% endif %} ipv6 protocol {{ protocol_name }} route-map {{ protocol_config.route_map }} {% endfor %} {% endif %} {% if vrf_config.vni is vyos_defined %} vni {{ vrf_config.vni }} {% endif %} exit-vrf -{% endfor %} ! +{% endfor %} {% endif %} diff --git a/interface-definitions/include/source-address-ipv4.xml.i b/interface-definitions/include/source-address-ipv4.xml.i index 052678113..aa0b083c7 100644 --- a/interface-definitions/include/source-address-ipv4.xml.i +++ b/interface-definitions/include/source-address-ipv4.xml.i @@ -1,17 +1,17 @@ <!-- include start from source-address-ipv4.xml.i --> <leafNode name="source-address"> <properties> - <help>IPv4 source address used to initiate connection</help> + <help>IPv4 address used to initiate connection</help> <completionHelp> <script>${vyos_completion_dir}/list_local_ips.sh --ipv4</script> </completionHelp> <valueHelp> <format>ipv4</format> <description>IPv4 source address</description> </valueHelp> <constraint> <validator name="ipv4-address"/> </constraint> </properties> </leafNode> <!-- include end --> diff --git a/interface-definitions/include/source-address-ipv6.xml.i b/interface-definitions/include/source-address-ipv6.xml.i new file mode 100644 index 000000000..a27955b0c --- /dev/null +++ b/interface-definitions/include/source-address-ipv6.xml.i @@ -0,0 +1,17 @@ +<!-- include start from source-address-ipv6.xml.i --> +<leafNode name="source-address"> + <properties> + <help>IPv6 address used to initiate connection</help> + <completionHelp> + <script>${vyos_completion_dir}/list_local_ips.sh --ipv6</script> + </completionHelp> + <valueHelp> + <format>ipv6</format> + <description>IPv6 source address</description> + </valueHelp> + <constraint> + <validator name="ipv6-address"/> + </constraint> + </properties> +</leafNode> +<!-- include end --> diff --git a/interface-definitions/include/static/bfd-multi-hop.xml.i b/interface-definitions/include/static/bfd-multi-hop.xml.i new file mode 100644 index 000000000..e53994191 --- /dev/null +++ b/interface-definitions/include/static/bfd-multi-hop.xml.i @@ -0,0 +1,8 @@ +<!-- include start from static/bfd-multi-hop.xml.i --> +<leafNode name="multi-hop"> + <properties> + <help>Enable BFD multi-hop session (requires source-address)</help> + <valueless/> + </properties> +</leafNode> +<!-- include end --> diff --git a/interface-definitions/include/static/static-route-bfd.xml.i b/interface-definitions/include/static/static-route-bfd.xml.i deleted file mode 100644 index d588b369f..000000000 --- a/interface-definitions/include/static/static-route-bfd.xml.i +++ /dev/null @@ -1,36 +0,0 @@ -<!-- include start from static/static-route-bfd.xml.i --> -<node name="bfd"> - <properties> - <help>BFD monitoring</help> - </properties> - <children> - #include <include/bfd/profile.xml.i> - <node 
name="multi-hop"> - <properties> - <help>Use BFD multi hop session</help> - </properties> - <children> - <tagNode name="source"> - <properties> - <help>Use source for BFD session</help> - <valueHelp> - <format>ipv4</format> - <description>IPv4 source address</description> - </valueHelp> - <valueHelp> - <format>ipv6</format> - <description>IPv6 source address</description> - </valueHelp> - <constraint> - <validator name="ip-address"/> - </constraint> - </properties> - <children> - #include <include/bfd/profile.xml.i> - </children> - </tagNode> - </children> - </node> - </children> -</node> -<!-- include end --> diff --git a/interface-definitions/include/static/static-route.xml.i b/interface-definitions/include/static/static-route.xml.i index 51e45c6f7..fa1131118 100644 --- a/interface-definitions/include/static/static-route.xml.i +++ b/interface-definitions/include/static/static-route.xml.i @@ -1,60 +1,68 @@ <!-- include start from static/static-route.xml.i --> <tagNode name="route"> <properties> <help>Static IPv4 route</help> <valueHelp> <format>ipv4net</format> <description>IPv4 static route</description> </valueHelp> <constraint> <validator name="ipv4-prefix"/> </constraint> </properties> <children> #include <include/static/static-route-blackhole.xml.i> #include <include/static/static-route-reject.xml.i> #include <include/dhcp-interface-multi.xml.i> #include <include/generic-description.xml.i> <tagNode name="interface"> <properties> <help>Next-hop IPv4 router interface</help> <completionHelp> <script>${vyos_completion_dir}/list_interfaces</script> </completionHelp> <valueHelp> <format>txt</format> <description>Gateway interface name</description> </valueHelp> <constraint> #include <include/constraint/interface-name.xml.i> </constraint> </properties> <children> #include <include/generic-disable-node.xml.i> #include <include/static/static-route-distance.xml.i> #include <include/static/static-route-vrf.xml.i> </children> </tagNode> <tagNode name="next-hop"> <properties> <help>Next-hop IPv4 router address</help> <valueHelp> <format>ipv4</format> <description>Next-hop router address</description> </valueHelp> <constraint> <validator name="ipv4-address"/> </constraint> </properties> <children> #include <include/generic-disable-node.xml.i> #include <include/static/static-route-distance.xml.i> #include <include/static/static-route-interface.xml.i> #include <include/static/static-route-vrf.xml.i> - #include <include/static/static-route-bfd.xml.i> + <node name="bfd"> + <properties> + <help>BFD monitoring</help> + </properties> + <children> + #include <include/bfd/profile.xml.i> + #include <include/static/bfd-multi-hop.xml.i> + #include <include/source-address-ipv4.xml.i> + </children> + </node> </children> </tagNode> </children> </tagNode> <!-- include end --> - diff --git a/interface-definitions/include/static/static-route6.xml.i b/interface-definitions/include/static/static-route6.xml.i index 4468c8025..e75385dc7 100644 --- a/interface-definitions/include/static/static-route6.xml.i +++ b/interface-definitions/include/static/static-route6.xml.i @@ -1,60 +1,69 @@ <!-- include start from static/static-route6.xml.i --> <tagNode name="route6"> <properties> <help>Static IPv6 route</help> <valueHelp> <format>ipv6net</format> <description>IPv6 static route</description> </valueHelp> <constraint> <validator name="ipv6-prefix"/> </constraint> </properties> <children> #include <include/static/static-route-blackhole.xml.i> #include <include/static/static-route-reject.xml.i> #include 
<include/generic-description.xml.i> <tagNode name="interface"> <properties> <help>IPv6 gateway interface name</help> <completionHelp> <script>${vyos_completion_dir}/list_interfaces</script> </completionHelp> <valueHelp> <format>txt</format> <description>Gateway interface name</description> </valueHelp> <constraint> #include <include/constraint/interface-name.xml.i> </constraint> </properties> <children> #include <include/generic-disable-node.xml.i> #include <include/static/static-route-distance.xml.i> #include <include/static/static-route-segments.xml.i> #include <include/static/static-route-vrf.xml.i> </children> </tagNode> <tagNode name="next-hop"> <properties> <help>IPv6 gateway address</help> <valueHelp> <format>ipv6</format> <description>Next-hop IPv6 router</description> </valueHelp> <constraint> <validator name="ipv6-address"/> </constraint> </properties> <children> #include <include/generic-disable-node.xml.i> - #include <include/static/static-route-bfd.xml.i> #include <include/static/static-route-distance.xml.i> #include <include/static/static-route-interface.xml.i> #include <include/static/static-route-segments.xml.i> #include <include/static/static-route-vrf.xml.i> + <node name="bfd"> + <properties> + <help>BFD monitoring</help> + </properties> + <children> + #include <include/bfd/profile.xml.i> + #include <include/static/bfd-multi-hop.xml.i> + #include <include/source-address-ipv6.xml.i> + </children> + </node> </children> </tagNode> </children> </tagNode> <!-- include end --> diff --git a/interface-definitions/protocols_static.xml.in b/interface-definitions/protocols_static.xml.in index ca4ca2d74..407e56553 100644 --- a/interface-definitions/protocols_static.xml.in +++ b/interface-definitions/protocols_static.xml.in @@ -1,44 +1,100 @@ <?xml version="1.0"?> <interfaceDefinition> <node name="protocols"> <properties> <help>Routing protocols</help> </properties> <children> <node name="static" owner="${vyos_conf_scripts_dir}/protocols_static.py"> <properties> <help>Static Routing</help> <priority>480</priority> </properties> <children> + <node name="multicast"> + <properties> + <help>Multicast static route</help> + </properties> + <children> + <tagNode name="route"> + <properties> + <help>Configure static unicast route into MRIB for multicast RPF lookup</help> + <valueHelp> + <format>ipv4net</format> + <description>Network</description> + </valueHelp> + <constraint> + <validator name="ip-prefix"/> + </constraint> + </properties> + <children> + <tagNode name="next-hop"> + <properties> + <help>Next-hop IPv4 router address</help> + <valueHelp> + <format>ipv4</format> + <description>Next-hop router address</description> + </valueHelp> + <constraint> + <validator name="ipv4-address"/> + </constraint> + </properties> + <children> + #include <include/generic-disable-node.xml.i> + #include <include/static/static-route-distance.xml.i> + </children> + </tagNode> + <tagNode name="interface"> + <properties> + <help>Next-hop IPv4 router interface</help> + <completionHelp> + <script>${vyos_completion_dir}/list_interfaces</script> + </completionHelp> + <valueHelp> + <format>txt</format> + <description>Gateway interface name</description> + </valueHelp> + <constraint> + #include <include/constraint/interface-name.xml.i> + </constraint> + </properties> + <children> + #include <include/generic-disable-node.xml.i> + #include <include/static/static-route-distance.xml.i> + </children> + </tagNode> + </children> + </tagNode> + </children> + </node> #include <include/route-map.xml.i> #include 
<include/static/static-route.xml.i> #include <include/static/static-route6.xml.i> <tagNode name="table"> <properties> <help>Policy route table number</help> <valueHelp> <format>u32:1-200</format> <description>Policy route table number</description> </valueHelp> <constraint> <validator name="numeric" argument="--range 1-200"/> </constraint> </properties> <children> <!-- iproute2 only considers the first "word" until whitespace in the name field but does not complain about special characters. We put an artificial limit here to make table descriptions potentially valid node names to avoid quoting and simplify future syntax changes if we decide to make any. --> #include <include/generic-description.xml.i> #include <include/static/static-route.xml.i> #include <include/static/static-route6.xml.i> </children> </tagNode> </children> </node> </children> </node> </interfaceDefinition> diff --git a/interface-definitions/protocols_static_multicast.xml.in b/interface-definitions/protocols_static_multicast.xml.in deleted file mode 100644 index caf95ed7c..000000000 --- a/interface-definitions/protocols_static_multicast.xml.in +++ /dev/null @@ -1,95 +0,0 @@ -<?xml version="1.0"?> -<interfaceDefinition> - <node name="protocols"> - <children> - <node name="static"> - <children> - <node name="multicast" owner="${vyos_conf_scripts_dir}/protocols_static_multicast.py"> - <properties> - <help>Multicast static route</help> - <priority>481</priority> - </properties> - <children> - <tagNode name="route"> - <properties> - <help>Configure static unicast route into MRIB for multicast RPF lookup</help> - <valueHelp> - <format>ipv4net</format> - <description>Network</description> - </valueHelp> - <constraint> - <validator name="ip-prefix"/> - </constraint> - </properties> - <children> - <tagNode name="next-hop"> - <properties> - <help>Nexthop IPv4 address</help> - <valueHelp> - <format>ipv4</format> - <description>Nexthop IPv4 address</description> - </valueHelp> - <constraint> - <validator name="ipv4-address"/> - </constraint> - </properties> - <children> - <leafNode name="distance"> - <properties> - <help>Distance value for this route</help> - <valueHelp> - <format>u32:1-255</format> - <description>Distance for this route</description> - </valueHelp> - <constraint> - <validator name="numeric" argument="--range 1-255"/> - </constraint> - </properties> - </leafNode> - </children> - </tagNode> - </children> - </tagNode> - <tagNode name="interface-route"> - <properties> - <help>Multicast interface based route</help> - <valueHelp> - <format>ipv4net</format> - <description>Network</description> - </valueHelp> - <constraint> - <validator name="ip-prefix"/> - </constraint> - </properties> - <children> - <tagNode name="next-hop-interface"> - <properties> - <help>Next-hop interface</help> - <completionHelp> - <script>${vyos_completion_dir}/list_interfaces</script> - </completionHelp> - </properties> - <children> - <leafNode name="distance"> - <properties> - <help>Distance value for this route</help> - <valueHelp> - <format>u32:1-255</format> - <description>Distance for this route</description> - </valueHelp> - <constraint> - <validator name="numeric" argument="--range 1-255"/> - </constraint> - </properties> - </leafNode> - </children> - </tagNode> - </children> - </tagNode> - </children> - </node> - </children> - </node> - </children> - </node> -</interfaceDefinition> diff --git a/python/vyos/configdict.py b/python/vyos/configdict.py index 5a353b110..88d131573 100644 --- a/python/vyos/configdict.py +++ 
b/python/vyos/configdict.py @@ -1,666 +1,1131 @@ # Copyright 2019-2024 VyOS maintainers and contributors <maintainers@vyos.io> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see <http://www.gnu.org/licenses/>. """ A library for retrieving value dicts from VyOS configs in a declarative fashion. """ import os import json from vyos.utils.dict import dict_search from vyos.utils.process import cmd def retrieve_config(path_hash, base_path, config): """ Retrieves a VyOS config as a dict according to a declarative description The description dict, passed in the first argument, must follow this format: ``field_name : <path, type, [inner_options_dict]>``. Supported types are: ``str`` (for normal nodes), ``list`` (returns a list of strings, for multi nodes), ``bool`` (returns True if valueless node exists), ``dict`` (for tag nodes, returns a dict indexed by node names, according to description in the third item of the tuple). Args: path_hash (dict): Declarative description of the config to retrieve base_path (list): A base path to prepend to all option paths config (vyos.config.Config): A VyOS config object Returns: dict: config dict """ config_hash = {} for k in path_hash: if type(path_hash[k]) != tuple: raise ValueError("In field {0}: expected a tuple, got a value {1}".format(k, str(path_hash[k]))) if len(path_hash[k]) < 2: raise ValueError("In field {0}: field description must be a tuple of at least two items, path (list) and type".format(k)) path = path_hash[k][0] if type(path) != list: raise ValueError("In field {0}: path must be a list, not a {1}".format(k, type(path))) typ = path_hash[k][1] if type(typ) != type: raise ValueError("In field {0}: type must be a type, not a {1}".format(k, type(typ))) path = base_path + path path_str = " ".join(path) if typ == str: config_hash[k] = config.return_value(path_str) elif typ == list: config_hash[k] = config.return_values(path_str) elif typ == bool: config_hash[k] = config.exists(path_str) elif typ == dict: try: inner_hash = path_hash[k][2] except IndexError: raise ValueError("The type of the \'{0}\' field is dict, but inner options hash is missing from the tuple".format(k)) config_hash[k] = {} nodes = config.list_nodes(path_str) for node in nodes: config_hash[k][node] = retrieve_config(inner_hash, path + [node], config) return config_hash def dict_merge(source, destination): """ Merge two dictionaries. Only keys which are not present in destination will be copied from source, anything else will be kept untouched. Function will return a new dict which has the merged key/value pairs. 
""" from copy import deepcopy tmp = deepcopy(destination) for key, value in source.items(): if key not in tmp: tmp[key] = value elif isinstance(source[key], dict): tmp[key] = dict_merge(source[key], tmp[key]) return tmp def list_diff(first, second): """ Diff two dictionaries and return only unique items """ second = set(second) return [item for item in first if item not in second] def is_node_changed(conf, path): """ Check if any key under path has been changed and return True. If nothing changed, return false """ from vyos.configdiff import get_config_diff D = get_config_diff(conf, key_mangling=('-', '_')) return D.is_node_changed(path) def leaf_node_changed(conf, path): """ Check if a leaf node was altered. If it has been altered - values has been changed, or it was added/removed, we will return a list containing the old value(s). If nothing has been changed, None is returned. NOTE: path must use the real CLI node name (e.g. with a hyphen!) """ from vyos.configdiff import get_config_diff D = get_config_diff(conf, key_mangling=('-', '_')) (new, old) = D.get_value_diff(path) if new != old: if isinstance(old, dict): # valueLess nodes return {} if node is deleted return True if old is None and isinstance(new, dict): # valueLess nodes return {} if node was added return True if old is None: return [] if isinstance(old, str): return [old] if isinstance(old, list): if isinstance(new, str): new = [new] elif isinstance(new, type(None)): new = [] return list_diff(old, new) return None def node_changed(conf, path, key_mangling=None, recursive=False, expand_nodes=None) -> list: """ Check if node under path (or anything under path if recursive=True) was changed. By default we only check if a node or subnode (recursive) was deleted from path. If expand_nodes is set to Diff.ADD we can also check if something was added to the path. If nothing changed, an empty list is returned. """ from vyos.configdiff import get_config_diff from vyos.configdiff import Diff # to prevent circular dependencies we assign the default here if not expand_nodes: expand_nodes = Diff.DELETE D = get_config_diff(conf, key_mangling) # get_child_nodes_diff() will return dict_keys() tmp = D.get_child_nodes_diff(path, expand_nodes=expand_nodes, recursive=recursive) output = [] if expand_nodes & Diff.DELETE: output.extend(list(tmp['delete'].keys())) if expand_nodes & Diff.ADD: output.extend(list(tmp['add'].keys())) # remove duplicate keys from list, this happens when a node (e.g. description) is altered output = list(dict.fromkeys(output)) return output def get_removed_vlans(conf, path, dict): """ Common function to parse a dictionary retrieved via get_config_dict() and determine any added/removed VLAN interfaces - be it 802.1q or Q-in-Q. 
""" from vyos.configdiff import get_config_diff, Diff # Check vif, vif-s/vif-c VLAN interfaces for removal D = get_config_diff(conf, key_mangling=('-', '_')) D.set_level(conf.get_level()) # get_child_nodes() will return dict_keys(), mangle this into a list with PEP448 keys = D.get_child_nodes_diff(path + ['vif'], expand_nodes=Diff.DELETE)['delete'].keys() if keys: dict['vif_remove'] = [*keys] # get_child_nodes() will return dict_keys(), mangle this into a list with PEP448 keys = D.get_child_nodes_diff(path + ['vif-s'], expand_nodes=Diff.DELETE)['delete'].keys() if keys: dict['vif_s_remove'] = [*keys] for vif in dict.get('vif_s', {}).keys(): keys = D.get_child_nodes_diff(path + ['vif-s', vif, 'vif-c'], expand_nodes=Diff.DELETE)['delete'].keys() if keys: dict['vif_s'][vif]['vif_c_remove'] = [*keys] return dict def is_member(conf, interface, intftype=None): """ Checks if passed interface is member of other interface of specified type. intftype is optional, if not passed it will search all known types (currently bridge and bonding) Returns: dict empty -> Interface is not a member key -> Interface is a member of this interface """ ret_val = {} intftypes = ['bonding', 'bridge'] if intftype not in intftypes + [None]: raise ValueError(( f'unknown interface type "{intftype}" or it cannot ' f'have member interfaces')) intftype = intftypes if intftype == None else [intftype] for iftype in intftype: base = ['interfaces', iftype] for intf in conf.list_nodes(base): member = base + [intf, 'member', 'interface', interface] if conf.exists(member): tmp = conf.get_config_dict(member, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) ret_val.update({intf : tmp}) return ret_val def is_mirror_intf(conf, interface, direction=None): """ Check whether the passed interface is used for port mirroring. Direction is optional, if not passed it will search all known direction (currently ingress and egress) Returns: None -> Interface is not a monitor interface Array() -> This interface is a monitor interface of interfaces """ from vyos.ifconfig import Section directions = ['ingress', 'egress'] if direction not in directions + [None]: raise ValueError(f'Unknown interface mirror direction "{direction}"') direction = directions if direction == None else [direction] ret_val = None base = ['interfaces'] for dir in direction: for iftype in conf.list_nodes(base): iftype_base = base + [iftype] for intf in conf.list_nodes(iftype_base): mirror = iftype_base + [intf, 'mirror', dir, interface] if conf.exists(mirror): path = ['interfaces', Section.section(intf), intf] tmp = conf.get_config_dict(path, key_mangling=('-', '_'), get_first_key=True) ret_val = {intf : tmp} return ret_val def has_address_configured(conf, intf): """ Checks if interface has an address configured. Checks the following config nodes: 'address', 'ipv6 address eui64', 'ipv6 address autoconf' Returns True if interface has address configured, False if it doesn't. """ from vyos.ifconfig import Section ret = False old_level = conf.get_level() conf.set_level([]) intfpath = ['interfaces', Section.get_config_path(intf)] if (conf.exists([intfpath, 'address']) or conf.exists([intfpath, 'ipv6', 'address', 'autoconf']) or conf.exists([intfpath, 'ipv6', 'address', 'eui64'])): ret = True conf.set_level(old_level) return ret def has_vrf_configured(conf, intf): """ Checks if interface has a VRF configured. Returns True if interface has VRF configured, False if it doesn't. 
""" from vyos.ifconfig import Section ret = False old_level = conf.get_level() conf.set_level([]) if conf.exists(['interfaces', Section.get_config_path(intf), 'vrf']): ret = True conf.set_level(old_level) return ret def has_vlan_subinterface_configured(conf, intf): """ Checks if interface has an VLAN subinterface configured. Checks the following config nodes: 'vif', 'vif-s' Return True if interface has VLAN subinterface configured. """ from vyos.ifconfig import Section ret = False intfpath = ['interfaces', Section.section(intf), intf] if (conf.exists(intfpath + ['vif']) or conf.exists(intfpath + ['vif-s'])): ret = True return ret def is_source_interface(conf, interface, intftype=None): """ Checks if passed interface is configured as source-interface of other interfaces of specified type. intftype is optional, if not passed it will search all known types (currently pppoe, macsec, pseudo-ethernet, tunnel and vxlan) Returns: None -> Interface is not a member interface name -> Interface is a member of this interface False -> interface type cannot have members """ ret_val = None intftypes = ['macsec', 'pppoe', 'pseudo-ethernet', 'tunnel', 'vxlan'] if not intftype: intftype = intftypes if isinstance(intftype, str): intftype = [intftype] elif not isinstance(intftype, list): raise ValueError(f'Interface type "{type(intftype)}" must be either str or list!') if not all(x in intftypes for x in intftype): raise ValueError(f'unknown interface type "{intftype}" or it can not ' 'have a source-interface') for it in intftype: base = ['interfaces', it] for intf in conf.list_nodes(base): src_intf = base + [intf, 'source-interface'] if conf.exists(src_intf) and interface in conf.return_values(src_intf): ret_val = intf break return ret_val def get_dhcp_interfaces(conf, vrf=None): """ Common helper functions to retrieve all interfaces from current CLI sessions that have DHCP configured. """ dhcp_interfaces = {} dict = conf.get_config_dict(['interfaces'], get_first_key=True) if not dict: return dhcp_interfaces def check_dhcp(config): ifname = config['ifname'] tmp = {} if 'address' in config and 'dhcp' in config['address']: options = {} if dict_search('dhcp_options.default_route_distance', config) != None: options.update({'dhcp_options' : config['dhcp_options']}) if 'vrf' in config: if vrf == config['vrf']: tmp.update({ifname : options}) else: if vrf is None: tmp.update({ifname : options}) return tmp for section, interface in dict.items(): for ifname in interface: # always reset config level, as get_interface_dict() will alter it conf.set_level([]) # we already have a dict representation of the config from get_config_dict(), # but with the extended information from get_interface_dict() we also # get the DHCP client default-route-distance default option if not specified. _, ifconfig = get_interface_dict(conf, ['interfaces', section], ifname) tmp = check_dhcp(ifconfig) dhcp_interfaces.update(tmp) # check per VLAN interfaces for vif, vif_config in ifconfig.get('vif', {}).items(): tmp = check_dhcp(vif_config) dhcp_interfaces.update(tmp) # check QinQ VLAN interfaces for vif_s, vif_s_config in ifconfig.get('vif_s', {}).items(): tmp = check_dhcp(vif_s_config) dhcp_interfaces.update(tmp) for vif_c, vif_c_config in vif_s_config.get('vif_c', {}).items(): tmp = check_dhcp(vif_c_config) dhcp_interfaces.update(tmp) return dhcp_interfaces def get_pppoe_interfaces(conf, vrf=None): """ Common helper functions to retrieve all interfaces from current CLI sessions that have DHCP configured. 
""" pppoe_interfaces = {} conf.set_level([]) for ifname in conf.list_nodes(['interfaces', 'pppoe']): # always reset config level, as get_interface_dict() will alter it conf.set_level([]) # we already have a dict representation of the config from get_config_dict(), # but with the extended information from get_interface_dict() we also # get the DHCP client default-route-distance default option if not specified. _, ifconfig = get_interface_dict(conf, ['interfaces', 'pppoe'], ifname) options = {} if 'default_route_distance' in ifconfig: options.update({'default_route_distance' : ifconfig['default_route_distance']}) if 'no_default_route' in ifconfig: options.update({'no_default_route' : {}}) if 'vrf' in ifconfig: if vrf == ifconfig['vrf']: pppoe_interfaces.update({ifname : options}) else: if vrf is None: pppoe_interfaces.update({ifname : options}) return pppoe_interfaces def get_interface_dict(config, base, ifname='', recursive_defaults=True, with_pki=False): """ Common utility function to retrieve and mangle the interfaces configuration from the CLI input nodes. All interfaces have a common base where value retrival is identical. This function must be used whenever possible when working on the interfaces node! Return a dictionary with the necessary interface config keys. """ if not ifname: from vyos import ConfigError # determine tagNode instance if 'VYOS_TAGNODE_VALUE' not in os.environ: raise ConfigError('Interface (VYOS_TAGNODE_VALUE) not specified') ifname = os.environ['VYOS_TAGNODE_VALUE'] # Check if interface has been removed. We must use exists() as # get_config_dict() will always return {} - even when an empty interface # node like the following exists. # +macsec macsec1 { # +} if not config.exists(base + [ifname]): dict = config.get_config_dict(base + [ifname], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) dict.update({'deleted' : {}}) else: # Get config_dict with default values dict = config.get_config_dict(base + [ifname], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_defaults=True, with_recursive_defaults=recursive_defaults, with_pki=with_pki) # If interface does not request an IPv4 DHCP address there is no need # to keep the dhcp-options key if 'address' not in dict or 'dhcp' not in dict['address']: if 'dhcp_options' in dict: del dict['dhcp_options'] # Add interface instance name into dictionary dict.update({'ifname': ifname}) # Check if QoS policy applied on this interface - See ifconfig.interface.set_mirror_redirect() if config.exists(['qos', 'interface', ifname]): dict.update({'traffic_policy': {}}) address = leaf_node_changed(config, base + [ifname, 'address']) if address: dict.update({'address_old' : address}) # Check if we are a member of a bridge device bridge = is_member(config, ifname, 'bridge') if bridge: dict.update({'is_bridge_member' : bridge}) # Check if it is a monitor interface mirror = is_mirror_intf(config, ifname) if mirror: dict.update({'is_mirror_intf' : mirror}) # Check if we are a member of a bond device bond = is_member(config, ifname, 'bonding') if bond: dict.update({'is_bond_member' : bond}) # Check if any DHCP options changed which require a client restat dhcp = is_node_changed(config, base + [ifname, 'dhcp-options']) if dhcp: dict.update({'dhcp_options_changed' : {}}) # Changine interface VRF assignemnts require a DHCP restart, too dhcp = is_node_changed(config, base + [ifname, 'vrf']) if dhcp: dict.update({'dhcp_options_changed' : {}}) # Some interfaces come with a source_interface which 
must also not be part # of any other bond or bridge interface as it is exclusivly assigned as the # Kernels "lower" interface to this new "virtual/upper" interface. if 'source_interface' in dict: # Check if source interface is member of another bridge tmp = is_member(config, dict['source_interface'], 'bridge') if tmp: dict.update({'source_interface_is_bridge_member' : tmp}) # Check if source interface is member of another bridge tmp = is_member(config, dict['source_interface'], 'bonding') if tmp: dict.update({'source_interface_is_bond_member' : tmp}) mac = leaf_node_changed(config, base + [ifname, 'mac']) if mac: dict.update({'mac_old' : mac}) eui64 = leaf_node_changed(config, base + [ifname, 'ipv6', 'address', 'eui64']) if eui64: tmp = dict_search('ipv6.address', dict) if not tmp: dict.update({'ipv6': {'address': {'eui64_old': eui64}}}) else: dict['ipv6']['address'].update({'eui64_old': eui64}) for vif, vif_config in dict.get('vif', {}).items(): # Add subinterface name to dictionary dict['vif'][vif].update({'ifname' : f'{ifname}.{vif}'}) if config.exists(['qos', 'interface', f'{ifname}.{vif}']): dict['vif'][vif].update({'traffic_policy': {}}) if 'deleted' not in dict: address = leaf_node_changed(config, base + [ifname, 'vif', vif, 'address']) if address: dict['vif'][vif].update({'address_old' : address}) # If interface does not request an IPv4 DHCP address there is no need # to keep the dhcp-options key if 'address' not in dict['vif'][vif] or 'dhcp' not in dict['vif'][vif]['address']: if 'dhcp_options' in dict['vif'][vif]: del dict['vif'][vif]['dhcp_options'] # Check if we are a member of a bridge device bridge = is_member(config, f'{ifname}.{vif}', 'bridge') if bridge: dict['vif'][vif].update({'is_bridge_member' : bridge}) # Check if any DHCP options changed which require a client restat dhcp = is_node_changed(config, base + [ifname, 'vif', vif, 'dhcp-options']) if dhcp: dict['vif'][vif].update({'dhcp_options_changed' : {}}) for vif_s, vif_s_config in dict.get('vif_s', {}).items(): # Add subinterface name to dictionary dict['vif_s'][vif_s].update({'ifname' : f'{ifname}.{vif_s}'}) if config.exists(['qos', 'interface', f'{ifname}.{vif_s}']): dict['vif_s'][vif_s].update({'traffic_policy': {}}) if 'deleted' not in dict: address = leaf_node_changed(config, base + [ifname, 'vif-s', vif_s, 'address']) if address: dict['vif_s'][vif_s].update({'address_old' : address}) # If interface does not request an IPv4 DHCP address there is no need # to keep the dhcp-options key if 'address' not in dict['vif_s'][vif_s] or 'dhcp' not in \ dict['vif_s'][vif_s]['address']: if 'dhcp_options' in dict['vif_s'][vif_s]: del dict['vif_s'][vif_s]['dhcp_options'] # Check if we are a member of a bridge device bridge = is_member(config, f'{ifname}.{vif_s}', 'bridge') if bridge: dict['vif_s'][vif_s].update({'is_bridge_member' : bridge}) # Check if any DHCP options changed which require a client restat dhcp = is_node_changed(config, base + [ifname, 'vif-s', vif_s, 'dhcp-options']) if dhcp: dict['vif_s'][vif_s].update({'dhcp_options_changed' : {}}) for vif_c, vif_c_config in vif_s_config.get('vif_c', {}).items(): # Add subinterface name to dictionary dict['vif_s'][vif_s]['vif_c'][vif_c].update({'ifname' : f'{ifname}.{vif_s}.{vif_c}'}) if config.exists(['qos', 'interface', f'{ifname}.{vif_s}.{vif_c}']): dict['vif_s'][vif_s]['vif_c'][vif_c].update({'traffic_policy': {}}) if 'deleted' not in dict: address = leaf_node_changed(config, base + [ifname, 'vif-s', vif_s, 'vif-c', vif_c, 'address']) if address: 
dict['vif_s'][vif_s]['vif_c'][vif_c].update( {'address_old' : address}) # If interface does not request an IPv4 DHCP address there is no need # to keep the dhcp-options key if 'address' not in dict['vif_s'][vif_s]['vif_c'][vif_c] or 'dhcp' \ not in dict['vif_s'][vif_s]['vif_c'][vif_c]['address']: if 'dhcp_options' in dict['vif_s'][vif_s]['vif_c'][vif_c]: del dict['vif_s'][vif_s]['vif_c'][vif_c]['dhcp_options'] # Check if we are a member of a bridge device bridge = is_member(config, f'{ifname}.{vif_s}.{vif_c}', 'bridge') if bridge: dict['vif_s'][vif_s]['vif_c'][vif_c].update( {'is_bridge_member' : bridge}) # Check if any DHCP options changed which require a client restat dhcp = is_node_changed(config, base + [ifname, 'vif-s', vif_s, 'vif-c', vif_c, 'dhcp-options']) if dhcp: dict['vif_s'][vif_s]['vif_c'][vif_c].update({'dhcp_options_changed' : {}}) # Check vif, vif-s/vif-c VLAN interfaces for removal dict = get_removed_vlans(config, base + [ifname], dict) return ifname, dict def get_vlan_ids(interface): """ Get the VLAN ID of the interface bound to the bridge """ vlan_ids = set() bridge_status = cmd('bridge -j vlan show', shell=True) vlan_filter_status = json.loads(bridge_status) if vlan_filter_status is not None: for interface_status in vlan_filter_status: ifname = interface_status['ifname'] if interface == ifname: vlans_status = interface_status['vlans'] for vlan_status in vlans_status: vlan_id = vlan_status['vlan'] vlan_ids.add(vlan_id) return vlan_ids def get_accel_dict(config, base, chap_secrets, with_pki=False): """ Common utility function to retrieve and mangle the Accel-PPP configuration from different CLI input nodes. All Accel-PPP services have a common base where value retrival is identical. This function must be used whenever possible when working with Accel-PPP services! Return a dictionary with the necessary interface config keys. """ from vyos.utils.cpu import get_core_count from vyos.template import is_ipv4 dict = config.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_recursive_defaults=True, with_pki=with_pki) # set CPUs cores to process requests dict.update({'thread_count' : get_core_count()}) # we need to store the path to the secrets file dict.update({'chap_secrets_file' : chap_secrets}) # We can only have two IPv4 and three IPv6 nameservers - also they are # configured in a different way in the configuration, this is why we split # the configuration if 'name_server' in dict: ns_v4 = [] ns_v6 = [] for ns in dict['name_server']: if is_ipv4(ns): ns_v4.append(ns) else: ns_v6.append(ns) dict.update({'name_server_ipv4' : ns_v4, 'name_server_ipv6' : ns_v6}) del dict['name_server'] # Check option "disable-accounting" per server and replace default value from '1813' to '0' for server in (dict_search('authentication.radius.server', dict) or []): if 'disable_accounting' in dict['authentication']['radius']['server'][server]: dict['authentication']['radius']['server'][server]['acct_port'] = '0' return dict + +def get_frrender_dict(conf) -> dict: + from copy import deepcopy + from vyos.config import config_dict_merge + from vyos.frrender import frr_protocols + + # Create an empty dictionary which will be filled down the code path and + # returned to the caller + dict = {} + + def dict_helper_ospf_defaults(ospf, path): + # We have gathered the dict representation of the CLI, but there are default + # options which we need to update into the dictionary retrived. 
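The comment above is the core idea of this helper: XML default trees are pruned for every feature the operator never enabled before they are merged back into the CLI dict, because an unconditional merge would silently activate those features (for example, the metric-type 2 default would materialise a whole default-information originate tree). A minimal, self-contained sketch of that pruning, with plain dicts standing in for conf.get_config_defaults() and config_dict_merge(); the key names are illustrative only:

    # Illustrative sketch - not part of the change set.
    defaults = {
        'default_information': {'originate': {'metric_type': '2'}},
        'redistribute': {'connected': {}, 'static': {}},
    }
    ospf_cli = {'area': {'0': {'network': ['192.0.2.0/24']}}}

    # Drop default trees for features never configured on the CLI ...
    for feature in ('default_information', 'redistribute'):
        if feature not in ospf_cli:
            del defaults[feature]

    # ... then merge what is left; explicit CLI values are expected to win.
    merged = {**defaults, **ospf_cli}
    assert merged == ospf_cli  # nothing was silently enabled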
+ default_values = conf.get_config_defaults(path, key_mangling=('-', '_'), + get_first_key=True, recursive=True) + + # We have to cleanup the default dict, as default values could enable features + # which are not explicitly enabled on the CLI. Example: default-information + # originate comes with a default metric-type of 2, which will enable the + # entire default-information originate tree, even when not set via CLI so we + # need to check this first and probably drop that key. + if dict_search('default_information.originate', ospf) is None: + del default_values['default_information'] + if 'mpls_te' not in ospf: + del default_values['mpls_te'] + if 'graceful_restart' not in ospf: + del default_values['graceful_restart'] + for area_num in default_values.get('area', []): + if dict_search(f'area.{area_num}.area_type.nssa', ospf) is None: + del default_values['area'][area_num]['area_type']['nssa'] + + for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'rip', 'static']: + if dict_search(f'redistribute.{protocol}', ospf) is None: + del default_values['redistribute'][protocol] + if not bool(default_values['redistribute']): + del default_values['redistribute'] + + for interface in ospf.get('interface', []): + # We need to reload the defaults on every pass b/c of + # hello-multiplier dependency on dead-interval + # If hello-multiplier is set, we need to remove the default from + # dead-interval. + if 'hello_multiplier' in ospf['interface'][interface]: + del default_values['interface'][interface]['dead_interval'] + + ospf = config_dict_merge(default_values, ospf) + return ospf + + def dict_helper_ospfv3_defaults(ospfv3, path): + # We have gathered the dict representation of the CLI, but there are default + # options which we need to update into the dictionary retrived. + default_values = conf.get_config_defaults(path, key_mangling=('-', '_'), + get_first_key=True, recursive=True) + + # We have to cleanup the default dict, as default values could enable features + # which are not explicitly enabled on the CLI. Example: default-information + # originate comes with a default metric-type of 2, which will enable the + # entire default-information originate tree, even when not set via CLI so we + # need to check this first and probably drop that key. + if dict_search('default_information.originate', ospfv3) is None: + del default_values['default_information'] + if 'graceful_restart' not in ospfv3: + del default_values['graceful_restart'] + + for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'ripng', 'static']: + if dict_search(f'redistribute.{protocol}', ospfv3) is None: + del default_values['redistribute'][protocol] + if not bool(default_values['redistribute']): + del default_values['redistribute'] + + default_values.pop('interface', {}) + + # merge in remaining default values + ospfv3 = config_dict_merge(default_values, ospfv3) + return ospfv3 + + def dict_helper_pim_defaults(pim, path): + # We have gathered the dict representation of the CLI, but there are default + # options which we need to update into the dictionary retrived. + default_values = conf.get_config_defaults(path, key_mangling=('-', '_'), + get_first_key=True, recursive=True) + + # We have to cleanup the default dict, as default values could enable features + # which are not explicitly enabled on the CLI. 
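As with the OSPF helper above, the cleanup is purely data driven. A small sketch with made-up interface names shows the intended effect of the loop that follows: IGMP defaults survive only on interfaces where IGMP was explicitly enabled on the CLI:

    # Hypothetical data, for illustration only.
    pim = {'interface': {'eth0': {'igmp': {}}, 'eth1': {}}}
    default_values = {'interface': {'eth0': {'igmp': {'version': '3'}},
                                    'eth1': {'igmp': {'version': '3'}}}}

    for ifname, if_config in pim['interface'].items():
        if 'igmp' not in if_config:
            del default_values['interface'][ifname]['igmp']

    # default_values now only contributes IGMP settings to eth0
    assert 'igmp' not in default_values['interface']['eth1']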
+ for interface in pim.get('interface', []): + if 'igmp' not in pim['interface'][interface]: + del default_values['interface'][interface]['igmp'] + + pim = config_dict_merge(default_values, pim) + return pim + + # Ethernet and bonding interfaces can participate in EVPN which is configured via FRR + tmp = {} + for if_type in ['ethernet', 'bonding']: + interface_path = ['interfaces', if_type] + if not conf.exists(interface_path): + continue + for interface in conf.list_nodes(interface_path): + evpn_path = interface_path + [interface, 'evpn'] + if not conf.exists(evpn_path): + continue + + evpn = conf.get_config_dict(evpn_path, key_mangling=('-', '_')) + tmp.update({interface : evpn}) + # At least one participating EVPN interface found, add to result dict + if tmp: dict['interfaces'] = tmp + + # Enable SNMP agentx support + # SNMP AgentX support cannot be disabled once enabled + if conf.exists(['service', 'snmp']): + dict['snmp'] = {} + + # We will always need the policy key + dict['policy'] = conf.get_config_dict(['policy'], key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True) + + # We need to check the CLI if the BABEL node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + babel_cli_path = ['protocols', 'babel'] + if conf.exists(babel_cli_path): + babel = conf.get_config_dict(babel_cli_path, key_mangling=('-', '_'), + get_first_key=True, + with_recursive_defaults=True) + dict.update({'babel' : babel}) + + # We need to check the CLI if the BFD node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + bfd_cli_path = ['protocols', 'bfd'] + if conf.exists(bfd_cli_path): + bfd = conf.get_config_dict(bfd_cli_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + with_recursive_defaults=True) + dict.update({'bfd' : bfd}) + + # We need to check the CLI if the BGP node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + bgp_cli_path = ['protocols', 'bgp'] + if conf.exists(bgp_cli_path): + bgp = conf.get_config_dict(bgp_cli_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + with_recursive_defaults=True) + bgp['dependent_vrfs'] = {} + dict.update({'bgp' : bgp}) + elif conf.exists_effective(bgp_cli_path): + dict.update({'bgp' : {'deleted' : '', 'dependent_vrfs' : {}}}) + + # We need to check the CLI if the EIGRP node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + eigrp_cli_path = ['protocols', 'eigrp'] + if conf.exists(eigrp_cli_path): + isis = conf.get_config_dict(eigrp_cli_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + with_recursive_defaults=True) + dict.update({'eigrp' : isis}) + elif conf.exists_effective(eigrp_cli_path): + dict.update({'eigrp' : {'deleted' : ''}}) + + # We need to check the CLI if the ISIS node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + isis_cli_path = ['protocols', 'isis'] + if conf.exists(isis_cli_path): + isis = conf.get_config_dict(isis_cli_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + with_recursive_defaults=True) + dict.update({'isis' : isis}) + elif conf.exists_effective(isis_cli_path): + dict.update({'isis' : {'deleted' : ''}}) + + # We need to check the CLI if the MPLS node 
is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + mpls_cli_path = ['protocols', 'mpls'] + if conf.exists(mpls_cli_path): + mpls = conf.get_config_dict(mpls_cli_path, key_mangling=('-', '_'), + get_first_key=True) + dict.update({'mpls' : mpls}) + elif conf.exists_effective(mpls_cli_path): + dict.update({'mpls' : {'deleted' : ''}}) + + # We need to check the CLI if the OPENFABRIC node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + openfabric_cli_path = ['protocols', 'openfabric'] + if conf.exists(openfabric_cli_path): + openfabric = conf.get_config_dict(openfabric_cli_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True) + dict.update({'openfabric' : openfabric}) + elif conf.exists_effective(openfabric_cli_path): + dict.update({'openfabric' : {'deleted' : ''}}) + + # We need to check the CLI if the OSPF node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + ospf_cli_path = ['protocols', 'ospf'] + if conf.exists(ospf_cli_path): + ospf = conf.get_config_dict(ospf_cli_path, key_mangling=('-', '_'), + get_first_key=True) + ospf = dict_helper_ospf_defaults(ospf, ospf_cli_path) + dict.update({'ospf' : ospf}) + elif conf.exists_effective(ospf_cli_path): + dict.update({'ospf' : {'deleted' : ''}}) + + # We need to check the CLI if the OSPFv3 node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + ospfv3_cli_path = ['protocols', 'ospfv3'] + if conf.exists(ospfv3_cli_path): + ospfv3 = conf.get_config_dict(ospfv3_cli_path, key_mangling=('-', '_'), + get_first_key=True) + ospfv3 = dict_helper_ospfv3_defaults(ospfv3, ospfv3_cli_path) + dict.update({'ospfv3' : ospfv3}) + elif conf.exists_effective(ospfv3_cli_path): + dict.update({'ospfv3' : {'deleted' : ''}}) + + # We need to check the CLI if the PIM node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + pim_cli_path = ['protocols', 'pim'] + if conf.exists(pim_cli_path): + pim = conf.get_config_dict(pim_cli_path, key_mangling=('-', '_'), + get_first_key=True) + pim = dict_helper_pim_defaults(pim, pim_cli_path) + dict.update({'pim' : pim}) + elif conf.exists_effective(pim_cli_path): + dict.update({'pim' : {'deleted' : ''}}) + + # We need to check the CLI if the PIM6 node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + pim6_cli_path = ['protocols', 'pim6'] + if conf.exists(pim6_cli_path): + pim6 = conf.get_config_dict(pim6_cli_path, key_mangling=('-', '_'), + get_first_key=True, + with_recursive_defaults=True) + dict.update({'pim6' : pim6}) + elif conf.exists_effective(pim6_cli_path): + dict.update({'pim6' : {'deleted' : ''}}) + + # We need to check the CLI if the RIP node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + rip_cli_path = ['protocols', 'rip'] + if conf.exists(rip_cli_path): + rip = conf.get_config_dict(rip_cli_path, key_mangling=('-', '_'), + get_first_key=True, + with_recursive_defaults=True) + dict.update({'rip' : rip}) + elif conf.exists_effective(rip_cli_path): + dict.update({'rip' : {'deleted' : ''}}) + + # We need to check the CLI if the RIPng node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + 
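Every protocol block in this function follows the same three-way pattern: present in the proposed config means load it (with defaults where applicable), present only in the effective config means flag it as deleted so the renderer withdraws it from frr.conf, absent from both means skip it entirely. A condensed sketch of that pattern, reusing the conf object and result dict of the surrounding function and shown for two protocols only (not part of the change itself):

    # Condensed illustration of the per-protocol gather pattern used below.
    for proto in ('rip', 'ripng'):
        path = ['protocols', proto]
        if conf.exists(path):
            dict[proto] = conf.get_config_dict(path, key_mangling=('-', '_'),
                                               get_first_key=True,
                                               with_recursive_defaults=True)
        elif conf.exists_effective(path):
            dict[proto] = {'deleted': ''}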
ripng_cli_path = ['protocols', 'ripng'] + if conf.exists(ripng_cli_path): + ripng = conf.get_config_dict(ripng_cli_path, key_mangling=('-', '_'), + get_first_key=True, + with_recursive_defaults=True) + dict.update({'ripng' : ripng}) + elif conf.exists_effective(ripng_cli_path): + dict.update({'ripng' : {'deleted' : ''}}) + + # We need to check the CLI if the RPKI node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + rpki_cli_path = ['protocols', 'rpki'] + if conf.exists(rpki_cli_path): + rpki = conf.get_config_dict(rpki_cli_path, key_mangling=('-', '_'), + get_first_key=True, with_pki=True, + with_recursive_defaults=True) + dict.update({'rpki' : rpki}) + elif conf.exists_effective(rpki_cli_path): + dict.update({'rpki' : {'deleted' : ''}}) + + # We need to check the CLI if the Segment Routing node is present and thus load in + # all the default values present on the CLI - that's why we have if conf.exists() + sr_cli_path = ['protocols', 'segment-routing'] + if conf.exists(sr_cli_path): + sr = conf.get_config_dict(sr_cli_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True, + with_recursive_defaults=True) + dict.update({'segment_routing' : sr}) + elif conf.exists_effective(sr_cli_path): + dict.update({'segment_routing' : {'deleted' : ''}}) + + # We need to check the CLI if the static node is present and thus load in + # all the default values present on the CLI - that's why we have if conf.exists() + static_cli_path = ['protocols', 'static'] + if conf.exists(static_cli_path): + static = conf.get_config_dict(static_cli_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True) + + # T3680 - get a list of all interfaces currently configured to use DHCP + tmp = get_dhcp_interfaces(conf) + if tmp: static.update({'dhcp' : tmp}) + tmp = get_pppoe_interfaces(conf) + if tmp: static.update({'pppoe' : tmp}) + + dict.update({'static' : static}) + elif conf.exists_effective(static_cli_path): + dict.update({'static' : {'deleted' : ''}}) + + # keep a re-usable list of dependent VRFs + dependent_vrfs_default = {} + if 'bgp' in dict: + dependent_vrfs_default = deepcopy(dict['bgp']) + # we do not need to nest the 'dependent_vrfs' key - simply remove it + if 'dependent_vrfs' in dependent_vrfs_default: + del dependent_vrfs_default['dependent_vrfs'] + + vrf_cli_path = ['vrf', 'name'] + if conf.exists(vrf_cli_path): + vrf = conf.get_config_dict(vrf_cli_path, key_mangling=('-', '_'), + get_first_key=False, + no_tag_node_value_mangle=True) + # We do not have any VRF related default values on the CLI. The defaults will only + # come into place under the protocols tree, thus we can safely merge them with the + # appropriate routing protocols + for vrf_name, vrf_config in vrf['name'].items(): + bgp_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'bgp'] + if 'bgp' in vrf_config.get('protocols', []): + # We have gathered the dict representation of the CLI, but there are default + # options which we need to update into the dictionary retrived. 
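The merge performed just below relies on config_dict_merge() giving precedence to values the operator set explicitly; the XML defaults only fill in the gaps. A tiny sketch with invented keys (not real BGP defaults):

    # Invented keys, shown only to illustrate the merge precedence.
    default_values = {'timers': {'keepalive': '60', 'holdtime': '180'}}
    bgp_cli        = {'system_as': '65010', 'timers': {'holdtime': '30'}}

    merged = config_dict_merge(default_values, bgp_cli)
    # Expected: the explicit holdtime of 30 is kept, the keepalive default
    # of 60 is filled in and system_as is untouched.
    assert merged['timers'] == {'keepalive': '60', 'holdtime': '30'}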
+ default_values = conf.get_config_defaults(bgp_vrf_path, key_mangling=('-', '_'), + get_first_key=True, recursive=True) + + # merge in remaining default values + vrf_config['protocols']['bgp'] = config_dict_merge(default_values, + vrf_config['protocols']['bgp']) + + # Add this BGP VRF instance as dependency into the default VRF + if 'bgp' in dict: + dict['bgp']['dependent_vrfs'].update({vrf_name : deepcopy(vrf_config)}) + + vrf_config['protocols']['bgp']['dependent_vrfs'] = conf.get_config_dict( + vrf_cli_path, key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) + + # We can safely delete ourself from the dependent VRF list + if vrf_name in vrf_config['protocols']['bgp']['dependent_vrfs']: + del vrf_config['protocols']['bgp']['dependent_vrfs'][vrf_name] + + # Add dependency on possible existing default VRF to this VRF + if 'bgp' in dict: + vrf_config['protocols']['bgp']['dependent_vrfs'].update({'default': {'protocols': { + 'bgp': dependent_vrfs_default}}}) + elif conf.exists_effective(bgp_vrf_path): + # Add this BGP VRF instance as dependency into the default VRF + tmp = {'deleted' : '', 'dependent_vrfs': deepcopy(vrf['name'])} + # We can safely delete ourself from the dependent VRF list + if vrf_name in tmp['dependent_vrfs']: + del tmp['dependent_vrfs'][vrf_name] + + # Add dependency on possible existing default VRF to this VRF + if 'bgp' in dict: + tmp['dependent_vrfs'].update({'default': {'protocols': { + 'bgp': dependent_vrfs_default}}}) + + if 'bgp' in dict: + dict['bgp']['dependent_vrfs'].update({vrf_name : {'protocols': tmp} }) + vrf['name'][vrf_name]['protocols'].update({'bgp' : tmp}) + + # We need to check the CLI if the EIGRP node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + eigrp_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'eigrp'] + if 'eigrp' in vrf_config.get('protocols', []): + eigrp = conf.get_config_dict(eigrp_vrf_path, key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True) + vrf['name'][vrf_name]['protocols'].update({'eigrp' : isis}) + elif conf.exists_effective(eigrp_vrf_path): + vrf['name'][vrf_name]['protocols'].update({'eigrp' : {'deleted' : ''}}) + + # We need to check the CLI if the ISIS node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + isis_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'isis'] + if 'isis' in vrf_config.get('protocols', []): + isis = conf.get_config_dict(isis_vrf_path, key_mangling=('-', '_'), get_first_key=True, + no_tag_node_value_mangle=True, with_recursive_defaults=True) + vrf['name'][vrf_name]['protocols'].update({'isis' : isis}) + elif conf.exists_effective(isis_vrf_path): + vrf['name'][vrf_name]['protocols'].update({'isis' : {'deleted' : ''}}) + + # We need to check the CLI if the OSPF node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + ospf_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'ospf'] + if 'ospf' in vrf_config.get('protocols', []): + ospf = conf.get_config_dict(ospf_vrf_path, key_mangling=('-', '_'), get_first_key=True) + ospf = dict_helper_ospf_defaults(vrf_config['protocols']['ospf'], ospf_vrf_path) + vrf['name'][vrf_name]['protocols'].update({'ospf' : ospf}) + elif conf.exists_effective(ospf_vrf_path): + vrf['name'][vrf_name]['protocols'].update({'ospf' : {'deleted' : ''}}) + + # We need to check the CLI if the OSPFv3 node is present and thus load in all the 
default + # values present on the CLI - that's why we have if conf.exists() + ospfv3_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'ospfv3'] + if 'ospfv3' in vrf_config.get('protocols', []): + ospfv3 = conf.get_config_dict(ospfv3_vrf_path, key_mangling=('-', '_'), get_first_key=True) + ospfv3 = dict_helper_ospfv3_defaults(vrf_config['protocols']['ospfv3'], ospfv3_vrf_path) + vrf['name'][vrf_name]['protocols'].update({'ospfv3' : ospfv3}) + elif conf.exists_effective(ospfv3_vrf_path): + vrf['name'][vrf_name]['protocols'].update({'ospfv3' : {'deleted' : ''}}) + + # We need to check the CLI if the static node is present and thus load in all the default + # values present on the CLI - that's why we have if conf.exists() + static_vrf_path = ['vrf', 'name', vrf_name, 'protocols', 'static'] + if 'static' in vrf_config.get('protocols', []): + static = conf.get_config_dict(static_vrf_path, key_mangling=('-', '_'), + get_first_key=True, + no_tag_node_value_mangle=True) + # T3680 - get a list of all interfaces currently configured to use DHCP + tmp = get_dhcp_interfaces(conf, vrf_name) + if tmp: static.update({'dhcp' : tmp}) + tmp = get_pppoe_interfaces(conf, vrf_name) + if tmp: static.update({'pppoe' : tmp}) + + vrf['name'][vrf_name]['protocols'].update({'static': static}) + elif conf.exists_effective(static_vrf_path): + vrf['name'][vrf_name]['protocols'].update({'static': {'deleted' : ''}}) + + vrf_vni_path = ['vrf', 'name', vrf_name, 'vni'] + if conf.exists_effective(vrf_vni_path): + vrf_config.update({'vni': conf.return_effective_value(vrf_vni_path)}) + + dict.update({'vrf' : vrf}) + elif conf.exists_effective(vrf_cli_path): + effective_vrf = conf.get_config_dict(vrf_cli_path, key_mangling=('-', '_'), + get_first_key=False, + no_tag_node_value_mangle=True, + effective=True) + vrf = {'name' : {}} + for vrf_name, vrf_config in effective_vrf.get('name', {}).items(): + vrf['name'].update({vrf_name : {}}) + for protocol in frr_protocols: + if protocol in vrf_config.get('protocols', []): + # Create initial protocols key if not present + if 'protocols' not in vrf['name'][vrf_name]: + vrf['name'][vrf_name].update({'protocols' : {}}) + # All routing protocols are deleted when we pass this point + tmp = {'deleted' : ''} + + # Special treatment for BGP routing protocol + if protocol == 'bgp': + tmp['dependent_vrfs'] = {} + if 'name' in vrf: + tmp['dependent_vrfs'] = conf.get_config_dict( + vrf_cli_path, key_mangling=('-', '_'), + get_first_key=True, no_tag_node_value_mangle=True, + effective=True) + # Add dependency on possible existing default VRF to this VRF + if 'bgp' in dict: + tmp['dependent_vrfs'].update({'default': {'protocols': { + 'bgp': dependent_vrfs_default}}}) + # We can safely delete ourself from the dependent VRF list + if vrf_name in tmp['dependent_vrfs']: + del tmp['dependent_vrfs'][vrf_name] + + # Update VRF related dict + vrf['name'][vrf_name]['protocols'].update({protocol : tmp}) + + dict.update({'vrf' : vrf}) + + print('======== < > ==========') + import pprint + pprint.pprint(dict) + print('======== < > ==========') + return dict diff --git a/python/vyos/configverify.py b/python/vyos/configverify.py index 92996f2ee..4450dc16b 100644 --- a/python/vyos/configverify.py +++ b/python/vyos/configverify.py @@ -1,539 +1,546 @@ # Copyright 2020-2024 VyOS maintainers and contributors <maintainers@vyos.io> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either 
# version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see <http://www.gnu.org/licenses/>. # The sole purpose of this module is to hold common functions used in # all kinds of implementations to verify the CLI configuration. # It is started by migrating the interfaces to the new get_config_dict() # approach which will lead to a lot of code that can be reused. # NOTE: imports should be as local as possible to the function which # makes use of it! from vyos import ConfigError from vyos.utils.dict import dict_search # pattern re-used in ipsec migration script dynamic_interface_pattern = r'(ppp|pppoe|sstpc|l2tp|ipoe)[0-9]+' def verify_mtu(config): """ Common helper function used by interface implementations to perform recurring validation if the specified MTU can be used by the underlaying hardware. """ from vyos.ifconfig import Interface if 'mtu' in config: mtu = int(config['mtu']) tmp = Interface(config['ifname']) # Not all interfaces support min/max MTU # https://vyos.dev/T5011 try: min_mtu = tmp.get_min_mtu() max_mtu = tmp.get_max_mtu() except: # Fallback to defaults min_mtu = 68 max_mtu = 9000 if mtu < min_mtu: raise ConfigError(f'Interface MTU too low, ' \ f'minimum supported MTU is {min_mtu}!') if mtu > max_mtu: raise ConfigError(f'Interface MTU too high, ' \ f'maximum supported MTU is {max_mtu}!') def verify_mtu_parent(config, parent): if 'mtu' not in config or 'mtu' not in parent: return mtu = int(config['mtu']) parent_mtu = int(parent['mtu']) if mtu > parent_mtu: raise ConfigError(f'Interface MTU "{mtu}" too high, ' \ f'parent interface MTU is "{parent_mtu}"!') def verify_mtu_ipv6(config): """ Common helper function used by interface implementations to perform recurring validation if the specified MTU can be used when IPv6 is configured on the interface. IPv6 requires a 1280 bytes MTU. """ from vyos.template import is_ipv6 if 'mtu' in config: # IPv6 minimum required link mtu min_mtu = 1280 if int(config['mtu']) < min_mtu: interface = config['ifname'] error_msg = f'IPv6 address will be configured on interface "{interface}",\n' \ f'the required minimum MTU is "{min_mtu}"!' if 'address' in config: for address in config['address']: if address in ['dhcpv6'] or is_ipv6(address): raise ConfigError(error_msg) tmp = dict_search('ipv6.address.no_default_link_local', config) if tmp == None: raise ConfigError('link-local ' + error_msg) tmp = dict_search('ipv6.address.autoconf', config) if tmp != None: raise ConfigError(error_msg) tmp = dict_search('ipv6.address.eui64', config) if tmp != None: raise ConfigError(error_msg) def verify_vrf(config): """ Common helper function used by interface implementations to perform recurring validation of VRF configuration. 
""" from vyos.utils.network import interface_exists if 'vrf' in config: vrfs = config['vrf'] if isinstance(vrfs, str): vrfs = [vrfs] for vrf in vrfs: if vrf == 'default': continue if not interface_exists(vrf): raise ConfigError(f'VRF "{vrf}" does not exist!') if 'is_bridge_member' in config: raise ConfigError( 'Interface "{ifname}" cannot be both a member of VRF "{vrf}" ' 'and bridge "{is_bridge_member}"!'.format(**config)) def verify_bond_bridge_member(config): """ Checks if interface has a VRF configured and is also part of a bond or bridge, which is not allowed! """ if 'vrf' in config: ifname = config['ifname'] if 'is_bond_member' in config: raise ConfigError(f'Can not add interface "{ifname}" to bond, it has a VRF assigned!') if 'is_bridge_member' in config: raise ConfigError(f'Can not add interface "{ifname}" to bridge, it has a VRF assigned!') def verify_tunnel(config): """ This helper is used to verify the common part of the tunnel """ from vyos.template import is_ipv4 from vyos.template import is_ipv6 if 'encapsulation' not in config: raise ConfigError('Must configure the tunnel encapsulation for '\ '{ifname}!'.format(**config)) if 'source_address' not in config and 'source_interface' not in config: raise ConfigError('source-address or source-interface required for tunnel!') if 'remote' not in config and config['encapsulation'] != 'gre': raise ConfigError('remote ip address is mandatory for tunnel') if config['encapsulation'] in ['ipip6', 'ip6ip6', 'ip6gre', 'ip6gretap', 'ip6erspan']: error_ipv6 = 'Encapsulation mode requires IPv6' if 'source_address' in config and not is_ipv6(config['source_address']): raise ConfigError(f'{error_ipv6} source-address') if 'remote' in config and not is_ipv6(config['remote']): raise ConfigError(f'{error_ipv6} remote') else: error_ipv4 = 'Encapsulation mode requires IPv4' if 'source_address' in config and not is_ipv4(config['source_address']): raise ConfigError(f'{error_ipv4} source-address') if 'remote' in config and not is_ipv4(config['remote']): raise ConfigError(f'{error_ipv4} remote address') if config['encapsulation'] in ['sit', 'gretap', 'ip6gretap']: if 'source_interface' in config: encapsulation = config['encapsulation'] raise ConfigError(f'Option source-interface can not be used with ' \ f'encapsulation "{encapsulation}"!') elif config['encapsulation'] == 'gre': if 'source_address' in config and is_ipv6(config['source_address']): raise ConfigError('Can not use local IPv6 address is for mGRE tunnels') def verify_mirror_redirect(config): """ Common helper function used by interface implementations to perform recurring validation of mirror and redirect interface configuration via tc(8) It makes no sense to mirror traffic back at yourself! 
""" from vyos.utils.network import interface_exists if {'mirror', 'redirect'} <= set(config): raise ConfigError('Mirror and redirect can not be enabled at the same time!') if 'mirror' in config: for direction, mirror_interface in config['mirror'].items(): if not interface_exists(mirror_interface): raise ConfigError(f'Requested mirror interface "{mirror_interface}" '\ 'does not exist!') if mirror_interface == config['ifname']: raise ConfigError(f'Can not mirror "{direction}" traffic back '\ 'the originating interface!') if 'redirect' in config: redirect_ifname = config['redirect'] if not interface_exists(redirect_ifname): raise ConfigError(f'Requested redirect interface "{redirect_ifname}" '\ 'does not exist!') if ('mirror' in config or 'redirect' in config) and dict_search('traffic_policy.in', config) is not None: # XXX: support combination of limiting and redirect/mirror - this is an # artificial limitation raise ConfigError('Can not use ingress policy together with mirror or redirect!') def verify_authentication(config): """ Common helper function used by interface implementations to perform recurring validation of authentication for either PPPoE or WWAN interfaces. If authentication CLI option is defined, both username and password must be set! """ if 'authentication' not in config: return if not {'username', 'password'} <= set(config['authentication']): raise ConfigError('Authentication requires both username and ' \ 'password to be set!') def verify_address(config): """ Common helper function used by interface implementations to perform recurring validation of IP address assignment when interface is part of a bridge or bond. """ if {'is_bridge_member', 'address'} <= set(config): interface = config['ifname'] bridge_name = next(iter(config['is_bridge_member'])) raise ConfigError(f'Cannot assign address to interface "{interface}" ' f'as it is a member of bridge "{bridge_name}"!') def verify_bridge_delete(config): """ Common helper function used by interface implementations to perform recurring validation of IP address assignmenr when interface also is part of a bridge. """ if 'is_bridge_member' in config: interface = config['ifname'] bridge_name = next(iter(config['is_bridge_member'])) raise ConfigError(f'Interface "{interface}" cannot be deleted as it ' f'is a member of bridge "{bridge_name}"!') def verify_interface_exists(config, ifname, state_required=False, warning_only=False): """ Common helper function used by interface implementations to perform recurring validation if an interface actually exists. We first probe if the interface is defined on the CLI, if it's not found we try if it exists at the OS level. """ from vyos.base import Warning from vyos.utils.dict import dict_search_recursive from vyos.utils.network import interface_exists if not state_required: # Check if interface is present in CLI config tmp = getattr(config, 'interfaces_root', {}) if bool(list(dict_search_recursive(tmp, ifname))): return True # Interface not found on CLI, try Linux Kernel if interface_exists(ifname): return True message = f'Interface "{ifname}" does not exist!' if warning_only: Warning(message) return False raise ConfigError(message) def verify_source_interface(config): """ Common helper function used by interface implementations to perform recurring validation of the existence of a source-interface required by e.g. peth/MACvlan, MACsec ... 
""" import re from vyos.utils.network import interface_exists ifname = config['ifname'] if 'source_interface' not in config: raise ConfigError(f'Physical source-interface required for "{ifname}"!') src_ifname = config['source_interface'] # We do not allow sourcing other interfaces (e.g. tunnel) from dynamic interfaces tmp = re.compile(dynamic_interface_pattern) if tmp.match(src_ifname): raise ConfigError(f'Can not source "{ifname}" from dynamic interface "{src_ifname}"!') if not interface_exists(src_ifname): raise ConfigError(f'Specified source-interface {src_ifname} does not exist') if 'source_interface_is_bridge_member' in config: bridge_name = next(iter(config['source_interface_is_bridge_member'])) raise ConfigError(f'Invalid source-interface "{src_ifname}". Interface ' f'is already a member of bridge "{bridge_name}"!') if 'source_interface_is_bond_member' in config: bond_name = next(iter(config['source_interface_is_bond_member'])) raise ConfigError(f'Invalid source-interface "{src_ifname}". Interface ' f'is already a member of bond "{bond_name}"!') if 'is_source_interface' in config: tmp = config['is_source_interface'] raise ConfigError(f'Can not use source-interface "{src_ifname}", it already ' \ f'belongs to interface "{tmp}"!') def verify_dhcpv6(config): """ Common helper function used by interface implementations to perform recurring validation of DHCPv6 options which are mutually exclusive. """ if 'dhcpv6_options' in config: if {'parameters_only', 'temporary'} <= set(config['dhcpv6_options']): raise ConfigError('DHCPv6 temporary and parameters-only options ' 'are mutually exclusive!') # It is not allowed to have duplicate SLA-IDs as those identify an # assigned IPv6 subnet from a delegated prefix for pd in (dict_search('dhcpv6_options.pd', config) or []): sla_ids = [] interfaces = dict_search(f'dhcpv6_options.pd.{pd}.interface', config) if not interfaces: raise ConfigError('DHCPv6-PD requires an interface where to assign ' 'the delegated prefix!') for count, interface in enumerate(interfaces): if 'sla_id' in interfaces[interface]: sla_ids.append(interfaces[interface]['sla_id']) else: sla_ids.append(str(count)) # Check for duplicates duplicates = [x for n, x in enumerate(sla_ids) if x in sla_ids[:n]] if duplicates: raise ConfigError('Site-Level Aggregation Identifier (SLA-ID) ' 'must be unique per prefix-delegation!') def verify_vlan_config(config): """ Common helper function used by interface implementations to perform recurring validation of interface VLANs """ # VLAN and Q-in-Q IDs are not allowed to overlap if 'vif' in config and 'vif_s' in config: duplicate = list(set(config['vif']) & set(config['vif_s'])) if duplicate: raise ConfigError(f'Duplicate VLAN id "{duplicate[0]}" used for vif and vif-s interfaces!') parent_ifname = config['ifname'] # 802.1q VLANs for vlan_id in config.get('vif', {}): vlan = config['vif'][vlan_id] vlan['ifname'] = f'{parent_ifname}.{vlan_id}' verify_dhcpv6(vlan) verify_address(vlan) verify_vrf(vlan) verify_mirror_redirect(vlan) verify_mtu_parent(vlan, config) # 802.1ad (Q-in-Q) VLANs for s_vlan_id in config.get('vif_s', {}): s_vlan = config['vif_s'][s_vlan_id] s_vlan['ifname'] = f'{parent_ifname}.{s_vlan_id}' verify_dhcpv6(s_vlan) verify_address(s_vlan) verify_vrf(s_vlan) verify_mirror_redirect(s_vlan) verify_mtu_parent(s_vlan, config) for c_vlan_id in s_vlan.get('vif_c', {}): c_vlan = s_vlan['vif_c'][c_vlan_id] c_vlan['ifname'] = f'{parent_ifname}.{s_vlan_id}.{c_vlan_id}' verify_dhcpv6(c_vlan) verify_address(c_vlan) verify_vrf(c_vlan) 
verify_mirror_redirect(c_vlan) verify_mtu_parent(c_vlan, config) verify_mtu_parent(c_vlan, s_vlan) def verify_diffie_hellman_length(file, min_keysize): """ Verify Diffie-Hellamn keypair length given via file. It must be greater then or equal to min_keysize """ import os import re from vyos.utils.process import cmd try: keysize = str(min_keysize) except: return False if os.path.exists(file): out = cmd(f'openssl dhparam -inform PEM -in {file} -text') prog = re.compile('\d+\s+bit') if prog.search(out): bits = prog.search(out)[0].split()[0] if int(bits) >= int(min_keysize): return True return False def verify_common_route_maps(config): """ Common helper function used by routing protocol implementations to perform recurring validation if the specified route-map for either zebra to kernel installation exists (this is the top-level route_map key) or when a route is redistributed with a route-map that it exists! """ # XXX: This function is called in combination with a previous call to: # tmp = conf.get_config_dict(['policy']) - see protocols_ospf.py as example. # We should NOT call this with the key_mangling option as this would rename # route-map hypens '-' to underscores '_' and one could no longer distinguish # what should have been the "proper" route-map name, as foo-bar and foo_bar # are two entire different route-map instances! for route_map in ['route-map', 'route_map']: if route_map not in config: continue tmp = config[route_map] # Check if the specified route-map exists, if not error out - if dict_search(f'policy.route-map.{tmp}', config) == None: + if dict_search(f'policy.route_map.{tmp}', config) == None: raise ConfigError(f'Specified route-map "{tmp}" does not exist!') if 'redistribute' in config: for protocol, protocol_config in config['redistribute'].items(): if 'route_map' in protocol_config: verify_route_map(protocol_config['route_map'], config) def verify_route_map(route_map_name, config): """ Common helper function used by routing protocol implementations to perform recurring validation if a specified route-map exists! """ # Check if the specified route-map exists, if not error out - if dict_search(f'policy.route-map.{route_map_name}', config) == None: + if dict_search(f'policy.route_map.{route_map_name}', config) == None: raise ConfigError(f'Specified route-map "{route_map_name}" does not exist!') def verify_prefix_list(prefix_list, config, version=''): """ Common helper function used by routing protocol implementations to perform recurring validation if a specified prefix-list exists! """ # Check if the specified prefix-list exists, if not error out - if dict_search(f'policy.prefix-list{version}.{prefix_list}', config) == None: + if dict_search(f'policy.prefix_list{version}.{prefix_list}', config) == None: raise ConfigError(f'Specified prefix-list{version} "{prefix_list}" does not exist!') def verify_access_list(access_list, config, version=''): """ Common helper function used by routing protocol implementations to perform recurring validation if a specified prefix-list exists! 
""" # Check if the specified ACL exists, if not error out - if dict_search(f'policy.access-list{version}.{access_list}', config) == None: + if dict_search(f'policy.access_list{version}.{access_list}', config) == None: raise ConfigError(f'Specified access-list{version} "{access_list}" does not exist!') def verify_pki_certificate(config: dict, cert_name: str, no_password_protected: bool=False): """ Common helper function user by PKI consumers to perform recurring validation functions for PEM based certificates """ if 'pki' not in config: raise ConfigError('PKI is not configured!') if 'certificate' not in config['pki']: raise ConfigError('PKI does not contain any certificates!') if cert_name not in config['pki']['certificate']: raise ConfigError(f'Certificate "{cert_name}" not found in configuration!') pki_cert = config['pki']['certificate'][cert_name] if 'certificate' not in pki_cert: raise ConfigError(f'PEM certificate for "{cert_name}" missing in configuration!') if 'private' not in pki_cert or 'key' not in pki_cert['private']: raise ConfigError(f'PEM private key for "{cert_name}" missing in configuration!') if no_password_protected and 'password_protected' in pki_cert['private']: raise ConfigError('Password protected PEM private key is not supported!') def verify_pki_ca_certificate(config: dict, ca_name: str): """ Common helper function user by PKI consumers to perform recurring validation functions for PEM based CA certificates """ if 'pki' not in config: raise ConfigError('PKI is not configured!') if 'ca' not in config['pki']: raise ConfigError('PKI does not contain any CA certificates!') if ca_name not in config['pki']['ca']: raise ConfigError(f'CA Certificate "{ca_name}" not found in configuration!') pki_cert = config['pki']['ca'][ca_name] if 'certificate' not in pki_cert: raise ConfigError(f'PEM CA certificate for "{cert_name}" missing in configuration!') def verify_pki_dh_parameters(config: dict, dh_name: str, min_key_size: int=0): """ Common helper function user by PKI consumers to perform recurring validation functions on DH parameters """ from vyos.pki import load_dh_parameters if 'pki' not in config: raise ConfigError('PKI is not configured!') if 'dh' not in config['pki']: raise ConfigError('PKI does not contain any DH parameters!') if dh_name not in config['pki']['dh']: raise ConfigError(f'DH parameter "{dh_name}" not found in configuration!') if min_key_size: pki_dh = config['pki']['dh'][dh_name] dh_params = load_dh_parameters(pki_dh['parameters']) dh_numbers = dh_params.parameter_numbers() dh_bits = dh_numbers.p.bit_length() if dh_bits < min_key_size: raise ConfigError(f'Minimum DH key-size is {min_key_size} bits!') def verify_eapol(config: dict): """ Common helper function used by interface implementations to perform recurring validation of EAPoL configuration. 
""" if 'eapol' not in config: return if 'certificate' not in config['eapol']: raise ConfigError('Certificate must be specified when using EAPoL!') verify_pki_certificate(config, config['eapol']['certificate'], no_password_protected=True) if 'ca_certificate' in config['eapol']: for ca_cert in config['eapol']['ca_certificate']: verify_pki_ca_certificate(config, ca_cert) + +def has_frr_protocol_in_dict(config_dict: dict, protocol: str, vrf: str=None) -> bool: + if vrf and protocol in (dict_search(f'vrf.name.{vrf}.protocols', config_dict) or []): + return True + if config_dict and protocol in config_dict: + return True + return False diff --git a/python/vyos/frrender.py b/python/vyos/frrender.py new file mode 100644 index 000000000..015596a8f --- /dev/null +++ b/python/vyos/frrender.py @@ -0,0 +1,156 @@ +# Copyright 2024 VyOS maintainers and contributors <maintainers@vyos.io> +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this library. If not, see <http://www.gnu.org/licenses/>. + +""" +Library used to interface with FRRs mgmtd introduced in version 10.0 +""" + +import os + +from vyos.utils.file import write_file +from vyos.utils.process import rc_cmd +from vyos.template import render_to_string +from vyos import ConfigError + +DEBUG_ON = os.path.exists('/tmp/vyos.frr.debug') +DEBUG_ON = True + +def debug(message): + if not DEBUG_ON: + return + print(message) + +pim_daemon = 'pimd' + +frr_protocols = ['babel', 'bfd', 'bgp', 'eigrp', 'isis', 'mpls', 'nhrp', + 'openfabric', 'ospf', 'ospfv3', 'pim', 'pim6', 'rip', + 'ripng', 'rpki', 'segment_routing', 'static'] + +class FRRender: + def __init__(self): + self._frr_conf = '/run/frr/config/frr.conf' + + def generate(self, config): + if not isinstance(config, dict): + raise ValueError('config must be of type dict') + + def inline_helper(config_dict) -> str: + output = '!\n' + if 'babel' in config_dict and 'deleted' not in config_dict['babel']: + output += render_to_string('frr/babeld.frr.j2', config_dict['babel']) + output += '\n' + if 'bfd' in config_dict and 'deleted' not in config_dict['bfd']: + output += render_to_string('frr/bfdd.frr.j2', config_dict['bfd']) + output += '\n' + if 'bgp' in config_dict and 'deleted' not in config_dict['bgp']: + output += render_to_string('frr/bgpd.frr.j2', config_dict['bgp']) + output += '\n' + if 'eigrp' in config_dict and 'deleted' not in config_dict['eigrp']: + output += render_to_string('frr/eigrpd.frr.j2', config_dict['eigrp']) + output += '\n' + if 'isis' in config_dict and 'deleted' not in config_dict['isis']: + output += render_to_string('frr/isisd.frr.j2', config_dict['isis']) + output += '\n' + if 'mpls' in config_dict and 'deleted' not in config_dict['mpls']: + output += render_to_string('frr/ldpd.frr.j2', config_dict['mpls']) + output += '\n' + if 'openfabric' in config_dict and 'deleted' not in config_dict['openfabric']: + output += render_to_string('frr/fabricd.frr.j2', config_dict['openfabric']) + output += '\n' + if 'ospf' in config_dict and 
'deleted' not in config_dict['ospf']: + output += render_to_string('frr/ospfd.frr.j2', config_dict['ospf']) + output += '\n' + if 'ospfv3' in config_dict and 'deleted' not in config_dict['ospfv3']: + output += render_to_string('frr/ospf6d.frr.j2', config_dict['ospfv3']) + output += '\n' + if 'pim' in config_dict and 'deleted' not in config_dict['pim']: + output += render_to_string('frr/pimd.frr.j2', config_dict['pim']) + output += '\n' + if 'pim6' in config_dict and 'deleted' not in config_dict['pim6']: + output += render_to_string('frr/pim6d.frr.j2', config_dict['pim6']) + output += '\n' + if 'policy' in config_dict and len(config_dict['policy']) > 0: + output += render_to_string('frr/policy.frr.j2', config_dict['policy']) + output += '\n' + if 'rip' in config_dict and 'deleted' not in config_dict['rip']: + output += render_to_string('frr/ripd.frr.j2', config_dict['rip']) + output += '\n' + if 'ripng' in config_dict and 'deleted' not in config_dict['ripng']: + output += render_to_string('frr/ripngd.frr.j2', config_dict['ripng']) + output += '\n' + if 'rpki' in config_dict and 'deleted' not in config_dict['rpki']: + output += render_to_string('frr/rpki.frr.j2', config_dict['rpki']) + output += '\n' + if 'segment_routing' in config_dict and 'deleted' not in config_dict['segment_routing']: + output += render_to_string('frr/zebra.segment_routing.frr.j2', config_dict['segment_routing']) + output += '\n' + if 'static' in config_dict and 'deleted' not in config_dict['static']: + output += render_to_string('frr/staticd.frr.j2', config_dict['static']) + output += '\n' + return output + + debug('======< RENDERING CONFIG >======') + # we can not reload an empty file, thus we always embed the marker + output = '!\n' + # Enable SNMP agentx support + # SNMP AgentX support cannot be disabled once enabled + if 'snmp' in config: + output += 'agentx\n' + # Add routing protocols in global VRF + output += inline_helper(config) + # Interface configuration for EVPN is not VRF related + if 'interfaces' in config: + output += render_to_string('frr/evpn.mh.frr.j2', {'interfaces' : config['interfaces']}) + output += '\n' + + if 'vrf' in config and 'name' in config['vrf']: + output += render_to_string('frr/zebra.vrf.route-map.frr.j2', config['vrf']) + '\n' + for vrf, vrf_config in config['vrf']['name'].items(): + if 'protocols' not in vrf_config: + continue + for protocol in vrf_config['protocols']: + vrf_config['protocols'][protocol]['vrf'] = vrf + + output += inline_helper(vrf_config['protocols']) + + debug(output) + debug('======< RENDERING CONFIG COMPLETE >======') + write_file(self._frr_conf, output) + if DEBUG_ON: write_file('/tmp/frr.conf.debug', output) + + def apply(self): + count = 0 + count_max = 5 + emsg = '' + while count < count_max: + count += 1 + print('FRR: Reloading configuration', count) + + cmdline = '/usr/lib/frr/frr-reload.py --reload' + if DEBUG_ON: + cmdline += ' --debug' + rc, emsg = rc_cmd(f'{cmdline} {self._frr_conf}') + if rc != 0: + debug('FRR configuration reload failed, retrying') + continue + debug(emsg) + debug('======< DONE APPLYING CONFIG >======') + break + if count >= count_max: + raise ConfigError(emsg) + + def save_configuration(): + """ T3217: Save FRR configuration to /run/frr/config/frr.conf """ + return cmd('/usr/bin/vtysh -n --writeconfig') diff --git a/smoketest/scripts/cli/test_policy.py b/smoketest/scripts/cli/test_policy.py index a0c6ab055..7ea1b610e 100755 --- a/smoketest/scripts/cli/test_policy.py +++ b/smoketest/scripts/cli/test_policy.py @@ -1,1994 +1,1994 @@ 
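For orientation, a minimal, hypothetical sketch of how a configuration script might drive the new FRRender helper introduced above; the driver function name is illustrative only and relies solely on the generate() and apply() methods shown in frrender.py:

    from vyos.frrender import FRRender

    def render_and_apply(config_dict: dict) -> None:
        # config_dict stands for the merged get_config_dict() result whose keys
        # ('bgp', 'ospf', 'static', ...) inline_helper() looks for.
        frr = FRRender()
        frr.generate(config_dict)  # renders and writes /run/frr/config/frr.conf
        frr.apply()                # runs frr-reload.py, retrying up to five times
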
#!/usr/bin/env python3 # # Copyright (C) 2021-2023 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.utils.process import cmd base_path = ['policy'] class TestPolicy(VyOSUnitTestSHIM.TestCase): def tearDown(self): self.cli_delete(base_path) self.cli_commit() def test_access_list(self): acls = { '50' : { 'rule' : { '5' : { 'action' : 'permit', 'source' : { 'any' : '' }, }, '10' : { 'action' : 'deny', 'source' : { 'host' : '1.2.3.4' }, }, }, }, '150' : { 'rule' : { '5' : { 'action' : 'permit', 'source' : { 'any' : '' }, 'destination' : { 'host' : '2.2.2.2' }, }, '10' : { 'action' : 'deny', 'source' : { 'any' : '' }, 'destination' : { 'any' : '' }, }, }, }, '2000' : { 'rule' : { '5' : { 'action' : 'permit', 'destination' : { 'any' : '' }, 'source' : { 'network' : '10.0.0.0', 'inverse-mask' : '0.255.255.255' }, }, '10' : { 'action' : 'permit', 'destination' : { 'any' : '' }, 'source' : { 'network' : '172.16.0.0', 'inverse-mask' : '0.15.255.255' }, }, '15' : { 'action' : 'permit', 'destination' : { 'any' : '' }, 'source' : { 'network' : '192.168.0.0', 'inverse-mask' : '0.0.255.255' }, }, '20' : { 'action' : 'permit', 'destination' : { 'network' : '172.16.0.0', 'inverse-mask' : '0.15.255.255' }, 'source' : { 'network' : '10.0.0.0', 'inverse-mask' : '0.255.255.255' }, }, '25' : { 'action' : 'deny', 'destination' : { 'network' : '192.168.0.0', 'inverse-mask' : '0.0.255.255' }, 'source' : { 'network' : '172.16.0.0', 'inverse-mask' : '0.15.255.255' }, }, '30' : { 'action' : 'deny', 'destination' : { 'any' : '' }, 'source' : { 'any' : '' }, }, }, }, } for acl, acl_config in acls.items(): path = base_path + ['access-list', acl] self.cli_set(path + ['description', f'VyOS-ACL-{acl}']) if 'rule' not in acl_config: continue for rule, rule_config in acl_config['rule'].items(): self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) for direction in ['source', 'destination']: if direction in rule_config: if 'any' in rule_config[direction]: self.cli_set(path + ['rule', rule, direction, 'any']) if 'host' in rule_config[direction]: self.cli_set(path + ['rule', rule, direction, 'host', rule_config[direction]['host']]) if 'network' in rule_config[direction]: self.cli_set(path + ['rule', rule, direction, 'network', rule_config[direction]['network']]) self.cli_set(path + ['rule', rule, direction, 'inverse-mask', rule_config[direction]['inverse-mask']]) self.cli_commit() config = self.getFRRconfig('access-list', end='') for acl, acl_config in acls.items(): for rule, rule_config in acl_config['rule'].items(): tmp = f'access-list {acl} seq {rule}' if rule_config['action'] == 'permit': tmp += ' permit' else: tmp += ' deny' if {'source', 'destination'} <= set(rule_config): tmp += ' ip' for direction in ['source', 'destination']: if direction in rule_config: if 'any' in rule_config[direction]: tmp += ' any' if 'host' in 
rule_config[direction]: # XXX: Some weird side rule from the old vyatta days # possible to clean this up after the vyos-1x migration if int(acl) in range(100, 200) or int(acl) in range(2000, 2700): tmp += ' host' tmp += ' ' + rule_config[direction]['host'] if 'network' in rule_config[direction]: tmp += ' ' + rule_config[direction]['network'] + ' ' + rule_config[direction]['inverse-mask'] self.assertIn(tmp, config) def test_access_list6(self): acls = { '50' : { 'rule' : { '5' : { 'action' : 'permit', 'source' : { 'any' : '' }, }, '10' : { 'action' : 'deny', 'source' : { 'network' : '2001:db8:10::/48', 'exact-match' : '' }, }, '15' : { 'action' : 'deny', 'source' : { 'network' : '2001:db8:20::/48' }, }, }, }, '100' : { 'rule' : { '5' : { 'action' : 'deny', 'source' : { 'network' : '2001:db8:10::/64', 'exact-match' : '' }, }, '10' : { 'action' : 'deny', 'source' : { 'network' : '2001:db8:20::/64', }, }, '15' : { 'action' : 'deny', 'source' : { 'network' : '2001:db8:30::/64', 'exact-match' : '' }, }, '20' : { 'action' : 'deny', 'source' : { 'network' : '2001:db8:40::/64', 'exact-match' : '' }, }, '25' : { 'action' : 'deny', 'source' : { 'any' : '' }, }, }, }, } for acl, acl_config in acls.items(): path = base_path + ['access-list6', acl] self.cli_set(path + ['description', f'VyOS-ACL-{acl}']) if 'rule' not in acl_config: continue for rule, rule_config in acl_config['rule'].items(): self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) for direction in ['source', 'destination']: if direction in rule_config: if 'any' in rule_config[direction]: self.cli_set(path + ['rule', rule, direction, 'any']) if 'network' in rule_config[direction]: self.cli_set(path + ['rule', rule, direction, 'network', rule_config[direction]['network']]) if 'exact-match' in rule_config[direction]: self.cli_set(path + ['rule', rule, direction, 'exact-match']) self.cli_commit() config = self.getFRRconfig('ipv6 access-list', end='') for acl, acl_config in acls.items(): for rule, rule_config in acl_config['rule'].items(): tmp = f'ipv6 access-list {acl} seq {rule}' if rule_config['action'] == 'permit': tmp += ' permit' else: tmp += ' deny' if {'source', 'destination'} <= set(rule_config): tmp += ' ip' for direction in ['source', 'destination']: if direction in rule_config: if 'any' in rule_config[direction]: tmp += ' any' if 'network' in rule_config[direction]: tmp += ' ' + rule_config[direction]['network'] if 'exact-match' in rule_config[direction]: tmp += ' exact-match' self.assertIn(tmp, config) def test_as_path_list(self): test_data = { 'VyOS' : { 'rule' : { '5' : { 'action' : 'permit', 'regex' : '^44501 64502$', }, '10' : { 'action' : 'permit', 'regex' : '44501|44502|44503', }, '15' : { 'action' : 'permit', 'regex' : '^44501_([0-9]+_)+', }, }, }, 'Customers' : { 'rule' : { '5' : { 'action' : 'permit', 'regex' : '_10_', }, '10' : { 'action' : 'permit', 'regex' : '_20_', }, '15' : { 'action' : 'permit', 'regex' : '_30_', }, '20' : { 'action' : 'deny', 'regex' : '_40_', }, }, }, 'bogons' : { 'rule' : { '5' : { 'action' : 'permit', 'regex' : '_0_', }, '10' : { 'action' : 'permit', 'regex' : '_23456_', }, '15' : { 'action' : 'permit', 'regex' : '_6449[6-9]_|_65[0-4][0-9][0-9]_|_655[0-4][0-9]_|_6555[0-1]_', }, '20' : { 'action' : 'permit', 'regex' : '_6555[2-9]_|_655[6-9][0-9]_|_65[6-9][0-9][0-9]_|_6[6-9][0-9][0-9][0-]_|_[7-9][0-9][0-9][0-9][0-9]_|_1[0-2][0-9][0-9][0-9][0-9]_|_130[0-9][0-9][0-9]_|_1310[0-6][0-9]_|_13107[01]_', }, }, }, } for as_path, as_path_config in test_data.items(): path = base_path + 
['as-path-list', as_path] self.cli_set(path + ['description', f'VyOS-ASPATH-{as_path}']) if 'rule' not in as_path_config: continue for rule, rule_config in as_path_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'regex' in rule_config: self.cli_set(path + ['rule', rule, 'regex', rule_config['regex']]) self.cli_commit() config = self.getFRRconfig('bgp as-path access-list', end='') for as_path, as_path_config in test_data.items(): if 'rule' not in as_path_config: continue for rule, rule_config in as_path_config['rule'].items(): tmp = f'bgp as-path access-list {as_path} seq {rule}' if rule_config['action'] == 'permit': tmp += ' permit' else: tmp += ' deny' tmp += ' ' + rule_config['regex'] self.assertIn(tmp, config) def test_community_list(self): test_data = { '100' : { 'rule' : { '5' : { 'action' : 'permit', 'regex' : '.*', }, }, }, '200' : { 'rule' : { '5' : { 'action' : 'deny', 'regex' : '^1:201$', }, '10' : { 'action' : 'deny', 'regex' : '1:101$', }, '15' : { 'action' : 'deny', 'regex' : '^1:100$', }, }, }, } for comm_list, comm_list_config in test_data.items(): path = base_path + ['community-list', comm_list] self.cli_set(path + ['description', f'VyOS-COMM-{comm_list}']) if 'rule' not in comm_list_config: continue for rule, rule_config in comm_list_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'regex' in rule_config: self.cli_set(path + ['rule', rule, 'regex', rule_config['regex']]) self.cli_commit() config = self.getFRRconfig('bgp community-list', end='') for comm_list, comm_list_config in test_data.items(): if 'rule' not in comm_list_config: continue for rule, rule_config in comm_list_config['rule'].items(): tmp = f'bgp community-list {comm_list} seq {rule}' if rule_config['action'] == 'permit': tmp += ' permit' else: tmp += ' deny' tmp += ' ' + rule_config['regex'] self.assertIn(tmp, config) def test_extended_community_list(self): test_data = { 'foo' : { 'rule' : { '5' : { 'action' : 'permit', 'regex' : '.*', }, }, }, '200' : { 'rule' : { '5' : { 'action' : 'deny', 'regex' : '^1:201$', }, '10' : { 'action' : 'deny', 'regex' : '1:101$', }, '15' : { 'action' : 'deny', 'regex' : '^1:100$', }, }, }, } for comm_list, comm_list_config in test_data.items(): path = base_path + ['extcommunity-list', comm_list] self.cli_set(path + ['description', f'VyOS-EXTCOMM-{comm_list}']) if 'rule' not in comm_list_config: continue for rule, rule_config in comm_list_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'regex' in rule_config: self.cli_set(path + ['rule', rule, 'regex', rule_config['regex']]) self.cli_commit() config = self.getFRRconfig('bgp extcommunity-list', end='') for comm_list, comm_list_config in test_data.items(): if 'rule' not in comm_list_config: continue for rule, rule_config in comm_list_config['rule'].items(): # if the community is not a number but a name, the expanded # keyword is used expanded = '' if not comm_list.isnumeric(): expanded = ' expanded' tmp = f'bgp extcommunity-list{expanded} {comm_list} seq {rule}' if rule_config['action'] == 'permit': tmp += ' permit' else: tmp += ' deny' tmp += ' ' + rule_config['regex'] self.assertIn(tmp, config) def test_large_community_list(self): test_data = { 'foo' : { 'rule' : { '5' : { 'action' : 'permit', 'regex' : '667:123:100', }, }, }, 'bar' : { 'rule' : { '5' : { 'action' : 'permit', 'regex' : '65000:120:10', }, 
'10' : { 'action' : 'permit', 'regex' : '65000:120:20', }, '15' : { 'action' : 'permit', 'regex' : '65000:120:30', }, }, }, } for comm_list, comm_list_config in test_data.items(): path = base_path + ['large-community-list', comm_list] self.cli_set(path + ['description', f'VyOS-LARGECOMM-{comm_list}']) if 'rule' not in comm_list_config: continue for rule, rule_config in comm_list_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'regex' in rule_config: self.cli_set(path + ['rule', rule, 'regex', rule_config['regex']]) self.cli_commit() config = self.getFRRconfig('bgp large-community-list', end='') for comm_list, comm_list_config in test_data.items(): if 'rule' not in comm_list_config: continue for rule, rule_config in comm_list_config['rule'].items(): tmp = f'bgp large-community-list expanded {comm_list} seq {rule}' if rule_config['action'] == 'permit': tmp += ' permit' else: tmp += ' deny' tmp += ' ' + rule_config['regex'] self.assertIn(tmp, config) def test_prefix_list(self): test_data = { 'foo' : { 'rule' : { '5' : { 'action' : 'permit', 'prefix' : '10.0.0.0/8', 'ge' : '16', 'le' : '24', }, '10' : { 'action' : 'deny', 'prefix' : '172.16.0.0/12', 'ge' : '16', }, '15' : { 'action' : 'permit', 'prefix' : '192.168.0.0/16', }, }, }, 'bar' : { 'rule' : { '5' : { 'action' : 'permit', 'prefix' : '10.0.10.0/24', 'ge' : '25', 'le' : '26', }, '10' : { 'action' : 'deny', 'prefix' : '10.0.20.0/24', 'le' : '25', }, '15' : { 'action' : 'permit', 'prefix' : '10.0.25.0/24', }, }, }, } for prefix_list, prefix_list_config in test_data.items(): path = base_path + ['prefix-list', prefix_list] self.cli_set(path + ['description', f'VyOS-PFX-LIST-{prefix_list}']) if 'rule' not in prefix_list_config: continue for rule, rule_config in prefix_list_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'prefix' in rule_config: self.cli_set(path + ['rule', rule, 'prefix', rule_config['prefix']]) if 'ge' in rule_config: self.cli_set(path + ['rule', rule, 'ge', rule_config['ge']]) if 'le' in rule_config: self.cli_set(path + ['rule', rule, 'le', rule_config['le']]) self.cli_commit() config = self.getFRRconfig('ip prefix-list', end='') for prefix_list, prefix_list_config in test_data.items(): if 'rule' not in prefix_list_config: continue for rule, rule_config in prefix_list_config['rule'].items(): tmp = f'ip prefix-list {prefix_list} seq {rule}' if rule_config['action'] == 'permit': tmp += ' permit' else: tmp += ' deny' tmp += ' ' + rule_config['prefix'] if 'ge' in rule_config: tmp += ' ge ' + rule_config['ge'] if 'le' in rule_config: tmp += ' le ' + rule_config['le'] self.assertIn(tmp, config) def test_prefix_list6(self): test_data = { 'foo' : { 'rule' : { '5' : { 'action' : 'permit', 'prefix' : '2001:db8::/32', 'ge' : '40', 'le' : '48', }, '10' : { 'action' : 'deny', 'prefix' : '2001:db8::/32', 'ge' : '48', }, '15' : { 'action' : 'permit', 'prefix' : '2001:db8:1000::/64', }, }, }, 'bar' : { 'rule' : { '5' : { 'action' : 'permit', 'prefix' : '2001:db8:100::/40', 'ge' : '48', }, '10' : { 'action' : 'permit', 'prefix' : '2001:db8:200::/40', 'ge' : '48', }, '15' : { 'action' : 'deny', 'prefix' : '2001:db8:300::/40', 'le' : '64', }, }, }, } for prefix_list, prefix_list_config in test_data.items(): path = base_path + ['prefix-list6', prefix_list] self.cli_set(path + ['description', f'VyOS-PFX-LIST-{prefix_list}']) if 'rule' not in prefix_list_config: continue for rule, rule_config in 
prefix_list_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'prefix' in rule_config: self.cli_set(path + ['rule', rule, 'prefix', rule_config['prefix']]) if 'ge' in rule_config: self.cli_set(path + ['rule', rule, 'ge', rule_config['ge']]) if 'le' in rule_config: self.cli_set(path + ['rule', rule, 'le', rule_config['le']]) self.cli_commit() config = self.getFRRconfig('ipv6 prefix-list', end='') for prefix_list, prefix_list_config in test_data.items(): if 'rule' not in prefix_list_config: continue for rule, rule_config in prefix_list_config['rule'].items(): tmp = f'ipv6 prefix-list {prefix_list} seq {rule}' if rule_config['action'] == 'permit': tmp += ' permit' else: tmp += ' deny' tmp += ' ' + rule_config['prefix'] if 'ge' in rule_config: tmp += ' ge ' + rule_config['ge'] if 'le' in rule_config: tmp += ' le ' + rule_config['le'] self.assertIn(tmp, config) def test_prefix_list_duplicates(self): # FRR does not allow to specify the same profix list rule multiple times # # vyos(config)# ip prefix-list foo seq 10 permit 192.0.2.0/24 # vyos(config)# ip prefix-list foo seq 20 permit 192.0.2.0/24 # % Configuration failed. # Error type: validation # Error description: duplicated prefix list value: 192.0.2.0/24 # There is also a VyOS verify() function to test this prefix = '100.64.0.0/10' prefix_list = 'duplicates' test_range = range(20, 25) path = base_path + ['prefix-list', prefix_list] for rule in test_range: self.cli_set(path + ['rule', str(rule), 'action', 'permit']) self.cli_set(path + ['rule', str(rule), 'prefix', prefix]) # Duplicate prefixes with self.assertRaises(ConfigSessionError): self.cli_commit() for rule in test_range: self.cli_set(path + ['rule', str(rule), 'le', str(rule)]) self.cli_commit() config = self.getFRRconfig('ip prefix-list', end='') for rule in test_range: tmp = f'ip prefix-list {prefix_list} seq {rule} permit {prefix} le {rule}' self.assertIn(tmp, config) def test_route_map_community_set(self): test_data = { "community-configuration": { "rule": { "10": { "action": "permit", "set": { "community": { "replace": [ "65000:10", "65001:11" ] }, "extcommunity": { "bandwidth": "200", "rt": [ "65000:10", "192.168.0.1:11" ], "soo": [ "192.168.0.1:11", "65000:10" ] }, "large-community": { "replace": [ "65000:65000:10", "65000:65000:11" ] } } }, "20": { "action": "permit", "set": { "community": { "add": [ "65000:10", "65001:11" ] }, "extcommunity": { "bandwidth": "200", "bandwidth-non-transitive": {} }, "large-community": { "add": [ "65000:65000:10", "65000:65000:11" ] } } }, "30": { "action": "permit", "set": { "community": { "none": {} }, "extcommunity": { "none": {} }, "large-community": { "none": {} } } } } } } for route_map, route_map_config in test_data.items(): path = base_path + ['route-map', route_map] self.cli_set(path + ['description', f'VyOS ROUTE-MAP {route_map}']) if 'rule' not in route_map_config: continue for rule, rule_config in route_map_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'set' in rule_config: #Add community in configuration if 'community' in rule_config['set']: if 'none' in rule_config['set']['community']: self.cli_set(path + ['rule', rule, 'set', 'community', 'none']) else: community_path = path + ['rule', rule, 'set', 'community'] if 'add' in rule_config['set']['community']: for community_unit in rule_config['set']['community']['add']: self.cli_set(community_path + ['add', community_unit]) if 
'replace' in rule_config['set']['community']: for community_unit in rule_config['set']['community']['replace']: self.cli_set(community_path + ['replace', community_unit]) #Add large-community in configuration if 'large-community' in rule_config['set']: if 'none' in rule_config['set']['large-community']: self.cli_set(path + ['rule', rule, 'set', 'large-community', 'none']) else: community_path = path + ['rule', rule, 'set', 'large-community'] if 'add' in rule_config['set']['large-community']: for community_unit in rule_config['set']['large-community']['add']: self.cli_set(community_path + ['add', community_unit]) if 'replace' in rule_config['set']['large-community']: for community_unit in rule_config['set']['large-community']['replace']: self.cli_set(community_path + ['replace', community_unit]) #Add extcommunity in configuration if 'extcommunity' in rule_config['set']: if 'none' in rule_config['set']['extcommunity']: self.cli_set(path + ['rule', rule, 'set', 'extcommunity', 'none']) else: if 'bandwidth' in rule_config['set']['extcommunity']: self.cli_set(path + ['rule', rule, 'set', 'extcommunity', 'bandwidth', rule_config['set']['extcommunity']['bandwidth']]) if 'bandwidth-non-transitive' in rule_config['set']['extcommunity']: self.cli_set(path + ['rule', rule, 'set','extcommunity', 'bandwidth-non-transitive']) if 'rt' in rule_config['set']['extcommunity']: for community_unit in rule_config['set']['extcommunity']['rt']: self.cli_set(path + ['rule', rule, 'set', 'extcommunity','rt',community_unit]) if 'soo' in rule_config['set']['extcommunity']: for community_unit in rule_config['set']['extcommunity']['soo']: self.cli_set(path + ['rule', rule, 'set', 'extcommunity','soo',community_unit]) self.cli_commit() for route_map, route_map_config in test_data.items(): if 'rule' not in route_map_config: continue for rule, rule_config in route_map_config['rule'].items(): name = f'route-map {route_map} {rule_config["action"]} {rule}' config = self.getFRRconfig(name) self.assertIn(name, config) if 'set' in rule_config: #Check community if 'community' in rule_config['set']: if 'none' in rule_config['set']['community']: tmp = f'set community none' self.assertIn(tmp, config) if 'replace' in rule_config['set']['community']: values = ' '.join(rule_config['set']['community']['replace']) tmp = f'set community {values}' self.assertIn(tmp, config) if 'add' in rule_config['set']['community']: values = ' '.join(rule_config['set']['community']['add']) tmp = f'set community {values} additive' self.assertIn(tmp, config) #Check large-community if 'large-community' in rule_config['set']: if 'none' in rule_config['set']['large-community']: tmp = f'set large-community none' self.assertIn(tmp, config) if 'replace' in rule_config['set']['large-community']: values = ' '.join(rule_config['set']['large-community']['replace']) tmp = f'set large-community {values}' self.assertIn(tmp, config) if 'add' in rule_config['set']['large-community']: values = ' '.join(rule_config['set']['large-community']['add']) tmp = f'set large-community {values} additive' self.assertIn(tmp, config) #Check extcommunity if 'extcommunity' in rule_config['set']: if 'none' in rule_config['set']['extcommunity']: tmp = 'set extcommunity none' self.assertIn(tmp, config) if 'bandwidth' in rule_config['set']['extcommunity']: values = rule_config['set']['extcommunity']['bandwidth'] tmp = f'set extcommunity bandwidth {values}' if 'bandwidth-non-transitive' in rule_config['set']['extcommunity']: tmp = tmp + ' non-transitive' self.assertIn(tmp, config) if 'rt' in 
rule_config['set']['extcommunity']: values = ' '.join(rule_config['set']['extcommunity']['rt']) tmp = f'set extcommunity rt {values}' self.assertIn(tmp, config) if 'soo' in rule_config['set']['extcommunity']: values = ' '.join(rule_config['set']['extcommunity']['soo']) tmp = f'set extcommunity soo {values}' self.assertIn(tmp, config) def test_route_map(self): access_list = '50' as_path_list = '100' test_interface = 'eth0' community_list = 'BGP-comm-0815' # ext community name only allows alphanumeric characters and no hyphen :/ # maybe change this if possible in vyos-1x rewrite extcommunity_list = 'BGPextcomm123' large_community_list = 'bgp-large-community-123456' prefix_list = 'foo-pfx-list' ipv6_nexthop_address = 'fe80::1' local_pref = '300' metric = '50' peer = '2.3.4.5' peerv6 = '2001:db8::1' tag = '6542' goto = '25' ipv4_nexthop_address= '192.0.2.2' ipv4_prefix_len= '18' ipv6_prefix_len= '122' ipv4_nexthop_type= 'blackhole' ipv6_nexthop_type= 'blackhole' test_data = { 'foo-map-bar' : { 'rule' : { '5' : { 'action' : 'permit', 'continue' : '20', }, '10' : { 'action' : 'permit', 'call' : 'complicated-configuration', }, }, }, 'a-matching-rule-0815': { 'rule' : { '5' : { 'action' : 'deny', 'match' : { 'as-path' : as_path_list, 'rpki-invalid': '', 'tag': tag, }, }, '10' : { 'action' : 'permit', 'match' : { 'community' : community_list, 'interface' : test_interface, 'rpki-not-found': '', }, }, '15' : { 'action' : 'permit', 'match' : { 'extcommunity' : extcommunity_list, 'rpki-valid': '', }, 'on-match' : { 'next' : '', }, }, '20' : { 'action' : 'permit', 'match' : { 'ip-address-acl': access_list, 'ip-nexthop-acl': access_list, 'ip-route-source-acl': access_list, 'ipv6-address-acl': access_list, 'origin-incomplete' : '', }, 'on-match' : { 'goto' : goto, }, }, '25' : { 'action' : 'permit', 'match' : { 'ip-address-pfx': prefix_list, 'ip-nexthop-pfx': prefix_list, 'ip-route-source-pfx': prefix_list, 'ipv6-address-pfx': prefix_list, 'origin-igp': '', }, }, '30' : { 'action' : 'permit', 'match' : { 'ipv6-nexthop-address' : ipv6_nexthop_address, 'ipv6-nexthop-access-list' : access_list, 'ipv6-nexthop-prefix-list' : prefix_list, 'ipv6-nexthop-type' : ipv6_nexthop_type, 'ipv6-address-pfx-len' : ipv6_prefix_len, 'large-community' : large_community_list, 'local-pref' : local_pref, 'metric': metric, 'origin-egp': '', 'peer' : peer, }, }, '31' : { 'action' : 'permit', 'match' : { 'peer' : peerv6, }, }, '40' : { 'action' : 'permit', 'match' : { 'ip-nexthop-addr' : ipv4_nexthop_address, 'ip-address-pfx-len' : ipv4_prefix_len, }, }, '42' : { 'action' : 'deny', 'match' : { 'ip-nexthop-plen' : ipv4_prefix_len, }, }, '44' : { 'action' : 'permit', 'match' : { 'ip-nexthop-type' : ipv4_nexthop_type, }, }, }, }, 'complicated-configuration' : { 'rule' : { '10' : { 'action' : 'deny', 'set' : { 'aggregator-as' : '1234567890', 'aggregator-ip' : '10.255.255.0', 'as-path-exclude' : '1234', 'as-path-prepend' : '1234567890 987654321', 'as-path-prepend-last-as' : '5', 'atomic-aggregate' : '', 'distance' : '110', 'ipv6-next-hop-global' : '2001::1', 'ipv6-next-hop-local' : 'fe80::1', 'ip-next-hop' : '192.168.1.1', 'local-preference' : '500', 'metric' : '150', 'metric-type' : 'type-1', 'origin' : 'incomplete', 'l3vpn' : '', 'originator-id' : '172.16.10.1', 'src' : '100.0.0.1', 'tag' : '65530', 'weight' : '2', }, }, }, }, 'bandwidth-configuration' : { 'rule' : { '10' : { 'action' : 'deny', 'set' : { 'as-path-prepend' : '100 100', 'distance' : '200', 'extcommunity-bw' : 'num-multipaths', }, }, }, }, 'evpn-configuration' : { 
'rule' : { '10' : { 'action' : 'permit', 'match' : { 'evpn-default-route' : '', 'evpn-rd' : '100:300', 'evpn-route-type' : 'prefix', 'evpn-vni' : '1234', }, }, '20' : { 'action' : 'permit', 'set' : { 'as-path-exclude' : 'all', 'evpn-gateway-ipv4' : '192.0.2.99', 'evpn-gateway-ipv6' : '2001:db8:f00::1', }, }, }, }, 'match-protocol' : { 'rule' : { '10' : { 'action' : 'permit', 'match' : { 'protocol' : 'static', }, }, '20' : { 'action' : 'permit', 'match' : { 'protocol' : 'bgp', }, }, }, }, 'relative-metric' : { 'rule' : { '10' : { 'action' : 'permit', 'match' : { 'ip-nexthop-addr' : ipv4_nexthop_address, }, 'set' : { 'metric' : '+10', }, }, '20' : { 'action' : 'permit', 'match' : { 'ip-nexthop-addr' : ipv4_nexthop_address, }, 'set' : { 'metric' : '-20', }, }, '30': { 'action': 'permit', 'match': { 'ip-nexthop-addr': ipv4_nexthop_address, }, 'set': { 'metric': 'rtt', }, }, '40': { 'action': 'permit', 'match': { 'ip-nexthop-addr': ipv4_nexthop_address, }, 'set': { 'metric': '+rtt', }, }, '50': { 'action': 'permit', 'match': { 'ip-nexthop-addr': ipv4_nexthop_address, }, 'set': { 'metric': '-rtt', }, }, }, }, } self.cli_set(['policy', 'access-list', access_list, 'rule', '10', 'action', 'permit']) self.cli_set(['policy', 'access-list', access_list, 'rule', '10', 'source', 'host', '1.1.1.1']) self.cli_set(['policy', 'access-list6', access_list, 'rule', '10', 'action', 'permit']) self.cli_set(['policy', 'access-list6', access_list, 'rule', '10', 'source', 'network', '2001:db8::/32']) self.cli_set(['policy', 'as-path-list', as_path_list, 'rule', '10', 'action', 'permit']) self.cli_set(['policy', 'as-path-list', as_path_list, 'rule', '10', 'regex', '64501 64502']) self.cli_set(['policy', 'community-list', community_list, 'rule', '10', 'action', 'deny']) self.cli_set(['policy', 'community-list', community_list, 'rule', '10', 'regex', '65432']) self.cli_set(['policy', 'extcommunity-list', extcommunity_list, 'rule', '10', 'action', 'deny']) self.cli_set(['policy', 'extcommunity-list', extcommunity_list, 'rule', '10', 'regex', '65000']) self.cli_set(['policy', 'large-community-list', large_community_list, 'rule', '10', 'action', 'permit']) self.cli_set(['policy', 'large-community-list', large_community_list, 'rule', '10', 'regex', '100:200:300']) self.cli_set(['policy', 'prefix-list', prefix_list, 'rule', '10', 'action', 'permit']) self.cli_set(['policy', 'prefix-list', prefix_list, 'rule', '10', 'prefix', '192.0.2.0/24']) self.cli_set(['policy', 'prefix-list6', prefix_list, 'rule', '10', 'action', 'permit']) self.cli_set(['policy', 'prefix-list6', prefix_list, 'rule', '10', 'prefix', '2001:db8::/32']) for route_map, route_map_config in test_data.items(): path = base_path + ['route-map', route_map] self.cli_set(path + ['description', f'VyOS ROUTE-MAP {route_map}']) if 'rule' not in route_map_config: continue for rule, rule_config in route_map_config['rule'].items(): if 'action' in rule_config: self.cli_set(path + ['rule', rule, 'action', rule_config['action']]) if 'call' in rule_config: self.cli_set(path + ['rule', rule, 'call', rule_config['call']]) if 'continue' in rule_config: self.cli_set(path + ['rule', rule, 'continue', rule_config['continue']]) if 'match' in rule_config: if 'as-path' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'as-path', rule_config['match']['as-path']]) if 'community' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'community', 'community-list', rule_config['match']['community']]) self.cli_set(path + ['rule', rule, 'match', 
'community', 'exact-match']) if 'evpn-default-route' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'evpn', 'default-route']) if 'evpn-rd' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'evpn', 'rd', rule_config['match']['evpn-rd']]) if 'evpn-route-type' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'evpn', 'route-type', rule_config['match']['evpn-route-type']]) if 'evpn-vni' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'evpn', 'vni', rule_config['match']['evpn-vni']]) if 'extcommunity' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'extcommunity', rule_config['match']['extcommunity']]) if 'interface' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'interface', rule_config['match']['interface']]) if 'ip-address-acl' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'address', 'access-list', rule_config['match']['ip-address-acl']]) if 'ip-address-pfx' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'address', 'prefix-list', rule_config['match']['ip-address-pfx']]) if 'ip-address-pfx-len' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'address', 'prefix-len', rule_config['match']['ip-address-pfx-len']]) if 'ip-nexthop-acl' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'nexthop', 'access-list', rule_config['match']['ip-nexthop-acl']]) if 'ip-nexthop-pfx' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'nexthop', 'prefix-list', rule_config['match']['ip-nexthop-pfx']]) if 'ip-nexthop-addr' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'nexthop', 'address', rule_config['match']['ip-nexthop-addr']]) if 'ip-nexthop-plen' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'nexthop', 'prefix-len', rule_config['match']['ip-nexthop-plen']]) if 'ip-nexthop-type' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'nexthop', 'type', rule_config['match']['ip-nexthop-type']]) if 'ip-route-source-acl' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'route-source', 'access-list', rule_config['match']['ip-route-source-acl']]) if 'ip-route-source-pfx' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ip', 'route-source', 'prefix-list', rule_config['match']['ip-route-source-pfx']]) if 'ipv6-address-acl' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ipv6', 'address', 'access-list', rule_config['match']['ipv6-address-acl']]) if 'ipv6-address-pfx' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ipv6', 'address', 'prefix-list', rule_config['match']['ipv6-address-pfx']]) if 'ipv6-address-pfx-len' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ipv6', 'address', 'prefix-len', rule_config['match']['ipv6-address-pfx-len']]) if 'ipv6-nexthop-address' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ipv6', 'nexthop', 'address', rule_config['match']['ipv6-nexthop-address']]) if 'ipv6-nexthop-access-list' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ipv6', 'nexthop', 'access-list', rule_config['match']['ipv6-nexthop-access-list']]) if 'ipv6-nexthop-prefix-list' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ipv6', 'nexthop', 'prefix-list', rule_config['match']['ipv6-nexthop-prefix-list']]) if 
'ipv6-nexthop-type' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'ipv6', 'nexthop', 'type', rule_config['match']['ipv6-nexthop-type']]) if 'large-community' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'large-community', 'large-community-list', rule_config['match']['large-community']]) if 'local-pref' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'local-preference', rule_config['match']['local-pref']]) if 'metric' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'metric', rule_config['match']['metric']]) if 'origin-igp' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'origin', 'igp']) if 'origin-egp' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'origin', 'egp']) if 'origin-incomplete' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'origin', 'incomplete']) if 'peer' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'peer', rule_config['match']['peer']]) if 'rpki-invalid' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'rpki', 'invalid']) if 'rpki-not-found' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'rpki', 'notfound']) if 'rpki-valid' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'rpki', 'valid']) if 'protocol' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'protocol', rule_config['match']['protocol']]) if 'tag' in rule_config['match']: self.cli_set(path + ['rule', rule, 'match', 'tag', rule_config['match']['tag']]) if 'on-match' in rule_config: if 'goto' in rule_config['on-match']: self.cli_set(path + ['rule', rule, 'on-match', 'goto', rule_config['on-match']['goto']]) if 'next' in rule_config['on-match']: self.cli_set(path + ['rule', rule, 'on-match', 'next']) if 'set' in rule_config: if 'aggregator-as' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'aggregator', 'as', rule_config['set']['aggregator-as']]) if 'aggregator-ip' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'aggregator', 'ip', rule_config['set']['aggregator-ip']]) if 'as-path-exclude' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'as-path', 'exclude', rule_config['set']['as-path-exclude']]) if 'as-path-prepend' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'as-path', 'prepend', rule_config['set']['as-path-prepend']]) if 'atomic-aggregate' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'atomic-aggregate']) if 'distance' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'distance', rule_config['set']['distance']]) if 'ipv6-next-hop-global' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'ipv6-next-hop', 'global', rule_config['set']['ipv6-next-hop-global']]) if 'ipv6-next-hop-local' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'ipv6-next-hop', 'local', rule_config['set']['ipv6-next-hop-local']]) if 'ip-next-hop' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'ip-next-hop', rule_config['set']['ip-next-hop']]) if 'l3vpn' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'l3vpn-nexthop', 'encapsulation', 'gre']) if 'local-preference' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'local-preference', rule_config['set']['local-preference']]) if 'metric' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'metric', rule_config['set']['metric']]) if 'metric-type' in 
rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'metric-type', rule_config['set']['metric-type']]) if 'origin' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'origin', rule_config['set']['origin']]) if 'originator-id' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'originator-id', rule_config['set']['originator-id']]) if 'src' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'src', rule_config['set']['src']]) if 'tag' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'tag', rule_config['set']['tag']]) if 'weight' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'weight', rule_config['set']['weight']]) if 'evpn-gateway-ipv4' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'evpn', 'gateway', 'ipv4', rule_config['set']['evpn-gateway-ipv4']]) if 'evpn-gateway-ipv6' in rule_config['set']: self.cli_set(path + ['rule', rule, 'set', 'evpn', 'gateway', 'ipv6', rule_config['set']['evpn-gateway-ipv6']]) self.cli_commit() for route_map, route_map_config in test_data.items(): if 'rule' not in route_map_config: continue for rule, rule_config in route_map_config['rule'].items(): name = f'route-map {route_map} {rule_config["action"]} {rule}' config = self.getFRRconfig(name) self.assertIn(name, config) if 'call' in rule_config: tmp = 'call ' + rule_config['call'] self.assertIn(tmp, config) if 'continue' in rule_config: tmp = 'on-match goto ' + rule_config['continue'] self.assertIn(tmp, config) if 'match' in rule_config: if 'as-path' in rule_config['match']: tmp = 'match as-path ' + rule_config['match']['as-path'] self.assertIn(tmp, config) if 'community' in rule_config['match']: tmp = f'match community {rule_config["match"]["community"]} exact-match' self.assertIn(tmp, config) if 'evpn-default-route' in rule_config['match']: tmp = f'match evpn default-route' self.assertIn(tmp, config) if 'evpn-rd' in rule_config['match']: tmp = f'match evpn rd {rule_config["match"]["evpn-rd"]}' self.assertIn(tmp, config) if 'evpn-route-type' in rule_config['match']: tmp = f'match evpn route-type {rule_config["match"]["evpn-route-type"]}' self.assertIn(tmp, config) if 'evpn-vni' in rule_config['match']: tmp = f'match evpn vni {rule_config["match"]["evpn-vni"]}' self.assertIn(tmp, config) if 'extcommunity' in rule_config['match']: tmp = f'match extcommunity {rule_config["match"]["extcommunity"]}' self.assertIn(tmp, config) if 'interface' in rule_config['match']: tmp = f'match interface {rule_config["match"]["interface"]}' self.assertIn(tmp, config) if 'ip-address-acl' in rule_config['match']: tmp = f'match ip address {rule_config["match"]["ip-address-acl"]}' self.assertIn(tmp, config) if 'ip-address-pfx' in rule_config['match']: tmp = f'match ip address prefix-list {rule_config["match"]["ip-address-pfx"]}' self.assertIn(tmp, config) if 'ip-address-pfx-len' in rule_config['match']: tmp = f'match ip address prefix-len {rule_config["match"]["ip-address-pfx-len"]}' self.assertIn(tmp, config) if 'ip-nexthop-acl' in rule_config['match']: tmp = f'match ip next-hop {rule_config["match"]["ip-nexthop-acl"]}' self.assertIn(tmp, config) if 'ip-nexthop-pfx' in rule_config['match']: tmp = f'match ip next-hop prefix-list {rule_config["match"]["ip-nexthop-pfx"]}' self.assertIn(tmp, config) if 'ip-nexthop-addr' in rule_config['match']: tmp = f'match ip next-hop address {rule_config["match"]["ip-nexthop-addr"]}' self.assertIn(tmp, config) if 'ip-nexthop-plen' in rule_config['match']: tmp = f'match ip next-hop prefix-len 
{rule_config["match"]["ip-nexthop-plen"]}' self.assertIn(tmp, config) if 'ip-nexthop-type' in rule_config['match']: tmp = f'match ip next-hop type {rule_config["match"]["ip-nexthop-type"]}' self.assertIn(tmp, config) if 'ip-route-source-acl' in rule_config['match']: tmp = f'match ip route-source {rule_config["match"]["ip-route-source-acl"]}' self.assertIn(tmp, config) if 'ip-route-source-pfx' in rule_config['match']: tmp = f'match ip route-source prefix-list {rule_config["match"]["ip-route-source-pfx"]}' self.assertIn(tmp, config) if 'ipv6-address-acl' in rule_config['match']: tmp = f'match ipv6 address {rule_config["match"]["ipv6-address-acl"]}' self.assertIn(tmp, config) if 'ipv6-address-pfx' in rule_config['match']: tmp = f'match ipv6 address prefix-list {rule_config["match"]["ipv6-address-pfx"]}' self.assertIn(tmp, config) if 'ipv6-address-pfx-len' in rule_config['match']: tmp = f'match ipv6 address prefix-len {rule_config["match"]["ipv6-address-pfx-len"]}' self.assertIn(tmp, config) if 'ipv6-nexthop-address' in rule_config['match']: tmp = f'match ipv6 next-hop address {rule_config["match"]["ipv6-nexthop-address"]}' self.assertIn(tmp, config) if 'ipv6-nexthop-access-list' in rule_config['match']: tmp = f'match ipv6 next-hop {rule_config["match"]["ipv6-nexthop-access-list"]}' self.assertIn(tmp, config) if 'ipv6-nexthop-prefix-list' in rule_config['match']: tmp = f'match ipv6 next-hop prefix-list {rule_config["match"]["ipv6-nexthop-prefix-list"]}' self.assertIn(tmp, config) if 'ipv6-nexthop-type' in rule_config['match']: tmp = f'match ipv6 next-hop type {rule_config["match"]["ipv6-nexthop-type"]}' self.assertIn(tmp, config) if 'large-community' in rule_config['match']: tmp = f'match large-community {rule_config["match"]["large-community"]}' self.assertIn(tmp, config) if 'local-pref' in rule_config['match']: tmp = f'match local-preference {rule_config["match"]["local-pref"]}' self.assertIn(tmp, config) if 'metric' in rule_config['match']: tmp = f'match metric {rule_config["match"]["metric"]}' self.assertIn(tmp, config) if 'origin-igp' in rule_config['match']: tmp = f'match origin igp' self.assertIn(tmp, config) if 'origin-egp' in rule_config['match']: tmp = f'match origin egp' self.assertIn(tmp, config) if 'origin-incomplete' in rule_config['match']: tmp = f'match origin incomplete' self.assertIn(tmp, config) if 'peer' in rule_config['match']: tmp = f'match peer {rule_config["match"]["peer"]}' self.assertIn(tmp, config) if 'protocol' in rule_config['match']: tmp = f'match source-protocol {rule_config["match"]["protocol"]}' self.assertIn(tmp, config) if 'rpki-invalid' in rule_config['match']: tmp = f'match rpki invalid' self.assertIn(tmp, config) if 'rpki-not-found' in rule_config['match']: tmp = f'match rpki notfound' self.assertIn(tmp, config) if 'rpki-valid' in rule_config['match']: tmp = f'match rpki valid' self.assertIn(tmp, config) if 'tag' in rule_config['match']: tmp = f'match tag {rule_config["match"]["tag"]}' self.assertIn(tmp, config) if 'on-match' in rule_config: if 'goto' in rule_config['on-match']: tmp = f'on-match goto {rule_config["on-match"]["goto"]}' self.assertIn(tmp, config) if 'next' in rule_config['on-match']: tmp = f'on-match next' self.assertIn(tmp, config) if 'set' in rule_config: tmp = ' set ' if 'aggregator-as' in rule_config['set']: tmp += 'aggregator as ' + rule_config['set']['aggregator-as'] elif 'aggregator-ip' in rule_config['set']: tmp += ' ' + rule_config['set']['aggregator-ip'] elif 'as-path-exclude' in rule_config['set']: tmp += 'as-path exclude ' + 
rule_config['set']['as-path-exclude'] elif 'as-path-prepend' in rule_config['set']: tmp += 'as-path prepend ' + rule_config['set']['as-path-prepend'] elif 'as-path-prepend-last-as' in rule_config['set']: tmp += 'as-path prepend last-as' + rule_config['set']['as-path-prepend-last-as'] elif 'atomic-aggregate' in rule_config['set']: tmp += 'atomic-aggregate' elif 'distance' in rule_config['set']: tmp += 'distance ' + rule_config['set']['distance'] elif 'ip-next-hop' in rule_config['set']: tmp += 'ip next-hop ' + rule_config['set']['ip-next-hop'] elif 'ipv6-next-hop-global' in rule_config['set']: tmp += 'ipv6 next-hop global ' + rule_config['set']['ipv6-next-hop-global'] elif 'ipv6-next-hop-local' in rule_config['set']: tmp += 'ipv6 next-hop local ' + rule_config['set']['ipv6-next-hop-local'] elif 'l3vpn' in rule_config['set']: tmp += 'l3vpn next-hop encapsulation gre' elif 'local-preference' in rule_config['set']: tmp += 'local-preference ' + rule_config['set']['local-preference'] elif 'metric' in rule_config['set']: tmp += 'metric ' + rule_config['set']['metric'] elif 'metric-type' in rule_config['set']: tmp += 'metric-type ' + rule_config['set']['metric-type'] elif 'origin' in rule_config['set']: tmp += 'origin ' + rule_config['set']['origin'] elif 'originator-id' in rule_config['set']: tmp += 'originator-id ' + rule_config['set']['originator-id'] elif 'src' in rule_config['set']: tmp += 'src ' + rule_config['set']['src'] elif 'tag' in rule_config['set']: tmp += 'tag ' + rule_config['set']['tag'] elif 'weight' in rule_config['set']: tmp += 'weight ' + rule_config['set']['weight'] elif 'vpn-gateway-ipv4' in rule_config['set']: tmp += 'evpn gateway ipv4 ' + rule_config['set']['vpn-gateway-ipv4'] elif 'vpn-gateway-ipv6' in rule_config['set']: tmp += 'evpn gateway ipv6 ' + rule_config['set']['vpn-gateway-ipv6'] self.assertIn(tmp, config) # Test set table for some sources def test_table_id(self): path = base_path + ['local-route'] sources = ['203.0.113.1', '203.0.113.2'] rule = '50' table = '23' for src in sources: self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_commit() original = """ 50: from 203.0.113.1 lookup 23 50: from 203.0.113.2 lookup 23 """ tmp = cmd('ip rule show prio 50') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for fwmark def test_fwmark_table_id(self): path = base_path + ['local-route'] fwmk = '24' rule = '101' table = '154' self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'fwmark', fwmk]) self.cli_commit() original = """ 101: from all fwmark 0x18 lookup 154 """ tmp = cmd('ip rule show prio 101') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for destination def test_destination_table_id(self): path = base_path + ['local-route'] dst = '203.0.113.1' rule = '102' table = '154' self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'destination', 'address', dst]) self.cli_commit() original = """ 102: from all to 203.0.113.1 lookup 154 """ tmp = cmd('ip rule show prio 102') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for destination and protocol def test_protocol_destination_table_id(self): path = base_path + ['local-route'] dst = '203.0.113.12' rule = '85' table = '104' proto = 'tcp' self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'destination', 'address', dst]) self.cli_set(path + ['rule', rule, 'protocol', 
proto]) self.cli_commit() original = """ 85: from all to 203.0.113.12 ipproto tcp lookup 104 """ tmp = cmd('ip rule show prio 85') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for destination, source, protocol, fwmark and port def test_protocol_port_address_fwmark_table_id(self): path = base_path + ['local-route'] dst = '203.0.113.5' src_list = ['203.0.113.1', '203.0.113.2'] rule = '23' fwmark = '123456' table = '123' new_table = '111' proto = 'udp' new_proto = 'tcp' src_port = '5555' dst_port = '8888' self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'destination', 'address', dst]) self.cli_set(path + ['rule', rule, 'source', 'port', src_port]) self.cli_set(path + ['rule', rule, 'protocol', proto]) self.cli_set(path + ['rule', rule, 'fwmark', fwmark]) self.cli_set(path + ['rule', rule, 'destination', 'port', dst_port]) for src in src_list: self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_commit() original = """ 23: from 203.0.113.1 to 203.0.113.5 fwmark 0x1e240 ipproto udp sport 5555 dport 8888 lookup 123 23: from 203.0.113.2 to 203.0.113.5 fwmark 0x1e240 ipproto udp sport 5555 dport 8888 lookup 123 """ tmp = cmd(f'ip rule show prio {rule}') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Change table and protocol, delete fwmark and source port self.cli_delete(path + ['rule', rule, 'fwmark']) self.cli_delete(path + ['rule', rule, 'source', 'port']) self.cli_set(path + ['rule', rule, 'set', 'table', new_table]) self.cli_set(path + ['rule', rule, 'protocol', new_proto]) self.cli_commit() original = """ 23: from 203.0.113.1 to 203.0.113.5 ipproto tcp dport 8888 lookup 111 23: from 203.0.113.2 to 203.0.113.5 ipproto tcp dport 8888 lookup 111 """ tmp = cmd(f'ip rule show prio {rule}') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for sources with fwmark def test_fwmark_sources_table_id(self): path = base_path + ['local-route'] sources = ['203.0.113.11', '203.0.113.12'] fwmk = '23' rule = '100' table = '150' for src in sources: self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_set(path + ['rule', rule, 'fwmark', fwmk]) self.cli_commit() original = """ 100: from 203.0.113.11 fwmark 0x17 lookup 150 100: from 203.0.113.12 fwmark 0x17 lookup 150 """ tmp = cmd('ip rule show prio 100') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for sources with iif def test_iif_sources_table_id(self): path = base_path + ['local-route'] sources = ['203.0.113.11', '203.0.113.12'] iif = 'lo' rule = '100' table = '150' self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'inbound-interface', iif]) for src in sources: self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_commit() # Check generated configuration # Expected values original = """ 100: from 203.0.113.11 iif lo lookup 150 100: from 203.0.113.12 iif lo lookup 150 """ tmp = cmd('ip rule show prio 100') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for sources and destinations with fwmark def test_fwmark_sources_destination_table_id(self): path = base_path + ['local-route'] sources = ['203.0.113.11', '203.0.113.12'] destinations = ['203.0.113.13', '203.0.113.15'] fwmk = '23' rule = '103' table = '150' for src in sources: for dst in destinations: self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) 
self.cli_set(path + ['rule', rule, 'destination', 'address', dst]) self.cli_set(path + ['rule', rule, 'fwmark', fwmk]) self.cli_commit() original = """ 103: from 203.0.113.11 to 203.0.113.13 fwmark 0x17 lookup 150 103: from 203.0.113.11 to 203.0.113.15 fwmark 0x17 lookup 150 103: from 203.0.113.12 to 203.0.113.13 fwmark 0x17 lookup 150 103: from 203.0.113.12 to 203.0.113.15 fwmark 0x17 lookup 150 """ tmp = cmd('ip rule show prio 103') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table ipv6 for some sources ipv6 def test_ipv6_table_id(self): path = base_path + ['local-route6'] sources = ['2001:db8:123::/48', '2001:db8:126::/48'] rule = '50' table = '23' for src in sources: self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_commit() original = """ 50: from 2001:db8:123::/48 lookup 23 50: from 2001:db8:126::/48 lookup 23 """ tmp = cmd('ip -6 rule show prio 50') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for fwmark ipv6 def test_fwmark_ipv6_table_id(self): path = base_path + ['local-route6'] fwmk = '24' rule = '100' table = '154' self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'fwmark', fwmk]) self.cli_commit() original = """ 100: from all fwmark 0x18 lookup 154 """ tmp = cmd('ip -6 rule show prio 100') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for destination ipv6 def test_destination_ipv6_table_id(self): path = base_path + ['local-route6'] dst = '2001:db8:1337::/126' rule = '101' table = '154' self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'destination', 'address', dst]) self.cli_commit() original = """ 101: from all to 2001:db8:1337::/126 lookup 154 """ tmp = cmd('ip -6 rule show prio 101') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for sources with fwmark ipv6 def test_fwmark_sources_ipv6_table_id(self): path = base_path + ['local-route6'] sources = ['2001:db8:1338::/126', '2001:db8:1339::/126'] fwmk = '23' rule = '102' table = '150' for src in sources: self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_set(path + ['rule', rule, 'fwmark', fwmk]) self.cli_commit() original = """ 102: from 2001:db8:1338::/126 fwmark 0x17 lookup 150 102: from 2001:db8:1339::/126 fwmark 0x17 lookup 150 """ tmp = cmd('ip -6 rule show prio 102') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for sources with iif ipv6 def test_iif_sources_ipv6_table_id(self): path = base_path + ['local-route6'] sources = ['2001:db8:1338::/126', '2001:db8:1339::/126'] iif = 'lo' rule = '102' table = '150' for src in sources: self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_set(path + ['rule', rule, 'inbound-interface', iif]) self.cli_commit() # Check generated configuration # Expected values original = """ 102: from 2001:db8:1338::/126 iif lo lookup 150 102: from 2001:db8:1339::/126 iif lo lookup 150 """ tmp = cmd('ip -6 rule show prio 102') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test set table for sources and destinations with fwmark ipv6 def test_fwmark_sources_destination_ipv6_table_id(self): path = base_path + ['local-route6'] sources = ['2001:db8:1338::/126', '2001:db8:1339::/56'] destinations = ['2001:db8:13::/48', '2001:db8:16::/48'] fwmk = '23' rule = '103' table = '150' for 
src in sources: for dst in destinations: self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_set(path + ['rule', rule, 'destination', 'address', dst]) self.cli_set(path + ['rule', rule, 'fwmark', fwmk]) self.cli_commit() original = """ 103: from 2001:db8:1338::/126 to 2001:db8:13::/48 fwmark 0x17 lookup 150 103: from 2001:db8:1338::/126 to 2001:db8:16::/48 fwmark 0x17 lookup 150 103: from 2001:db8:1339::/56 to 2001:db8:13::/48 fwmark 0x17 lookup 150 103: from 2001:db8:1339::/56 to 2001:db8:16::/48 fwmark 0x17 lookup 150 """ tmp = cmd('ip -6 rule show prio 103') self.assertEqual(sort_ip(tmp), sort_ip(original)) # Test delete table for sources and destination with fwmark ipv4/ipv6 def test_delete_ipv4_ipv6_table_id(self): path = base_path + ['local-route'] path_v6 = base_path + ['local-route6'] sources = ['203.0.113.0/24', '203.0.114.5'] destinations = ['203.0.112.0/24', '203.0.116.5'] sources_v6 = ['2001:db8:1338::/126', '2001:db8:1339::/56'] destinations_v6 = ['2001:db8:13::/48', '2001:db8:16::/48'] fwmk = '23' rule = '103' table = '150' for src in sources: for dst in destinations: self.cli_set(path + ['rule', rule, 'set', 'table', table]) self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_set(path + ['rule', rule, 'destination', 'address', dst]) self.cli_set(path + ['rule', rule, 'fwmark', fwmk]) for src in sources_v6: for dst in destinations_v6: self.cli_set(path_v6 + ['rule', rule, 'set', 'table', table]) self.cli_set(path_v6 + ['rule', rule, 'source', 'address', src]) self.cli_set(path_v6 + ['rule', rule, 'destination', 'address', dst]) self.cli_set(path_v6 + ['rule', rule, 'fwmark', fwmk]) self.cli_commit() original = """ 103: from 203.0.113.0/24 to 203.0.116.5 fwmark 0x17 lookup 150 103: from 203.0.114.5 to 203.0.112.0/24 fwmark 0x17 lookup 150 103: from 203.0.114.5 to 203.0.116.5 fwmark 0x17 lookup 150 103: from 203.0.113.0/24 to 203.0.112.0/24 fwmark 0x17 lookup 150 """ original_v6 = """ 103: from 2001:db8:1338::/126 to 2001:db8:16::/48 fwmark 0x17 lookup 150 103: from 2001:db8:1339::/56 to 2001:db8:13::/48 fwmark 0x17 lookup 150 103: from 2001:db8:1339::/56 to 2001:db8:16::/48 fwmark 0x17 lookup 150 103: from 2001:db8:1338::/126 to 2001:db8:13::/48 fwmark 0x17 lookup 150 """ tmp = cmd('ip rule show prio 103') tmp_v6 = cmd('ip -6 rule show prio 103') self.assertEqual(sort_ip(tmp), sort_ip(original)) self.assertEqual(sort_ip(tmp_v6), sort_ip(original_v6)) self.cli_delete(path) self.cli_delete(path_v6) self.cli_commit() tmp = cmd('ip rule show prio 103') tmp_v6 = cmd('ip -6 rule show prio 103') self.assertEqual(sort_ip(tmp), []) self.assertEqual(sort_ip(tmp_v6), []) # Test multiple commits ipv4 def test_multiple_commit_ipv4_table_id(self): path = base_path + ['local-route'] sources = ['192.0.2.1', '192.0.2.2'] destination = '203.0.113.25' rule = '105' table = '151' self.cli_set(path + ['rule', rule, 'set', 'table', table]) for src in sources: self.cli_set(path + ['rule', rule, 'source', 'address', src]) self.cli_commit() original_first = """ 105: from 192.0.2.1 lookup 151 105: from 192.0.2.2 lookup 151 """ tmp = cmd('ip rule show prio 105') self.assertEqual(sort_ip(tmp), sort_ip(original_first)) # Create second commit with added destination self.cli_set(path + ['rule', rule, 'destination', 'address', destination]) self.cli_commit() original_second = """ 105: from 192.0.2.1 to 203.0.113.25 lookup 151 105: from 192.0.2.2 to 203.0.113.25 lookup 151 """ tmp = cmd('ip rule show prio 105') 
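# Editor's note (assumption based on the expected output): the second commit should replace the existing prio 105 rules rather than add new ones alongside them, which is why original_second contains only the destination-qualified entries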
self.assertEqual(sort_ip(tmp), sort_ip(original_second)) def test_frr_individual_remove_T6283_T6250(self): path = base_path + ['route-map'] route_maps = ['RMAP-1', 'RMAP_2'] seq = '10' base_local_preference = 300 base_table = 50 # T6250 local_preference = base_local_preference table = base_table for route_map in route_maps: self.cli_set(path + [route_map, 'rule', seq, 'action', 'permit']) self.cli_set(path + [route_map, 'rule', seq, 'set', 'table', str(table)]) self.cli_set(path + [route_map, 'rule', seq, 'set', 'local-preference', str(local_preference)]) local_preference += 20 table += 5 self.cli_commit() local_preference = base_local_preference table = base_table for route_map in route_maps: - config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='') + config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='', endsection='^exit') self.assertIn(f' set local-preference {local_preference}', config) self.assertIn(f' set table {table}', config) local_preference += 20 table += 5 for route_map in route_maps: self.cli_delete(path + [route_map, 'rule', '10', 'set', 'table']) # we explicitly commit multiple times to be as vandal as possible to the system self.cli_commit() local_preference = base_local_preference for route_map in route_maps: - config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='') + config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='', endsection='^exit') self.assertIn(f' set local-preference {local_preference}', config) local_preference += 20 # T6283 seq = '20' prepend = '100 100 100' for route_map in route_maps: self.cli_set(path + [route_map, 'rule', seq, 'action', 'permit']) self.cli_set(path + [route_map, 'rule', seq, 'set', 'as-path', 'prepend', prepend]) self.cli_commit() for route_map in route_maps: - config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='') + config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='', endsection='^exit') self.assertIn(f' set as-path prepend {prepend}', config) for route_map in route_maps: self.cli_delete(path + [route_map, 'rule', seq, 'set']) # we explicitly commit multiple times to be as vandal as possible to the system self.cli_commit() for route_map in route_maps: - config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='') + config = self.getFRRconfig(f'route-map {route_map} permit {seq}', end='', endsection='^exit') self.assertNotIn(f' set', config) def sort_ip(output): o = '\n'.join([' '.join(line.strip().split()) for line in output.strip().splitlines()]) o = o.splitlines() o.sort() return o if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_bfd.py b/smoketest/scripts/cli/test_protocols_bfd.py index 716d0a806..9f178c821 100755 --- a/smoketest/scripts/cli/test_protocols_bfd.py +++ b/smoketest/scripts/cli/test_protocols_bfd.py @@ -1,236 +1,238 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2023 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. 
If not, see <http://www.gnu.org/licenses/>. import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.utils.process import process_named_running PROCESS_NAME = 'bfdd' base_path = ['protocols', 'bfd'] +frr_endsection = '^ exit' dum_if = 'dum1001' vrf_name = 'red' peers = { '192.0.2.10' : { 'intv_rx' : '500', 'intv_tx' : '600', 'multihop' : '', 'source_addr': '192.0.2.254', 'profile' : 'foo-bar-baz', 'minimum_ttl': '20', }, '192.0.2.20' : { 'echo_mode' : '', 'intv_echo' : '100', 'intv_mult' : '100', 'intv_rx' : '222', 'intv_tx' : '333', 'passive' : '', 'shutdown' : '', 'profile' : 'foo', 'source_intf': dum_if, }, '2001:db8::1000:1' : { 'source_addr': '2001:db8::1', 'vrf' : vrf_name, }, '2001:db8::2000:1' : { 'source_addr': '2001:db8::1', 'multihop' : '', 'profile' : 'baz_foo', }, } profiles = { 'foo' : { 'echo_mode' : '', 'intv_echo' : '100', 'intv_mult' : '101', 'intv_rx' : '222', 'intv_tx' : '333', 'shutdown' : '', 'minimum_ttl': '40', }, 'foo-bar-baz' : { 'intv_mult' : '4', 'intv_rx' : '400', 'intv_tx' : '400', }, 'baz_foo' : { 'intv_mult' : '102', 'intv_rx' : '444', 'passive' : '', }, } class TestProtocolsBFD(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): super(TestProtocolsBFD, cls).setUpClass() # Retrieve FRR daemon PID - it is not allowed to crash, thus PID must remain the same cls.daemon_pid = process_named_running(PROCESS_NAME) # ensure we can also run this test on a live system - so lets clean # out the current configuration :) cls.cli_delete(cls, base_path) def tearDown(self): self.cli_delete(base_path) self.cli_commit() # check process health and continuity self.assertEqual(self.daemon_pid, process_named_running(PROCESS_NAME)) def test_bfd_peer(self): self.cli_set(['vrf', 'name', vrf_name, 'table', '1000']) for peer, peer_config in peers.items(): if 'echo_mode' in peer_config: self.cli_set(base_path + ['peer', peer, 'echo-mode']) if 'intv_echo' in peer_config: self.cli_set(base_path + ['peer', peer, 'interval', 'echo-interval', peer_config["intv_echo"]]) if 'intv_mult' in peer_config: self.cli_set(base_path + ['peer', peer, 'interval', 'multiplier', peer_config["intv_mult"]]) if 'intv_rx' in peer_config: self.cli_set(base_path + ['peer', peer, 'interval', 'receive', peer_config["intv_rx"]]) if 'intv_tx' in peer_config: self.cli_set(base_path + ['peer', peer, 'interval', 'transmit', peer_config["intv_tx"]]) if 'minimum_ttl' in peer_config: self.cli_set(base_path + ['peer', peer, 'minimum-ttl', peer_config["minimum_ttl"]]) if 'multihop' in peer_config: self.cli_set(base_path + ['peer', peer, 'multihop']) if 'passive' in peer_config: self.cli_set(base_path + ['peer', peer, 'passive']) if 'shutdown' in peer_config: self.cli_set(base_path + ['peer', peer, 'shutdown']) if 'source_addr' in peer_config: self.cli_set(base_path + ['peer', peer, 'source', 'address', peer_config["source_addr"]]) if 'source_intf' in peer_config: self.cli_set(base_path + ['peer', peer, 'source', 'interface', peer_config["source_intf"]]) if 'vrf' in peer_config: self.cli_set(base_path + ['peer', peer, 'vrf', peer_config["vrf"]]) # commit changes self.cli_commit() # Verify FRR bgpd configuration - frrconfig = self.getFRRconfig('bfd', daemon=PROCESS_NAME) + frrconfig = self.getFRRconfig('bfd', endsection='^exit', daemon=PROCESS_NAME) for peer, peer_config in peers.items(): tmp = f'peer {peer}' if 'multihop' in peer_config: tmp += f' multihop' if 'source_addr' in peer_config: tmp += f' local-address {peer_config["source_addr"]}' if 
'source_intf' in peer_config: tmp += f' interface {peer_config["source_intf"]}' if 'vrf' in peer_config: tmp += f' vrf {peer_config["vrf"]}' self.assertIn(tmp, frrconfig) - peerconfig = self.getFRRconfig(f' peer {peer}', end='', daemon=PROCESS_NAME) - + peerconfig = self.getFRRconfig(f' peer {peer}', end='', endsection=frr_endsection, + daemon=PROCESS_NAME) if 'echo_mode' in peer_config: self.assertIn(f'echo-mode', peerconfig) if 'intv_echo' in peer_config: self.assertIn(f'echo receive-interval {peer_config["intv_echo"]}', peerconfig) self.assertIn(f'echo transmit-interval {peer_config["intv_echo"]}', peerconfig) if 'intv_mult' in peer_config: self.assertIn(f'detect-multiplier {peer_config["intv_mult"]}', peerconfig) if 'intv_rx' in peer_config: self.assertIn(f'receive-interval {peer_config["intv_rx"]}', peerconfig) if 'intv_tx' in peer_config: self.assertIn(f'transmit-interval {peer_config["intv_tx"]}', peerconfig) if 'minimum_ttl' in peer_config: self.assertIn(f'minimum-ttl {peer_config["minimum_ttl"]}', peerconfig) if 'passive' in peer_config: self.assertIn(f'passive-mode', peerconfig) if 'shutdown' in peer_config: self.assertIn(f'shutdown', peerconfig) else: self.assertNotIn(f'shutdown', peerconfig) self.cli_delete(['vrf', 'name', vrf_name]) def test_bfd_profile(self): for profile, profile_config in profiles.items(): if 'echo_mode' in profile_config: self.cli_set(base_path + ['profile', profile, 'echo-mode']) if 'intv_echo' in profile_config: self.cli_set(base_path + ['profile', profile, 'interval', 'echo-interval', profile_config["intv_echo"]]) if 'intv_mult' in profile_config: self.cli_set(base_path + ['profile', profile, 'interval', 'multiplier', profile_config["intv_mult"]]) if 'intv_rx' in profile_config: self.cli_set(base_path + ['profile', profile, 'interval', 'receive', profile_config["intv_rx"]]) if 'intv_tx' in profile_config: self.cli_set(base_path + ['profile', profile, 'interval', 'transmit', profile_config["intv_tx"]]) if 'minimum_ttl' in profile_config: self.cli_set(base_path + ['profile', profile, 'minimum-ttl', profile_config["minimum_ttl"]]) if 'passive' in profile_config: self.cli_set(base_path + ['profile', profile, 'passive']) if 'shutdown' in profile_config: self.cli_set(base_path + ['profile', profile, 'shutdown']) for peer, peer_config in peers.items(): if 'profile' in peer_config: self.cli_set(base_path + ['peer', peer, 'profile', peer_config["profile"] + 'wrong']) if 'source_addr' in peer_config: self.cli_set(base_path + ['peer', peer, 'source', 'address', peer_config["source_addr"]]) if 'source_intf' in peer_config: self.cli_set(base_path + ['peer', peer, 'source', 'interface', peer_config["source_intf"]]) # BFD profile does not exist! 
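# The peers above were intentionally bound to a non-existent profile (name suffixed with 'wrong'), so this commit must be rejected; the valid profile names are configured right after the expected ConfigSessionError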
with self.assertRaises(ConfigSessionError): self.cli_commit() for peer, peer_config in peers.items(): if 'profile' in peer_config: self.cli_set(base_path + ['peer', peer, 'profile', peer_config["profile"]]) # commit changes self.cli_commit() # Verify FRR bgpd configuration for profile, profile_config in profiles.items(): - config = self.getFRRconfig(f' profile {profile}', endsection='^ !') + config = self.getFRRconfig(f' profile {profile}', endsection=frr_endsection) if 'echo_mode' in profile_config: self.assertIn(f' echo-mode', config) if 'intv_echo' in profile_config: self.assertIn(f' echo receive-interval {profile_config["intv_echo"]}', config) self.assertIn(f' echo transmit-interval {profile_config["intv_echo"]}', config) if 'intv_mult' in profile_config: self.assertIn(f' detect-multiplier {profile_config["intv_mult"]}', config) if 'intv_rx' in profile_config: self.assertIn(f' receive-interval {profile_config["intv_rx"]}', config) if 'intv_tx' in profile_config: self.assertIn(f' transmit-interval {profile_config["intv_tx"]}', config) if 'minimum_ttl' in profile_config: self.assertIn(f' minimum-ttl {profile_config["minimum_ttl"]}', config) if 'passive' in profile_config: self.assertIn(f' passive-mode', config) if 'shutdown' in profile_config: self.assertIn(f' shutdown', config) else: self.assertNotIn(f'shutdown', config) for peer, peer_config in peers.items(): - peerconfig = self.getFRRconfig(f' peer {peer}', end='', daemon=PROCESS_NAME) + peerconfig = self.getFRRconfig(f' peer {peer}', end='', + endsection=frr_endsection, daemon=PROCESS_NAME) if 'profile' in peer_config: self.assertIn(f' profile {peer_config["profile"]}', peerconfig) if __name__ == '__main__': unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_static.py b/smoketest/scripts/cli/test_protocols_static.py index f676e2a52..980bc6e7f 100755 --- a/smoketest/scripts/cli/test_protocols_static.py +++ b/smoketest/scripts/cli/test_protocols_static.py @@ -1,482 +1,572 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
import unittest from base_vyostest_shim import VyOSUnitTestSHIM from vyos.configsession import ConfigSessionError from vyos.template import is_ipv6 from vyos.utils.network import get_interface_config from vyos.utils.network import get_vrf_tableid base_path = ['protocols', 'static'] vrf_path = ['protocols', 'vrf'] routes = { '10.0.0.0/8' : { 'next_hop' : { '192.0.2.100' : { 'distance' : '100' }, '192.0.2.110' : { 'distance' : '110', 'interface' : 'eth0' }, '192.0.2.120' : { 'distance' : '120', 'disable' : '' }, '192.0.2.130' : { 'bfd' : '' }, - '192.0.2.140' : { 'bfd_source' : '192.0.2.10' }, + '192.0.2.131' : { 'bfd' : '', + 'bfd_profile' : 'vyos1' }, + '192.0.2.140' : { 'bfd' : '', + 'bfd_source' : '192.0.2.10', + 'bfd_profile' : 'vyos2' }, }, 'interface' : { 'eth0' : { 'distance' : '130' }, 'eth1' : { 'distance' : '140' }, }, 'blackhole' : { 'distance' : '250', 'tag' : '500' }, }, '172.16.0.0/12' : { 'interface' : { 'eth0' : { 'distance' : '50', 'vrf' : 'black' }, 'eth1' : { 'distance' : '60', 'vrf' : 'black' }, }, 'blackhole' : { 'distance' : '90' }, }, '192.0.2.0/24' : { 'interface' : { 'eth0' : { 'distance' : '50', 'vrf' : 'black' }, 'eth1' : { 'disable' : '' }, }, 'blackhole' : { 'distance' : '90' }, }, '100.64.0.0/16' : { 'blackhole' : {}, }, '100.65.0.0/16' : { 'reject' : { 'distance' : '10', 'tag' : '200' }, }, '100.66.0.0/16' : { 'blackhole' : {}, 'reject' : { 'distance' : '10', 'tag' : '200' }, }, '2001:db8:100::/40' : { 'next_hop' : { '2001:db8::1' : { 'distance' : '10' }, '2001:db8::2' : { 'distance' : '20', 'interface' : 'eth0' }, '2001:db8::3' : { 'distance' : '30', 'disable' : '' }, '2001:db8::4' : { 'bfd' : '' }, '2001:db8::5' : { 'bfd_source' : '2001:db8::ffff' }, }, 'interface' : { 'eth0' : { 'distance' : '40', 'vrf' : 'black' }, 'eth1' : { 'distance' : '50', 'disable' : '' }, }, 'blackhole' : { 'distance' : '250', 'tag' : '500' }, }, '2001:db8:200::/40' : { 'interface' : { 'eth0' : { 'distance' : '40' }, 'eth1' : { 'distance' : '50', 'disable' : '' }, }, 'blackhole' : { 'distance' : '250', 'tag' : '500' }, }, '2001:db8:300::/40' : { 'reject' : { 'distance' : '250', 'tag' : '500' }, }, '2001:db8:400::/40' : { 'next_hop' : { '2001:db8::400' : { 'segments' : '2001:db8:aaaa::400/2002::400/2003::400/2004::400' }, }, }, '2001:db8:500::/40' : { 'next_hop' : { '2001:db8::500' : { 'segments' : '2001:db8:aaaa::500/2002::500/2003::500/2004::500' }, }, }, '2001:db8:600::/40' : { 'interface' : { 'eth0' : { 'segments' : '2001:db8:aaaa::600/2002::600' }, }, }, '2001:db8:700::/40' : { 'interface' : { 'eth1' : { 'segments' : '2001:db8:aaaa::700' }, }, }, '2001:db8::/32' : { 'blackhole' : { 'distance' : '200', 'tag' : '600' } }, } +multicast_routes = { + '224.0.0.0/24' : { + 'next_hop' : { + '224.203.0.1' : { }, + '224.203.0.2' : { 'distance' : '110'}, + }, + }, + '224.1.0.0/24' : { + 'next_hop' : { + '224.205.0.1' : { 'disable' : {} }, + '224.205.0.2' : { 'distance' : '110'}, + }, + }, + '224.2.0.0/24' : { + 'next_hop' : { + '1.2.3.0' : { }, + '1.2.3.1' : { 'distance' : '110'}, + }, + }, + '224.10.0.0/24' : { + 'interface' : { + 'eth1' : { 'disable' : {} }, + 'eth2' : { 'distance' : '110'}, + }, + }, + '224.11.0.0/24' : { + 'interface' : { + 'eth0' : { }, + 'eth1' : { 'distance' : '10'}, + }, + }, + '224.12.0.0/24' : { + 'interface' : { + 'eth0' : { }, + 'eth1' : { 'distance' : '200'}, + }, + }, +} + tables = ['80', '81', '82'] class TestProtocolsStatic(VyOSUnitTestSHIM.TestCase): @classmethod def setUpClass(cls): super(TestProtocolsStatic, cls).setUpClass() cls.cli_delete(cls, ['vrf']) 
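# Remove any leftover VRF configuration first, then create the 'black' VRF (table 43210) that the interface/next-hop entries in the routes dictionary reference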
cls.cli_set(cls, ['vrf', 'name', 'black', 'table', '43210']) @classmethod def tearDownClass(cls): cls.cli_delete(cls, ['vrf']) super(TestProtocolsStatic, cls).tearDownClass() def tearDown(self): self.cli_delete(base_path) self.cli_commit() v4route = self.getFRRconfig('ip route', end='') self.assertFalse(v4route) v6route = self.getFRRconfig('ipv6 route', end='') self.assertFalse(v6route) def test_01_static(self): - bfd_profile = 'vyos-test' for route, route_config in routes.items(): route_type = 'route' if is_ipv6(route): route_type = 'route6' base = base_path + [route_type, route] if 'next_hop' in route_config: for next_hop, next_hop_config in route_config['next_hop'].items(): self.cli_set(base + ['next-hop', next_hop]) if 'disable' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'disable']) if 'distance' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'distance', next_hop_config['distance']]) if 'interface' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'interface', next_hop_config['interface']]) if 'vrf' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'vrf', next_hop_config['vrf']]) if 'bfd' in next_hop_config: - self.cli_set(base + ['next-hop', next_hop, 'bfd', 'profile', bfd_profile ]) - if 'bfd_source' in next_hop_config: - self.cli_set(base + ['next-hop', next_hop, 'bfd', 'multi-hop', 'source', next_hop_config['bfd_source'], 'profile', bfd_profile]) + self.cli_set(base + ['next-hop', next_hop, 'bfd']) + if 'bfd_profile' in next_hop_config: + self.cli_set(base + ['next-hop', next_hop, 'bfd', 'profile', next_hop_config['bfd_profile']]) + if 'bfd_source' in next_hop_config: + self.cli_set(base + ['next-hop', next_hop, 'bfd', 'multi-hop']) + self.cli_set(base + ['next-hop', next_hop, 'bfd', 'source-address', next_hop_config['bfd_source']]) if 'segments' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'segments', next_hop_config['segments']]) if 'interface' in route_config: for interface, interface_config in route_config['interface'].items(): self.cli_set(base + ['interface', interface]) if 'disable' in interface_config: self.cli_set(base + ['interface', interface, 'disable']) if 'distance' in interface_config: self.cli_set(base + ['interface', interface, 'distance', interface_config['distance']]) if 'vrf' in interface_config: self.cli_set(base + ['interface', interface, 'vrf', interface_config['vrf']]) if 'segments' in interface_config: self.cli_set(base + ['interface', interface, 'segments', interface_config['segments']]) if 'blackhole' in route_config: self.cli_set(base + ['blackhole']) if 'distance' in route_config['blackhole']: self.cli_set(base + ['blackhole', 'distance', route_config['blackhole']['distance']]) if 'tag' in route_config['blackhole']: self.cli_set(base + ['blackhole', 'tag', route_config['blackhole']['tag']]) if 'reject' in route_config: self.cli_set(base + ['reject']) if 'distance' in route_config['reject']: self.cli_set(base + ['reject', 'distance', route_config['reject']['distance']]) if 'tag' in route_config['reject']: self.cli_set(base + ['reject', 'tag', route_config['reject']['tag']]) if {'blackhole', 'reject'} <= set(route_config): # Can not use blackhole and reject at the same time with self.assertRaises(ConfigSessionError): self.cli_commit() self.cli_delete(base + ['blackhole']) self.cli_delete(base + ['reject']) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig('ip route', end='') # Verify routes for route, route_config in 
routes.items(): ip_ipv6 = 'ip' if is_ipv6(route): ip_ipv6 = 'ipv6' if 'next_hop' in route_config: for next_hop, next_hop_config in route_config['next_hop'].items(): tmp = f'{ip_ipv6} route {route} {next_hop}' if 'interface' in next_hop_config: tmp += ' ' + next_hop_config['interface'] if 'distance' in next_hop_config: tmp += ' ' + next_hop_config['distance'] if 'vrf' in next_hop_config: tmp += ' nexthop-vrf ' + next_hop_config['vrf'] if 'bfd' in next_hop_config: - tmp += ' bfd profile ' + bfd_profile - if 'bfd_source' in next_hop_config: - tmp += ' bfd multi-hop source ' + next_hop_config['bfd_source'] + ' profile ' + bfd_profile + tmp += ' bfd' + if 'bfd_source' in next_hop_config: + tmp += ' multi-hop source ' + next_hop_config['bfd_source'] + if 'bfd_profile' in next_hop_config: + tmp += ' profile ' + next_hop_config['bfd_profile'] if 'segments' in next_hop_config: tmp += ' segments ' + next_hop_config['segments'] if 'disable' in next_hop_config: self.assertNotIn(tmp, frrconfig) else: self.assertIn(tmp, frrconfig) if 'interface' in route_config: for interface, interface_config in route_config['interface'].items(): tmp = f'{ip_ipv6} route {route} {interface}' if 'interface' in interface_config: tmp += ' ' + interface_config['interface'] if 'distance' in interface_config: tmp += ' ' + interface_config['distance'] if 'vrf' in interface_config: tmp += ' nexthop-vrf ' + interface_config['vrf'] if 'segments' in interface_config: tmp += ' segments ' + interface_config['segments'] if 'disable' in interface_config: self.assertNotIn(tmp, frrconfig) else: self.assertIn(tmp, frrconfig) if {'blackhole', 'reject'} <= set(route_config): # Can not use blackhole and reject at the same time # Config error validated above - skip this route continue if 'blackhole' in route_config: tmp = f'{ip_ipv6} route {route} blackhole' if 'tag' in route_config['blackhole']: tmp += ' tag ' + route_config['blackhole']['tag'] if 'distance' in route_config['blackhole']: tmp += ' ' + route_config['blackhole']['distance'] self.assertIn(tmp, frrconfig) if 'reject' in route_config: tmp = f'{ip_ipv6} route {route} reject' if 'tag' in route_config['reject']: tmp += ' tag ' + route_config['reject']['tag'] if 'distance' in route_config['reject']: tmp += ' ' + route_config['reject']['distance'] self.assertIn(tmp, frrconfig) def test_02_static_table(self): for table in tables: for route, route_config in routes.items(): route_type = 'route' if is_ipv6(route): route_type = 'route6' base = base_path + ['table', table, route_type, route] if 'next_hop' in route_config: for next_hop, next_hop_config in route_config['next_hop'].items(): self.cli_set(base + ['next-hop', next_hop]) if 'disable' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'disable']) if 'distance' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'distance', next_hop_config['distance']]) if 'interface' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'interface', next_hop_config['interface']]) if 'vrf' in next_hop_config: self.cli_set(base + ['next-hop', next_hop, 'vrf', next_hop_config['vrf']]) if 'interface' in route_config: for interface, interface_config in route_config['interface'].items(): self.cli_set(base + ['interface', interface]) if 'disable' in interface_config: self.cli_set(base + ['interface', interface, 'disable']) if 'distance' in interface_config: self.cli_set(base + ['interface', interface, 'distance', interface_config['distance']]) if 'vrf' in interface_config: self.cli_set(base + ['interface', interface, 'vrf', 
interface_config['vrf']]) if 'blackhole' in route_config: self.cli_set(base + ['blackhole']) if 'distance' in route_config['blackhole']: self.cli_set(base + ['blackhole', 'distance', route_config['blackhole']['distance']]) if 'tag' in route_config['blackhole']: self.cli_set(base + ['blackhole', 'tag', route_config['blackhole']['tag']]) # commit changes self.cli_commit() # Verify FRR bgpd configuration frrconfig = self.getFRRconfig('ip route', end='') for table in tables: # Verify routes for route, route_config in routes.items(): ip_ipv6 = 'ip' if is_ipv6(route): ip_ipv6 = 'ipv6' if 'next_hop' in route_config: for next_hop, next_hop_config in route_config['next_hop'].items(): tmp = f'{ip_ipv6} route {route} {next_hop}' if 'interface' in next_hop_config: tmp += ' ' + next_hop_config['interface'] if 'distance' in next_hop_config: tmp += ' ' + next_hop_config['distance'] if 'vrf' in next_hop_config: tmp += ' nexthop-vrf ' + next_hop_config['vrf'] tmp += ' table ' + table if 'disable' in next_hop_config: self.assertNotIn(tmp, frrconfig) else: self.assertIn(tmp, frrconfig) if 'interface' in route_config: for interface, interface_config in route_config['interface'].items(): tmp = f'{ip_ipv6} route {route} {interface}' if 'interface' in interface_config: tmp += ' ' + interface_config['interface'] if 'distance' in interface_config: tmp += ' ' + interface_config['distance'] if 'vrf' in interface_config: tmp += ' nexthop-vrf ' + interface_config['vrf'] tmp += ' table ' + table if 'disable' in interface_config: self.assertNotIn(tmp, frrconfig) else: self.assertIn(tmp, frrconfig) if 'blackhole' in route_config: tmp = f'{ip_ipv6} route {route} blackhole' if 'tag' in route_config['blackhole']: tmp += ' tag ' + route_config['blackhole']['tag'] if 'distance' in route_config['blackhole']: tmp += ' ' + route_config['blackhole']['distance'] tmp += ' table ' + table self.assertIn(tmp, frrconfig) def test_03_static_vrf(self): # Create VRF instances and apply the static routes from above to FRR. # Re-read the configured routes and match them if they are programmed # properly. 
This also includes VRF leaking vrfs = { 'red' : { 'table' : '1000' }, 'green' : { 'table' : '2000' }, 'blue' : { 'table' : '3000' }, } for vrf, vrf_config in vrfs.items(): vrf_base_path = ['vrf', 'name', vrf] self.cli_set(vrf_base_path + ['table', vrf_config['table']]) for route, route_config in routes.items(): route_type = 'route' if is_ipv6(route): route_type = 'route6' route_base_path = vrf_base_path + ['protocols', 'static', route_type, route] if 'next_hop' in route_config: for next_hop, next_hop_config in route_config['next_hop'].items(): self.cli_set(route_base_path + ['next-hop', next_hop]) if 'disable' in next_hop_config: self.cli_set(route_base_path + ['next-hop', next_hop, 'disable']) if 'distance' in next_hop_config: self.cli_set(route_base_path + ['next-hop', next_hop, 'distance', next_hop_config['distance']]) if 'interface' in next_hop_config: self.cli_set(route_base_path + ['next-hop', next_hop, 'interface', next_hop_config['interface']]) if 'vrf' in next_hop_config: self.cli_set(route_base_path + ['next-hop', next_hop, 'vrf', next_hop_config['vrf']]) if 'segments' in next_hop_config: self.cli_set(route_base_path + ['next-hop', next_hop, 'segments', next_hop_config['segments']]) if 'interface' in route_config: for interface, interface_config in route_config['interface'].items(): self.cli_set(route_base_path + ['interface', interface]) if 'disable' in interface_config: self.cli_set(route_base_path + ['interface', interface, 'disable']) if 'distance' in interface_config: self.cli_set(route_base_path + ['interface', interface, 'distance', interface_config['distance']]) if 'vrf' in interface_config: self.cli_set(route_base_path + ['interface', interface, 'vrf', interface_config['vrf']]) if 'segments' in interface_config: self.cli_set(route_base_path + ['interface', interface, 'segments', interface_config['segments']]) if 'blackhole' in route_config: self.cli_set(route_base_path + ['blackhole']) if 'distance' in route_config['blackhole']: self.cli_set(route_base_path + ['blackhole', 'distance', route_config['blackhole']['distance']]) if 'tag' in route_config['blackhole']: self.cli_set(route_base_path + ['blackhole', 'tag', route_config['blackhole']['tag']]) # commit changes self.cli_commit() for vrf, vrf_config in vrfs.items(): tmp = get_interface_config(vrf) # Compare VRF table ID self.assertEqual(get_vrf_tableid(vrf), int(vrf_config['table'])) self.assertEqual(tmp['linkinfo']['info_kind'], 'vrf') # Verify FRR bgpd configuration - frrconfig = self.getFRRconfig(f'vrf {vrf}') + frrconfig = self.getFRRconfig(f'vrf {vrf}', endsection='^exit-vrf') self.assertIn(f'vrf {vrf}', frrconfig) # Verify routes for route, route_config in routes.items(): ip_ipv6 = 'ip' if is_ipv6(route): ip_ipv6 = 'ipv6' if 'next_hop' in route_config: for next_hop, next_hop_config in route_config['next_hop'].items(): tmp = f'{ip_ipv6} route {route} {next_hop}' if 'interface' in next_hop_config: tmp += ' ' + next_hop_config['interface'] if 'distance' in next_hop_config: tmp += ' ' + next_hop_config['distance'] if 'vrf' in next_hop_config: tmp += ' nexthop-vrf ' + next_hop_config['vrf'] if 'segments' in next_hop_config: tmp += ' segments ' + next_hop_config['segments'] if 'disable' in next_hop_config: self.assertNotIn(tmp, frrconfig) else: self.assertIn(tmp, frrconfig) if 'interface' in route_config: for interface, interface_config in route_config['interface'].items(): tmp = f'{ip_ipv6} route {route} {interface}' if 'interface' in interface_config: tmp += ' ' + interface_config['interface'] if 'distance' in 
interface_config: tmp += ' ' + interface_config['distance'] if 'vrf' in interface_config: tmp += ' nexthop-vrf ' + interface_config['vrf'] if 'segments' in interface_config: tmp += ' segments ' + interface_config['segments'] if 'disable' in interface_config: self.assertNotIn(tmp, frrconfig) else: self.assertIn(tmp, frrconfig) if 'blackhole' in route_config: tmp = f'{ip_ipv6} route {route} blackhole' if 'tag' in route_config['blackhole']: tmp += ' tag ' + route_config['blackhole']['tag'] if 'distance' in route_config['blackhole']: tmp += ' ' + route_config['blackhole']['distance'] self.assertIn(tmp, frrconfig) + def test_04_static_multicast(self): + for route, route_config in multicast_routes.items(): + if 'next_hop' in route_config: + base = base_path + ['multicast', 'route', route] + for next_hop, next_hop_config in route_config['next_hop'].items(): + self.cli_set(base + ['next-hop', next_hop]) + if 'distance' in next_hop_config: + self.cli_set(base + ['next-hop', next_hop, 'distance', next_hop_config['distance']]) + if 'disable' in next_hop_config: + self.cli_set(base + ['next-hop', next_hop, 'disable']) + + if 'interface' in route_config: + base = base_path + ['multicast', 'route', route] + for next_hop, next_hop_config in route_config['interface'].items(): + self.cli_set(base + ['interface', next_hop]) + if 'distance' in next_hop_config: + self.cli_set(base + ['interface', next_hop, 'distance', next_hop_config['distance']]) + + self.cli_commit() + + # Verify FRR configuration + frrconfig = self.getFRRconfig('ip mroute', end='') + for route, route_config in multicast_routes.items(): + if 'next_hop' in route_config: + for next_hop, next_hop_config in route_config['next_hop'].items(): + tmp = f'ip mroute {route} {next_hop}' + if 'distance' in next_hop_config: + tmp += ' ' + next_hop_config['distance'] + if 'disable' in next_hop_config: + self.assertNotIn(tmp, frrconfig) + else: + self.assertIn(tmp, frrconfig) + + if 'next_hop_interface' in route_config: + for next_hop, next_hop_config in route_config['next_hop_interface'].items(): + tmp = f'ip mroute {route} {next_hop}' + if 'distance' in next_hop_config: + tmp += ' ' + next_hop_config['distance'] + if 'disable' in next_hop_config: + self.assertNotIn(tmp, frrconfig) + else: + self.assertIn(tmp, frrconfig) + if __name__ == '__main__': - unittest.main(verbosity=2, failfast=True) + unittest.main(verbosity=2) diff --git a/smoketest/scripts/cli/test_protocols_static_multicast.py b/smoketest/scripts/cli/test_protocols_static_multicast.py deleted file mode 100755 index 9fdda236f..000000000 --- a/smoketest/scripts/cli/test_protocols_static_multicast.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2024 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
- -import unittest - -from base_vyostest_shim import VyOSUnitTestSHIM - - -base_path = ['protocols', 'static', 'multicast'] - - -class TestProtocolsStaticMulticast(VyOSUnitTestSHIM.TestCase): - - def tearDown(self): - self.cli_delete(base_path) - self.cli_commit() - - mroute = self.getFRRconfig('ip mroute', end='') - self.assertFalse(mroute) - - def test_01_static_multicast(self): - - self.cli_set(base_path + ['route', '224.202.0.0/24', 'next-hop', '224.203.0.1']) - self.cli_set(base_path + ['interface-route', '224.203.0.0/24', 'next-hop-interface', 'eth0']) - - self.cli_commit() - - # Verify FRR bgpd configuration - frrconfig = self.getFRRconfig('ip mroute', end='') - - self.assertIn('ip mroute 224.202.0.0/24 224.203.0.1', frrconfig) - self.assertIn('ip mroute 224.203.0.0/24 eth0', frrconfig) - - -if __name__ == '__main__': - unittest.main(verbosity=2) diff --git a/src/conf_mode/interfaces_bonding.py b/src/conf_mode/interfaces_bonding.py index 633fb797c..adea8fc63 100755 --- a/src/conf_mode/interfaces_bonding.py +++ b/src/conf_mode/interfaces_bonding.py @@ -1,304 +1,298 @@ #!/usr/bin/env python3 # # Copyright (C) 2019-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.config import Config +from vyos.configdict import get_frrender_dict from vyos.configdict import get_interface_dict from vyos.configdict import is_node_changed from vyos.configdict import leaf_node_changed from vyos.configdict import is_member from vyos.configdict import is_source_interface from vyos.configverify import verify_address from vyos.configverify import verify_bridge_delete from vyos.configverify import verify_dhcpv6 from vyos.configverify import verify_eapol from vyos.configverify import verify_mirror_redirect from vyos.configverify import verify_mtu_ipv6 from vyos.configverify import verify_vlan_config from vyos.configverify import verify_vrf +from vyos.frrender import FRRender from vyos.ifconfig import BondIf from vyos.ifconfig.ethernet import EthernetIf from vyos.ifconfig import Section -from vyos.template import render_to_string from vyos.utils.assertion import assert_mac from vyos.utils.dict import dict_search from vyos.utils.dict import dict_to_paths_values from vyos.utils.network import interface_exists from vyos.configdict import has_address_configured from vyos.configdict import has_vrf_configured from vyos.configdep import set_dependents, call_dependents from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() def get_bond_mode(mode): if mode == 'round-robin': return 'balance-rr' elif mode == 'active-backup': return 'active-backup' elif mode == 'xor-hash': return 'balance-xor' elif mode == 'broadcast': return 'broadcast' elif mode == '802.3ad': return '802.3ad' elif mode == 'transmit-load-balance': return 'balance-tlb' elif mode == 'adaptive-load-balance': return 'balance-alb' else: raise ConfigError(f'invalid bond mode "{mode}"') def get_config(config=None): 
""" Retrive CLI config as dictionary. Dictionary can never be empty, as at least the interface name will be added or a deleted flag """ if config: conf = config else: conf = Config() base = ['interfaces', 'bonding'] ifname, bond = get_interface_dict(conf, base, with_pki=True) # To make our own life easier transfor the list of member interfaces # into a dictionary - we will use this to add additional information # later on for each member if 'member' in bond and 'interface' in bond['member']: # convert list of member interfaces to a dictionary bond['member']['interface'] = {k: {} for k in bond['member']['interface']} if 'mode' in bond: bond['mode'] = get_bond_mode(bond['mode']) tmp = is_node_changed(conf, base + [ifname, 'mode']) - if tmp: bond['shutdown_required'] = {} + if tmp: bond.update({'shutdown_required' : {}}) tmp = is_node_changed(conf, base + [ifname, 'lacp-rate']) - if tmp: bond['shutdown_required'] = {} + if tmp: bond.update({'shutdown_required' : {}}) + + tmp = is_node_changed(conf, base + [ifname, 'evpn']) + if tmp: bond.update({'frrender' : get_frrender_dict(conf)}) # determine which members have been removed interfaces_removed = leaf_node_changed(conf, base + [ifname, 'member', 'interface']) # Reset config level to interfaces old_level = conf.get_level() conf.set_level(['interfaces']) if interfaces_removed: bond['shutdown_required'] = {} if 'member' not in bond: bond['member'] = {} tmp = {} for interface in interfaces_removed: # if member is deleted from bond, add dependencies to call # ethernet commit again in apply function # to apply options under ethernet section set_dependents('ethernet', conf, interface) section = Section.section(interface) # this will be 'ethernet' for 'eth0' if conf.exists([section, interface, 'disable']): tmp[interface] = {'disable': ''} else: tmp[interface] = {} # also present the interfaces to be removed from the bond as dictionary bond['member']['interface_remove'] = tmp # Restore existing config level conf.set_level(old_level) if dict_search('member.interface', bond): for interface, interface_config in bond['member']['interface'].items(): interface_ethernet_config = conf.get_config_dict( ['interfaces', 'ethernet', interface], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_defaults=False, with_recursive_defaults=False) interface_config['config_paths'] = dict_to_paths_values(interface_ethernet_config) # Check if member interface is a new member if not conf.exists_effective(base + [ifname, 'member', 'interface', interface]): bond['shutdown_required'] = {} interface_config['new_added'] = {} # Check if member interface is disabled conf.set_level(['interfaces']) section = Section.section(interface) # this will be 'ethernet' for 'eth0' if conf.exists([section, interface, 'disable']): interface_config['disable'] = '' conf.set_level(old_level) # Check if member interface is already member of another bridge tmp = is_member(conf, interface, 'bridge') if tmp: interface_config['is_bridge_member'] = tmp # Check if member interface is already member of a bond tmp = is_member(conf, interface, 'bonding') for tmp in is_member(conf, interface, 'bonding'): if bond['ifname'] == tmp: continue interface_config['is_bond_member'] = tmp # Check if member interface is used as source-interface on another interface tmp = is_source_interface(conf, interface) if tmp: interface_config['is_source_interface'] = tmp # bond members must not have an assigned address tmp = has_address_configured(conf, interface) if tmp: 
interface_config['has_address'] = {} # bond members must not have a VRF attached tmp = has_vrf_configured(conf, interface) if tmp: interface_config['has_vrf'] = {} return bond def verify(bond): if 'deleted' in bond: verify_bridge_delete(bond) return None if 'arp_monitor' in bond: if 'target' in bond['arp_monitor'] and len(bond['arp_monitor']['target']) > 16: raise ConfigError('The maximum number of arp-monitor targets is 16') if 'interval' in bond['arp_monitor'] and int(bond['arp_monitor']['interval']) > 0: if bond['mode'] in ['802.3ad', 'balance-tlb', 'balance-alb']: raise ConfigError('ARP link monitoring does not work for mode 802.3ad, ' \ 'transmit-load-balance or adaptive-load-balance') if 'primary' in bond: if bond['mode'] not in ['active-backup', 'balance-tlb', 'balance-alb']: raise ConfigError('Option primary - mode dependency failed, not' 'supported in mode {mode}!'.format(**bond)) verify_mtu_ipv6(bond) verify_address(bond) verify_dhcpv6(bond) verify_vrf(bond) verify_mirror_redirect(bond) verify_eapol(bond) # use common function to verify VLAN configuration verify_vlan_config(bond) bond_name = bond['ifname'] if dict_search('member.interface', bond): for interface, interface_config in bond['member']['interface'].items(): error_msg = f'Can not add interface "{interface}" to bond, ' if interface == 'lo': raise ConfigError('Loopback interface "lo" can not be added to a bond') if not interface_exists(interface): raise ConfigError(error_msg + 'it does not exist!') if 'is_bridge_member' in interface_config: tmp = next(iter(interface_config['is_bridge_member'])) raise ConfigError(error_msg + f'it is already a member of bridge "{tmp}"!') if 'is_bond_member' in interface_config: tmp = next(iter(interface_config['is_bond_member'])) raise ConfigError(error_msg + f'it is already a member of bond "{tmp}"!') if 'is_source_interface' in interface_config: tmp = interface_config['is_source_interface'] raise ConfigError(error_msg + f'it is the source-interface of "{tmp}"!') if 'has_address' in interface_config: raise ConfigError(error_msg + 'it has an address assigned!') if 'has_vrf' in interface_config: raise ConfigError(error_msg + 'it has a VRF assigned!') if 'new_added' in interface_config and 'config_paths' in interface_config: for option_path, option_value in interface_config['config_paths'].items(): if option_path in EthernetIf.get_bond_member_allowed_options() : continue if option_path in BondIf.get_inherit_bond_options(): continue raise ConfigError(error_msg + f'it has a "{option_path.replace(".", " ")}" assigned!') if 'primary' in bond: if bond['primary'] not in bond['member']['interface']: raise ConfigError(f'Primary interface of bond "{bond_name}" must be a member interface') if bond['mode'] not in ['active-backup', 'balance-tlb', 'balance-alb']: raise ConfigError('primary interface only works for mode active-backup, ' \ 'transmit-load-balance or adaptive-load-balance') if 'system_mac' in bond: if bond['mode'] != '802.3ad': raise ConfigError('Actor MAC address only available in 802.3ad mode!') system_mac = bond['system_mac'] try: assert_mac(system_mac, test_all_zero=False) except: raise ConfigError(f'Cannot use a multicast MAC address "{system_mac}" as system-mac!') return None def generate(bond): - bond['frr_zebra_config'] = '' - if 'deleted' not in bond: - bond['frr_zebra_config'] = render_to_string('frr/evpn.mh.frr.j2', bond) + if 'frrender' in bond: + frrender.generate(bond['frrender']) return None def apply(bond): - ifname = bond['ifname'] - b = BondIf(ifname) + if 'frrender' in 
bond: + frrender.apply() + + b = BondIf(bond['ifname']) if 'deleted' in bond: - # delete interface b.remove() else: b.update(bond) if dict_search('member.interface_remove', bond): try: call_dependents() except ConfigError: raise ConfigError('Error in updating ethernet interface ' 'after deleting it from bond') - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(frr.mgmt_daemon) - frr_cfg.modify_section(f'^interface {ifname}', stop_pattern='^exit', remove_stop_mark=True) - if 'frr_zebra_config' in bond: - frr_cfg.add_before(frr.default_add_before, bond['frr_zebra_config']) - frr_cfg.commit_configuration() - return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/interfaces_ethernet.py b/src/conf_mode/interfaces_ethernet.py index edbbb00c9..6a035e9e9 100755 --- a/src/conf_mode/interfaces_ethernet.py +++ b/src/conf_mode/interfaces_ethernet.py @@ -1,357 +1,349 @@ #!/usr/bin/env python3 # # Copyright (C) 2019-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
import os from sys import exit from vyos.base import Warning from vyos.config import Config +from vyos.configdict import get_frrender_dict from vyos.configdict import get_interface_dict from vyos.configdict import is_node_changed from vyos.configverify import verify_address from vyos.configverify import verify_dhcpv6 from vyos.configverify import verify_interface_exists from vyos.configverify import verify_mirror_redirect from vyos.configverify import verify_mtu from vyos.configverify import verify_mtu_ipv6 from vyos.configverify import verify_vlan_config from vyos.configverify import verify_vrf from vyos.configverify import verify_bond_bridge_member from vyos.configverify import verify_eapol from vyos.ethtool import Ethtool +from vyos.frrender import FRRender from vyos.ifconfig import EthernetIf from vyos.ifconfig import BondIf -from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.dict import dict_to_paths_values from vyos.utils.dict import dict_set from vyos.utils.dict import dict_delete from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() def update_bond_options(conf: Config, eth_conf: dict) -> list: """ Return list of blocked options if interface is a bond member :param conf: Config object :type conf: Config :param eth_conf: Ethernet config dictionary :type eth_conf: dict :return: List of blocked options :rtype: list """ blocked_list = [] bond_name = list(eth_conf['is_bond_member'].keys())[0] config_without_defaults = conf.get_config_dict( ['interfaces', 'ethernet', eth_conf['ifname']], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_defaults=False, with_recursive_defaults=False) config_with_defaults = conf.get_config_dict( ['interfaces', 'ethernet', eth_conf['ifname']], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_defaults=True, with_recursive_defaults=True) bond_config_with_defaults = conf.get_config_dict( ['interfaces', 'bonding', bond_name], key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True, with_defaults=True, with_recursive_defaults=True) eth_dict_paths = dict_to_paths_values(config_without_defaults) eth_path_base = ['interfaces', 'ethernet', eth_conf['ifname']] # if option is configured under ethernet section for option_path, option_value in eth_dict_paths.items(): bond_option_value = dict_search(option_path, bond_config_with_defaults) # If option is allowed to be changed then continue if option_path in EthernetIf.get_bond_member_allowed_options(): continue # if option is inherited from bond then set value from bond interface if option_path in BondIf.get_inherit_bond_options(): # If option equals to bond option then do nothing if option_value == bond_option_value: continue else: # if ethernet has option and bond interface has # then copy it from bond if bond_option_value is not None: if is_node_changed(conf, eth_path_base + option_path.split('.')): Warning( f'Cannot apply "{option_path.replace(".", " ")}" to "{option_value}".' \ f' Interface "{eth_conf["ifname"]}" is a bond member.' \ f' Option is inherited from bond "{bond_name}"') dict_set(option_path, bond_option_value, eth_conf) continue # if ethernet has option and bond interface does not have # then delete it from dict and do not apply it else: if is_node_changed(conf, eth_path_base + option_path.split('.')): Warning( f'Cannot apply "{option_path.replace(".", " ")}".' \ f' Interface "{eth_conf["ifname"]}" is a bond member.'
\ f' Option is inherited from bond "{bond_name}"') dict_delete(option_path, eth_conf) blocked_list.append(option_path) # if inherited option is not configured under ethernet section but configured under bond section for option_path in BondIf.get_inherit_bond_options(): bond_option_value = dict_search(option_path, bond_config_with_defaults) if bond_option_value is not None: if option_path not in eth_dict_paths: if is_node_changed(conf, eth_path_base + option_path.split('.')): Warning( f'Cannot apply "{option_path.replace(".", " ")}" to "{dict_search(option_path, config_with_defaults)}".' \ f' Interface "{eth_conf["ifname"]}" is a bond member. ' \ f'Option is inherited from bond "{bond_name}"') dict_set(option_path, bond_option_value, eth_conf) eth_conf['bond_blocked_changes'] = blocked_list return None def get_config(config=None): """ Retrieve CLI config as dictionary. Dictionary can never be empty, as at least the interface name will be added or a deleted flag set """ if config: conf = config else: conf = Config() base = ['interfaces', 'ethernet'] ifname, ethernet = get_interface_dict(conf, base, with_pki=True) # T5862 - default MTU is not acceptable in some environments # There are cloud environments available where the maximum supported # ethernet MTU is e.g. 1450 bytes, thus we clamp this to the adapter's # maximum MTU value or 1500 bytes - whatever is lower if 'mtu' not in ethernet: try: ethernet['mtu'] = '1500' max_mtu = EthernetIf(ifname).get_max_mtu() if max_mtu < int(ethernet['mtu']): ethernet['mtu'] = str(max_mtu) except: pass if 'is_bond_member' in ethernet: update_bond_options(conf, ethernet) tmp = is_node_changed(conf, base + [ifname, 'speed']) if tmp: ethernet.update({'speed_duplex_changed': {}}) tmp = is_node_changed(conf, base + [ifname, 'duplex']) if tmp: ethernet.update({'speed_duplex_changed': {}}) + tmp = is_node_changed(conf, base + [ifname, 'evpn']) + if tmp: ethernet.update({'frrender' : get_frrender_dict(conf)}) + return ethernet def verify_speed_duplex(ethernet: dict, ethtool: Ethtool): """ Verify speed and duplex :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict :param ethtool: Ethtool object :type ethtool: Ethtool """ if ((ethernet['speed'] == 'auto' and ethernet['duplex'] != 'auto') or (ethernet['speed'] != 'auto' and ethernet['duplex'] == 'auto')): raise ConfigError( 'Speed/Duplex mismatch. Must be both auto or manually configured') if ethernet['speed'] != 'auto' and ethernet['duplex'] != 'auto': # We need to verify if the requested speed and duplex setting is # supported by the underlying NIC.
speed = ethernet['speed'] duplex = ethernet['duplex'] if not ethtool.check_speed_duplex(speed, duplex): raise ConfigError( f'Adapter does not support changing speed ' \ f'and duplex settings to: {speed}/{duplex}!') def verify_flow_control(ethernet: dict, ethtool: Ethtool): """ Verify flow control :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict :param ethtool: Ethtool object :type ethtool: Ethtool """ if 'disable_flow_control' in ethernet: if not ethtool.check_flow_control(): raise ConfigError( 'Adapter does not support changing flow-control settings!') def verify_ring_buffer(ethernet: dict, ethtool: Ethtool): """ Verify ring buffer :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict :param ethtool: Ethtool object :type ethtool: Ethtool """ if 'ring_buffer' in ethernet: max_rx = ethtool.get_ring_buffer_max('rx') if not max_rx: raise ConfigError( 'Driver does not support RX ring-buffer configuration!') max_tx = ethtool.get_ring_buffer_max('tx') if not max_tx: raise ConfigError( 'Driver does not support TX ring-buffer configuration!') rx = dict_search('ring_buffer.rx', ethernet) if rx and int(rx) > int(max_rx): raise ConfigError(f'Driver only supports a maximum RX ring-buffer ' \ f'size of "{max_rx}" bytes!') tx = dict_search('ring_buffer.tx', ethernet) if tx and int(tx) > int(max_tx): raise ConfigError(f'Driver only supports a maximum TX ring-buffer ' \ f'size of "{max_tx}" bytes!') def verify_offload(ethernet: dict, ethtool: Ethtool): """ Verify offloading capabilities :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict :param ethtool: Ethtool object :type ethtool: Ethtool """ if dict_search('offload.rps', ethernet) != None: if not os.path.exists(f'/sys/class/net/{ethernet["ifname"]}/queues/rx-0/rps_cpus'): raise ConfigError('Interface does not support RPS!') driver = ethtool.get_driver_name() # T3342 - Xen driver requires special treatment if driver == 'vif': if int(ethernet['mtu']) > 1500 and dict_search('offload.sg', ethernet) == None: raise ConfigError('Xen netback driver requires scatter-gather offloading '\ 'for MTU size larger than 1500 bytes') def verify_allowedbond_changes(ethernet: dict): """ Verify changed options if interface is in bonding :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict """ if 'bond_blocked_changes' in ethernet: for option in ethernet['bond_blocked_changes']: raise ConfigError(f'Cannot configure "{option.replace(".", " ")}"' \ f' on interface "{ethernet["ifname"]}".'
\ f' Interface is a bond member') def verify(ethernet): if 'deleted' in ethernet: return None if 'is_bond_member' in ethernet: verify_bond_member(ethernet) else: verify_ethernet(ethernet) def verify_bond_member(ethernet): """ Verification function for ethernet interface which is in bonding :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict """ ifname = ethernet['ifname'] verify_interface_exists(ethernet, ifname) verify_eapol(ethernet) verify_mirror_redirect(ethernet) ethtool = Ethtool(ifname) verify_speed_duplex(ethernet, ethtool) verify_flow_control(ethernet, ethtool) verify_ring_buffer(ethernet, ethtool) verify_offload(ethernet, ethtool) verify_allowedbond_changes(ethernet) def verify_ethernet(ethernet): """ Verification function for simple ethernet interface :param ethernet: dictionary which is received from get_interface_dict :type ethernet: dict """ ifname = ethernet['ifname'] verify_interface_exists(ethernet, ifname) verify_mtu(ethernet) verify_mtu_ipv6(ethernet) verify_dhcpv6(ethernet) verify_address(ethernet) verify_vrf(ethernet) verify_bond_bridge_member(ethernet) verify_eapol(ethernet) verify_mirror_redirect(ethernet) ethtool = Ethtool(ifname) # No need to check speed and duplex keys as both have default values. verify_speed_duplex(ethernet, ethtool) verify_flow_control(ethernet, ethtool) verify_ring_buffer(ethernet, ethtool) verify_offload(ethernet, ethtool) # use common function to verify VLAN configuration verify_vlan_config(ethernet) return None def generate(ethernet): - if 'deleted' in ethernet: - return None - - ethernet['frr_zebra_config'] = '' - if 'deleted' not in ethernet: - ethernet['frr_zebra_config'] = render_to_string('frr/evpn.mh.frr.j2', ethernet) - + if 'frrender' in ethernet: + frrender.generate(ethernet['frrender']) return None def apply(ethernet): - ifname = ethernet['ifname'] + if 'frrender' in ethernet: + frrender.apply() - e = EthernetIf(ifname) + e = EthernetIf(ethernet['ifname']) if 'deleted' in ethernet: - # delete interface e.remove() else: e.update(ethernet) - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(frr.mgmt_daemon) - frr_cfg.modify_section(f'^interface {ifname}', stop_pattern='^exit', remove_stop_mark=True) - if 'frr_zebra_config' in ethernet: - frr_cfg.add_before(frr.default_add_before, ethernet['frr_zebra_config']) - frr_cfg.commit_configuration() + return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/policy.py b/src/conf_mode/policy.py index aef9b96c4..e6b6d474a 100755 --- a/src/conf_mode/policy.py +++ b/src/conf_mode/policy.py @@ -1,320 +1,281 @@ #!/usr/bin/env python3 # -# Copyright (C) 2021-2022 VyOS maintainers and contributors +# Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
from sys import exit from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.template import render_to_string +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender +from vyos.frrender import frr_protocols from vyos.utils.dict import dict_search from vyos import ConfigError -from vyos import frr from vyos import airbag - airbag.enable() +frrender = FRRender() def community_action_compatibility(actions: dict) -> bool: """ Check compatibility of values in community and large community sections :param actions: dictionary with community :type actions: dict :return: true if compatible, false if not :rtype: bool """ if ('none' in actions) and ('replace' in actions or 'add' in actions): return False if 'replace' in actions and 'add' in actions: return False if ('delete' in actions) and ('none' in actions or 'replace' in actions): return False return True def extcommunity_action_compatibility(actions: dict) -> bool: """ Check compatibility of values in extended community sections :param actions: dictionary with community :type actions: dict :return: true if compatible, false if not :rtype: bool """ if ('none' in actions) and ( 'rt' in actions or 'soo' in actions or 'bandwidth' in actions or 'bandwidth_non_transitive' in actions): return False if ('bandwidth_non_transitive' in actions) and ('bandwidth' not in actions): return False return True def routing_policy_find(key, dictionary): # Recursively traverse a dictionary and extract the value assigned to # a given key as generator object. This is made for routing policies, # thus also import/export is checked for k, v in dictionary.items(): if k == key: if isinstance(v, dict): for a, b in v.items(): if a in ['import', 'export']: yield b else: yield v elif isinstance(v, dict): for result in routing_policy_find(key, v): yield result elif isinstance(v, list): for d in v: if isinstance(d, dict): for result in routing_policy_find(key, d): yield result def get_config(config=None): if config: conf = config else: conf = Config() - base = ['policy'] - policy = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = conf.get_config_dict(['protocols'], key_mangling=('-', '_'), - no_tag_node_value_mangle=True) - # Merge policy dict into "regular" config dict - policy = dict_merge(tmp, policy) - return policy - - -def verify(policy): - if not policy: + return get_frrender_dict(conf) + + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'policy'): return None - for policy_type in ['access_list', 'access_list6', 'as_path_list', - 'community_list', 'extcommunity_list', - 'large_community_list', - 'prefix_list', 'prefix_list6', 'route_map']: + policy_types = ['access_list', 'access_list6', 'as_path_list', + 'community_list', 'extcommunity_list', + 'large_community_list', 'prefix_list', + 'prefix_list6', 'route_map'] + + policy = config_dict['policy'] + for protocol in frr_protocols: + if protocol not in config_dict: + continue + if 'protocol' not in policy: + policy.update({'protocol': {}}) + policy['protocol'].update({protocol : config_dict[protocol]}) + + for policy_type in policy_types: # Bail out early and continue with next policy type if policy_type not in policy: continue # instance can be an ACL name/number, prefix-list name or route-map name for instance, instance_config in policy[policy_type].items(): # If no rule was found within the instance ... sad, but we can leave # early as nothing needs to be verified if 'rule' not in instance_config: continue # human readable instance name (hyphen instead of underscore) policy_hr = policy_type.replace('_', '-') entries = [] for rule, rule_config in instance_config['rule'].items(): mandatory_error = f'must be specified for "{policy_hr} {instance} rule {rule}"!' if 'action' not in rule_config: raise ConfigError(f'Action {mandatory_error}') if policy_type == 'access_list': if 'source' not in rule_config: raise ConfigError(f'A source {mandatory_error}') if int(instance) in range(100, 200) or int( instance) in range(2000, 2700): if 'destination' not in rule_config: raise ConfigError( f'A destination {mandatory_error}') if policy_type == 'access_list6': if 'source' not in rule_config: raise ConfigError(f'A source {mandatory_error}') if policy_type in ['as_path_list', 'community_list', 'extcommunity_list', 'large_community_list']: if 'regex' not in rule_config: raise ConfigError(f'A regex {mandatory_error}') if policy_type in ['prefix_list', 'prefix_list6']: if 'prefix' not in rule_config: raise ConfigError(f'A prefix {mandatory_error}') if rule_config in entries: raise ConfigError( f'Rule "{rule}" contains a duplicate prefix definition!') entries.append(rule_config)
does not exist!') # Specified large-community-list must exist tmp = dict_search('match.large_community.large_community_list', rule_config) if tmp and tmp not in policy.get('large_community_list', []): raise ConfigError( f'large-community-list {tmp} does not exist!') # Specified prefix-list must exist tmp = dict_search('match.ip.address.prefix_list', rule_config) if tmp and tmp not in policy.get('prefix_list', []): raise ConfigError(f'prefix-list {tmp} does not exist!') # Specified prefix-list must exist tmp = dict_search('match.ipv6.address.prefix_list', rule_config) if tmp and tmp not in policy.get('prefix_list6', []): raise ConfigError(f'prefix-list6 {tmp} does not exist!') # Specified access_list6 in nexthop must exist tmp = dict_search('match.ipv6.nexthop.access_list', rule_config) if tmp and tmp not in policy.get('access_list6', []): raise ConfigError(f'access_list6 {tmp} does not exist!') # Specified prefix-list6 in nexthop must exist tmp = dict_search('match.ipv6.nexthop.prefix_list', rule_config) if tmp and tmp not in policy.get('prefix_list6', []): raise ConfigError(f'prefix-list6 {tmp} does not exist!') tmp = dict_search('set.community.delete', rule_config) if tmp and tmp not in policy.get('community_list', []): raise ConfigError(f'community-list {tmp} does not exist!') tmp = dict_search('set.large_community.delete', rule_config) if tmp and tmp not in policy.get('large_community_list', []): raise ConfigError( f'large-community-list {tmp} does not exist!') if 'set' in rule_config: rule_action = rule_config['set'] if 'community' in rule_action: if not community_action_compatibility( rule_action['community']): raise ConfigError( f'Unexpected combination between action replace, add, delete or none in community') if 'large_community' in rule_action: if not community_action_compatibility( rule_action['large_community']): raise ConfigError( f'Unexpected combination between action replace, add, delete or none in large-community') if 'extcommunity' in rule_action: if not extcommunity_action_compatibility( rule_action['extcommunity']): raise ConfigError( f'Unexpected combination between none, rt, soo, bandwidth, bandwidth-non-transitive in extended-community') # When routing protocols are active some use prefix-lists, route-maps etc. # to apply the systems routing policy to the learned or redistributed routes. # When the "routing policy" changes and policies, route-maps etc. 
are deleted, # it is our responsibility to verify that the policy can not be deleted if it # is used by any routing protocol - if 'protocols' in policy: - for policy_type in ['access_list', 'access_list6', 'as_path_list', - 'community_list', - 'extcommunity_list', 'large_community_list', - 'prefix_list', 'route_map']: - if policy_type in policy: - for policy_name in list(set(routing_policy_find(policy_type, - policy[ - 'protocols']))): - found = False - if policy_name in policy[policy_type]: - found = True - # BGP uses prefix-list for selecting both an IPv4 or IPv6 AFI related - # list - we need to go the extra mile here and check both prefix-lists - if policy_type == 'prefix_list' and 'prefix_list6' in policy and policy_name in \ - policy['prefix_list6']: - found = True - if not found: - tmp = policy_type.replace('_', '-') - raise ConfigError( - f'Can not delete {tmp} "{policy_name}", still in use!') - - return None - + # Check if any routing protocol is activated + if 'protocol' in policy: + for policy_type in policy_types: + for policy_name in list(set(routing_policy_find(policy_type, policy['protocol']))): + found = False + if policy_type in policy and policy_name in policy[policy_type]: + found = True + # BGP uses prefix-list for selecting both an IPv4 or IPv6 AFI related + # list - we need to go the extra mile here and check both prefix-lists + if policy_type == 'prefix_list' and 'prefix_list6' in policy and policy_name in \ + policy['prefix_list6']: + found = True + if not found: + tmp = policy_type.replace('_', '-') + raise ConfigError( + f'Can not delete {tmp} "{policy_name}", still in use!') -def generate(policy): - if not policy: - return None - policy['new_frr_config'] = render_to_string('frr/policy.frr.j2', policy) return None -def apply(policy): - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(frr.bgp_daemon) - frr_cfg.modify_section(r'^bgp as-path access-list .*') - frr_cfg.modify_section(r'^bgp community-list .*') - frr_cfg.modify_section(r'^bgp extcommunity-list .*') - frr_cfg.modify_section(r'^bgp large-community-list .*') - frr_cfg.modify_section(r'^route-map .*', stop_pattern='^exit', - remove_stop_mark=True) - if 'new_frr_config' in policy: - frr_cfg.add_before(frr.default_add_before, policy['new_frr_config']) - frr_cfg.commit_configuration(frr.bgp_daemon) - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(frr.zebra_daemon) - frr_cfg.modify_section(r'^access-list .*') - frr_cfg.modify_section(r'^ipv6 access-list .*') - frr_cfg.modify_section(r'^ip prefix-list .*') - frr_cfg.modify_section(r'^ipv6 prefix-list .*') - frr_cfg.modify_section(r'^route-map .*', stop_pattern='^exit', - remove_stop_mark=True) - if 'new_frr_config' in policy: - frr_cfg.add_before(frr.default_add_before, policy['new_frr_config']) - frr_cfg.commit_configuration(frr.zebra_daemon) +def generate(config_dict): + frrender.generate(config_dict) +def apply(config_dict): + frrender.apply() return None - if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_babel.py b/src/conf_mode/protocols_babel.py index 06fd9b9b6..f9d5e45a0 100755 --- a/src/conf_mode/protocols_babel.py +++ b/src/conf_mode/protocols_babel.py @@ -1,157 +1,108 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and 
contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.config import Config -from vyos.config import config_dict_merge -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_access_list from vyos.configverify import verify_prefix_list +from vyos.frrender import FRRender from vyos.utils.dict import dict_search -from vyos.template import render_to_string from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() + def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'babel'] - babel = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True) - - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - babel['interface_removed'] = list(interfaces_removed) - - # Bail out early if configuration tree does not exist - if not conf.exists(base): - babel.update({'deleted' : ''}) - return babel - - # We have gathered the dict representation of the CLI, but there are default - # values which we need to update into the dictionary retrieved. - default_values = conf.get_config_defaults(base, key_mangling=('-', '_'), - get_first_key=True, - recursive=True) - # merge in default values - babel = config_dict_merge(default_values, babel) + return get_frrender_dict(conf) - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - babel = dict_merge(tmp, babel) - return babel - -def verify(babel): - if not babel: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'babel'): return None + babel = config_dict['babel'] + babel['policy'] = config_dict['policy'] + # verify distribute_list if "distribute_list" in babel: acl_keys = { "ipv4": [ "distribute_list.ipv4.access_list.in", "distribute_list.ipv4.access_list.out", ], "ipv6": [ "distribute_list.ipv6.access_list.in", "distribute_list.ipv6.access_list.out", ] } prefix_list_keys = { "ipv4": [ "distribute_list.ipv4.prefix_list.in", "distribute_list.ipv4.prefix_list.out", ], "ipv6":[ "distribute_list.ipv6.prefix_list.in", "distribute_list.ipv6.prefix_list.out", ] } for address_family in ["ipv4", "ipv6"]: for iface_key in babel["distribute_list"].get(address_family, {}).get("interface", {}).keys(): acl_keys[address_family].extend([ f"distribute_list.{address_family}.interface.{iface_key}.access_list.in", f"distribute_list.{address_family}.interface.{iface_key}.access_list.out" ]) prefix_list_keys[address_family].extend([ f"distribute_list.{address_family}.interface.{iface_key}.prefix_list.in", f"distribute_list.{address_family}.interface.{iface_key}.prefix_list.out" ]) for address_family, keys in acl_keys.items(): for key in keys: acl = dict_search(key, babel) if acl: verify_access_list(acl, babel, version='6' if address_family == 'ipv6' else '') for address_family, keys in prefix_list_keys.items(): for key in keys: prefix_list = dict_search(key, babel) if prefix_list: verify_prefix_list(prefix_list, babel, version='6' if address_family == 'ipv6' else '') -def generate(babel): - if not babel or 'deleted' in babel: - return None - - babel['new_frr_config'] = render_to_string('frr/babeld.frr.j2', babel) - return None - -def apply(babel): - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - frr_cfg.load_configuration(frr.babel_daemon) - frr_cfg.modify_section('^router babel', stop_pattern='^exit', remove_stop_mark=True) - - for key in ['interface', 'interface_removed']: - if key not in babel: - continue - for interface in babel[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'new_frr_config' in babel: - frr_cfg.add_before(frr.default_add_before, babel['new_frr_config']) - frr_cfg.commit_configuration(frr.babel_daemon) +def generate(config_dict): + frrender.generate(config_dict) +def apply(config_dict): + frrender.apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_bfd.py b/src/conf_mode/protocols_bfd.py index d94ec6a0d..a0d7fdfb5 100755 --- a/src/conf_mode/protocols_bfd.py +++ b/src/conf_mode/protocols_bfd.py @@ -1,110 +1,96 @@ #!/usr/bin/env python3 # # Copyright (C) 2019-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from vyos.config import Config +from vyos.configdict import get_frrender_dict from vyos.configverify import verify_vrf +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender from vyos.template import is_ipv6 -from vyos.template import render_to_string from vyos.utils.network import is_ipv6_link_local from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() + def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'bfd'] - bfd = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - # Bail out early if configuration tree does not exist - if not conf.exists(base): - return bfd - - bfd = conf.merge_defaults(bfd, recursive=True) - return bfd + return get_frrender_dict(conf) -def verify(bfd): - if not bfd: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'bfd'): return None + bfd = config_dict['bfd'] if 'peer' in bfd: for peer, peer_config in bfd['peer'].items(): # IPv6 link local peers require an explicit local address/interface if is_ipv6_link_local(peer): if 'source' not in peer_config or len(peer_config['source']) < 2: raise ConfigError('BFD IPv6 link-local peers require explicit local address and interface setting') # IPv6 peers require an explicit local address if is_ipv6(peer): if 'source' not in peer_config or 'address' not in peer_config['source']: raise ConfigError('BFD IPv6 peers require explicit local address setting') if 'multihop' in peer_config: # multihop requires a source address if 'source' not in peer_config or 'address' not in peer_config['source']: raise ConfigError('BFD multihop requires source address') # multihop and echo-mode cannot be used together if 'echo_mode' in peer_config: raise ConfigError('BFD multihop and echo-mode cannot be used together') # multihop doesn't accept interface names if 'source' in peer_config and 'interface' in peer_config['source']: raise ConfigError('BFD multihop and source interface cannot be used together') if 'minimum_ttl' in peer_config and 'multihop' not in peer_config: raise ConfigError('Minimum TTL is only available for multihop BFD sessions!') if 'profile' in peer_config: profile_name = peer_config['profile'] if 'profile' not in bfd or profile_name not in bfd['profile']: raise ConfigError(f'BFD profile "{profile_name}" does not exist!') if 'vrf' in peer_config: verify_vrf(peer_config) return None -def generate(bfd): - if not bfd: - return None - bfd['new_frr_config'] = render_to_string('frr/bfdd.frr.j2', bfd) - -def apply(bfd): - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(frr.bfd_daemon) - frr_cfg.modify_section('^bfd', stop_pattern='^exit', remove_stop_mark=True) - if 'new_frr_config' in bfd: - frr_cfg.add_before(frr.default_add_before, bfd['new_frr_config']) - frr_cfg.commit_configuration(frr.bfd_daemon) +def generate(config_dict): + frrender.generate(config_dict) +def apply(config_dict): + frrender.apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_bgp.py b/src/conf_mode/protocols_bgp.py index e5c46aee6..989cf9b5c 100755 --- a/src/conf_mode/protocols_bgp.py +++ 
b/src/conf_mode/protocols_bgp.py @@ -1,653 +1,575 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from sys import argv from vyos.base import Warning from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_prefix_list from vyos.configverify import verify_route_map from vyos.configverify import verify_vrf +from vyos.frrender import FRRender from vyos.template import is_ip from vyos.template import is_interface -from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_vrf from vyos.utils.network import is_addr_assigned from vyos.utils.process import process_named_running -from vyos.utils.process import call from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() + +vrf = None +if len(argv) > 1: + vrf = argv[1] + def get_config(config=None): if config: conf = config else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] - - base_path = ['protocols', 'bgp'] - - # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'bgp'] or base_path - bgp = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, no_tag_node_value_mangle=True) - - bgp['dependent_vrfs'] = conf.get_config_dict(['vrf', 'name'], - key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - - # Remove per interface MPLS configuration - get a list if changed - # nodes under the interface tagNode - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - bgp['interface_removed'] = list(interfaces_removed) - - # Assign the name of our VRF context. This MUST be done before the return - # statement below, else on deletion we will delete the default instance - # instead of the VRF instance. 
- if vrf: - bgp.update({'vrf' : vrf}) - # We can not delete the BGP VRF instance if there is a L3VNI configured - # FRR L3VNI must be deleted first otherwise we will see error: - # "FRR error: Please unconfigure l3vni 3000" - tmp = ['vrf', 'name', vrf, 'vni'] - if conf.exists_effective(tmp): - bgp.update({'vni' : conf.return_effective_value(tmp)}) - # We can safely delete ourself from the dependent vrf list - if vrf in bgp['dependent_vrfs']: - del bgp['dependent_vrfs'][vrf] - - bgp['dependent_vrfs'].update({'default': {'protocols': { - 'bgp': conf.get_config_dict(base_path, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True)}}}) - - if not conf.exists(base): - # If bgp instance is deleted then mark it - bgp.update({'deleted' : ''}) - return bgp - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. - bgp = conf.merge_defaults(bgp, recursive=True) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. - tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - bgp = dict_merge(tmp, bgp) - - return bgp - + return get_frrender_dict(conf) def verify_vrf_as_import(search_vrf_name: str, afi_name: str, vrfs_config: dict) -> bool: """ :param search_vrf_name: search vrf name in import list :type search_vrf_name: str :param afi_name: afi/safi name :type afi_name: str :param vrfs_config: configuration dependents vrfs :type vrfs_config: dict :return: if vrf in import list retrun true else false :rtype: bool """ for vrf_name, vrf_config in vrfs_config.items(): import_list = dict_search( f'protocols.bgp.address_family.{afi_name}.import.vrf', vrf_config) if import_list: if search_vrf_name in import_list: return True return False def verify_vrf_import_options(afi_config: dict) -> bool: """ Search if afi contains one of options :param afi_config: afi/safi :type afi_config: dict :return: if vrf contains rd and route-target options return true else false :rtype: bool """ options = [ f'rd.vpn.export', f'route_target.vpn.import', f'route_target.vpn.export', f'route_target.vpn.both' ] for option in options: if dict_search(option, afi_config): return True return False def verify_vrf_import(vrf_name: str, vrfs_config: dict, afi_name: str) -> bool: """ Verify if vrf exists and contain options :param vrf_name: name of VRF :type vrf_name: str :param vrfs_config: dependent vrfs config :type vrfs_config: dict :param afi_name: afi/safi name :type afi_name: str :return: if vrf contains rd and route-target options return true else false :rtype: bool """ if vrf_name != 'default': verify_vrf({'vrf': vrf_name}) if dict_search(f'{vrf_name}.protocols.bgp.address_family.{afi_name}', vrfs_config): afi_config = \ vrfs_config[vrf_name]['protocols']['bgp']['address_family'][ afi_name] if verify_vrf_import_options(afi_config): return True return False def verify_vrflist_import(afi_name: str, afi_config: dict, vrfs_config: dict) -> bool: """ Call function to verify if scpecific vrf contains rd and route-target options return true else false :param afi_name: afi/safi name :type afi_name: str :param afi_config: afi/safi configuration :type afi_config: dict :param vrfs_config: dependent vrfs config :type vrfs_config:dict :return: if vrf contains rd and 
route-target options return true else false :rtype: bool """ for vrf_name in afi_config['import']['vrf']: if verify_vrf_import(vrf_name, vrfs_config, afi_name): return True return False def verify_remote_as(peer_config, bgp_config): if 'remote_as' in peer_config: return peer_config['remote_as'] if 'peer_group' in peer_config: peer_group_name = peer_config['peer_group'] tmp = dict_search(f'peer_group.{peer_group_name}.remote_as', bgp_config) if tmp: return tmp if 'interface' in peer_config: if 'remote_as' in peer_config['interface']: return peer_config['interface']['remote_as'] if 'peer_group' in peer_config['interface']: peer_group_name = peer_config['interface']['peer_group'] tmp = dict_search(f'peer_group.{peer_group_name}.remote_as', bgp_config) if tmp: return tmp if 'v6only' in peer_config['interface']: if 'remote_as' in peer_config['interface']['v6only']: return peer_config['interface']['v6only']['remote_as'] if 'peer_group' in peer_config['interface']['v6only']: peer_group_name = peer_config['interface']['v6only']['peer_group'] tmp = dict_search(f'peer_group.{peer_group_name}.remote_as', bgp_config) if tmp: return tmp return None def verify_afi(peer_config, bgp_config): # If address_family configured under neighbor if 'address_family' in peer_config: return True # If address_family configured under peer-group # if neighbor interface configured peer_group_name = None if dict_search('interface.peer_group', peer_config): peer_group_name = peer_config['interface']['peer_group'] elif dict_search('interface.v6only.peer_group', peer_config): peer_group_name = peer_config['interface']['v6only']['peer_group'] # if neighbor IP configured. if 'peer_group' in peer_config: peer_group_name = peer_config['peer_group'] if peer_group_name: tmp = dict_search(f'peer_group.{peer_group_name}.address_family', bgp_config) if tmp: return True return False -def verify(bgp): +def verify(config_dict): + global vrf + if not has_frr_protocol_in_dict(config_dict, 'bgp', vrf): + return None + + # equivalent of the C foo ? 'a' : 'b' statement + bgp = vrf and config_dict['vrf']['name'][vrf]['protocols']['bgp'] or config_dict['bgp'] + bgp['policy'] = config_dict['policy'] + + if vrf: + bgp['vrf'] = vrf + if 'deleted' in bgp: if 'vrf' in bgp: # Cannot delete vrf if it exists in import vrf list in other vrfs for tmp_afi in ['ipv4_unicast', 'ipv6_unicast']: if verify_vrf_as_import(bgp['vrf'], tmp_afi, bgp['dependent_vrfs']): raise ConfigError(f'Cannot delete VRF instance "{bgp["vrf"]}", ' \ 'unconfigure "import vrf" commands!') else: # We are running in the default VRF context, thus we can not delete # our main BGP instance if there are dependent BGP VRF instances. if 'dependent_vrfs' in bgp: for vrf, vrf_options in bgp['dependent_vrfs'].items(): if vrf != 'default': if dict_search('protocols.bgp', vrf_options): - raise ConfigError('Cannot delete default BGP instance, ' \ - 'dependent VRF instance(s) exist(s)!') + dependent_vrfs = ', '.join(bgp['dependent_vrfs'].keys()) + raise ConfigError(f'Cannot delete default BGP instance, ' \ + f'dependent VRF instance(s): {dependent_vrfs}') if 'vni' in vrf_options: raise ConfigError('Cannot delete default BGP instance, ' \ 'dependent L3VNI exists!') return None if 'system_as' not in bgp: raise ConfigError('BGP system-as number must be defined!') # Verify BMP if 'bmp' in bgp: # check bmp flag "bgpd -d -F traditional --daemon -A 127.0.0.1 -M rpki -M bmp" if not process_named_running('bgpd', 'bmp'): raise ConfigError( f'"bmp" flag is not found in bgpd. 
Configure "set system frr bmp" and restart bgp process' ) # check bmp target if 'target' in bgp['bmp']: for target, target_config in bgp['bmp']['target'].items(): if 'address' not in target_config: raise ConfigError(f'BMP target "{target}" address must be defined!') # Verify vrf on interface and bgp section if 'interface' in bgp: for interface in bgp['interface']: error_msg = f'Interface "{interface}" belongs to different VRF instance' tmp = get_interface_vrf(interface) if 'vrf' in bgp: if bgp['vrf'] != tmp: vrf = bgp['vrf'] raise ConfigError(f'{error_msg} "{vrf}"!') elif tmp != 'default': raise ConfigError(f'{error_msg} "{tmp}"!') peer_groups_context = dict() # Common verification for both peer-group and neighbor statements for neighbor in ['neighbor', 'peer_group']: # bail out early if there is no neighbor or peer-group statement # this also saves one indention level if neighbor not in bgp: continue for peer, peer_config in bgp[neighbor].items(): # Only regular "neighbor" statement can have a peer-group set # Check if the configure peer-group exists if 'peer_group' in peer_config: peer_group = peer_config['peer_group'] if 'peer_group' not in bgp or peer_group not in bgp['peer_group']: raise ConfigError(f'Specified peer-group "{peer_group}" for '\ f'neighbor "{neighbor}" does not exist!') if 'remote_as' in peer_config: is_ibgp = True if peer_config['remote_as'] != 'internal' and \ peer_config['remote_as'] != bgp['system_as']: is_ibgp = False if peer_group not in peer_groups_context: peer_groups_context[peer_group] = is_ibgp elif peer_groups_context[peer_group] != is_ibgp: raise ConfigError(f'Peer-group members must be ' f'all internal or all external') if 'local_role' in peer_config: #Ensure Local Role has only one value. if len(peer_config['local_role']) > 1: raise ConfigError(f'Only one local role can be specified for peer "{peer}"!') if 'local_as' in peer_config: if len(peer_config['local_as']) > 1: raise ConfigError(f'Only one local-as number can be specified for peer "{peer}"!') # Neighbor local-as override can not be the same as the local-as # we use for this BGP instane! asn = list(peer_config['local_as'].keys())[0] if asn == bgp['system_as']: raise ConfigError('Cannot have local-as same as system-as number') # Neighbor AS specified for local-as and remote-as can not be the same if dict_search('remote_as', peer_config) == asn and neighbor != 'peer_group': raise ConfigError(f'Neighbor "{peer}" has local-as specified which is '\ 'the same as remote-as, this is not allowed!') # ttl-security and ebgp-multihop can't be used in the same configration if 'ebgp_multihop' in peer_config and 'ttl_security' in peer_config: raise ConfigError('You can not set both ebgp-multihop and ttl-security hops') # interface and ebgp-multihop can't be used in the same configration if 'ebgp_multihop' in peer_config and 'interface' in peer_config: raise ConfigError(f'Ebgp-multihop can not be used with directly connected '\ f'neighbor "{peer}"') # Check if neighbor has both override capability and strict capability match # configured at the same time. 
if 'override_capability' in peer_config and 'strict_capability_match' in peer_config: raise ConfigError(f'Neighbor "{peer}" cannot have both override-capability and '\ 'strict-capability-match configured at the same time!') # Check spaces in the password if 'password' in peer_config and ' ' in peer_config['password']: raise ConfigError('Whitespace is not allowed in passwords!') # Some checks can/must only be done on a neighbor and not a peer-group if neighbor == 'neighbor': # remote-as must be either set explicitly for the neighbor # or for the entire peer-group if not verify_remote_as(peer_config, bgp): raise ConfigError(f'Neighbor "{peer}" remote-as must be set!') if not verify_afi(peer_config, bgp): Warning(f'BGP neighbor "{peer}" requires address-family!') # Peer-group member cannot override remote-as of peer-group if 'peer_group' in peer_config: peer_group = peer_config['peer_group'] if 'remote_as' in peer_config and 'remote_as' in bgp['peer_group'][peer_group]: raise ConfigError(f'Peer-group member "{peer}" cannot override remote-as of peer-group "{peer_group}"!') if 'interface' in peer_config: if 'peer_group' in peer_config['interface']: peer_group = peer_config['interface']['peer_group'] if 'remote_as' in peer_config['interface'] and 'remote_as' in bgp['peer_group'][peer_group]: raise ConfigError(f'Peer-group member "{peer}" cannot override remote-as of peer-group "{peer_group}"!') if 'v6only' in peer_config['interface']: if 'peer_group' in peer_config['interface']['v6only']: peer_group = peer_config['interface']['v6only']['peer_group'] if 'remote_as' in peer_config['interface']['v6only'] and 'remote_as' in bgp['peer_group'][peer_group]: raise ConfigError(f'Peer-group member "{peer}" cannot override remote-as of peer-group "{peer_group}"!') # Only checks for ipv4 and ipv6 neighbors # Check if neighbor address is assigned as system interface address vrf = None vrf_error_msg = f' in default VRF!' if 'vrf' in bgp: vrf = bgp['vrf'] vrf_error_msg = f' in VRF "{vrf}"!' if is_ip(peer) and is_addr_assigned(peer, vrf): raise ConfigError(f'Can not configure local address as neighbor "{peer}"{vrf_error_msg}') elif is_interface(peer): if 'peer_group' in peer_config: raise ConfigError(f'peer-group must be set under the interface node of "{peer}"') if 'remote_as' in peer_config: raise ConfigError(f'remote-as must be set under the interface node of "{peer}"') if 'source_interface' in peer_config['interface']: raise ConfigError(f'"source-interface" option not allowed for neighbor "{peer}"') # Local-AS allowed only for EBGP peers if 'local_as' in peer_config: remote_as = verify_remote_as(peer_config, bgp) if remote_as == bgp['system_as']: raise ConfigError(f'local-as configured for "{peer}", allowed only for eBGP peers!') for afi in ['ipv4_unicast', 'ipv4_multicast', 'ipv4_labeled_unicast', 'ipv4_flowspec', 'ipv6_unicast', 'ipv6_multicast', 'ipv6_labeled_unicast', 'ipv6_flowspec', 'l2vpn_evpn']: # Bail out early if address family is not configured if 'address_family' not in peer_config or afi not in peer_config['address_family']: continue # Check if neighbor has both ipv4 unicast and ipv4 labeled unicast configured at the same time. if 'ipv4_unicast' in peer_config['address_family'] and 'ipv4_labeled_unicast' in peer_config['address_family']: raise ConfigError(f'Neighbor "{peer}" cannot have both ipv4-unicast and ipv4-labeled-unicast configured at the same time!') # Check if neighbor has both ipv6 unicast and ipv6 labeled unicast configured at the same time. 
if 'ipv6_unicast' in peer_config['address_family'] and 'ipv6_labeled_unicast' in peer_config['address_family']: raise ConfigError(f'Neighbor "{peer}" cannot have both ipv6-unicast and ipv6-labeled-unicast configured at the same time!') afi_config = peer_config['address_family'][afi] if 'conditionally_advertise' in afi_config: if 'advertise_map' not in afi_config['conditionally_advertise']: raise ConfigError('Must specify advertise-map when conditionally-advertise is in use!') # Verify advertise-map (which is a route-map) exists verify_route_map(afi_config['conditionally_advertise']['advertise_map'], bgp) if ('exist_map' not in afi_config['conditionally_advertise'] and 'non_exist_map' not in afi_config['conditionally_advertise']): raise ConfigError('Must either specify exist-map or non-exist-map when ' \ 'conditionally-advertise is in use!') if {'exist_map', 'non_exist_map'} <= set(afi_config['conditionally_advertise']): raise ConfigError('Can not specify both exist-map and non-exist-map for ' \ 'conditionally-advertise!') if 'exist_map' in afi_config['conditionally_advertise']: verify_route_map(afi_config['conditionally_advertise']['exist_map'], bgp) if 'non_exist_map' in afi_config['conditionally_advertise']: verify_route_map(afi_config['conditionally_advertise']['non_exist_map'], bgp) # T4332: bgp deterministic-med cannot be disabled while addpath-tx-bestpath-per-AS is in use if 'addpath_tx_per_as' in afi_config: if dict_search('parameters.deterministic_med', bgp) == None: raise ConfigError('addpath-tx-per-as requires BGP deterministic-med parameter to be set!') # Validate if configured Prefix list exists if 'prefix_list' in afi_config: for tmp in ['import', 'export']: if tmp not in afi_config['prefix_list']: # bail out early continue if afi == 'ipv4_unicast': verify_prefix_list(afi_config['prefix_list'][tmp], bgp) elif afi == 'ipv6_unicast': verify_prefix_list(afi_config['prefix_list'][tmp], bgp, version='6') if 'route_map' in afi_config: for tmp in ['import', 'export']: if tmp in afi_config['route_map']: verify_route_map(afi_config['route_map'][tmp], bgp) if 'route_reflector_client' in afi_config: peer_group_as = peer_config.get('remote_as') if peer_group_as is None or (peer_group_as != 'internal' and peer_group_as != bgp['system_as']): raise ConfigError('route-reflector-client only supported for iBGP peers') else: if 'peer_group' in peer_config: peer_group_as = dict_search(f'peer_group.{peer_group}.remote_as', bgp) if peer_group_as is None or (peer_group_as != 'internal' and peer_group_as != bgp['system_as']): raise ConfigError('route-reflector-client only supported for iBGP peers') # T5833 not all AFIs are supported for VRF if 'vrf' in bgp and 'address_family' in peer_config: unsupported_vrf_afi = { 'ipv4_flowspec', 'ipv6_flowspec', 'ipv4_labeled_unicast', 'ipv6_labeled_unicast', 'ipv4_vpn', 'ipv6_vpn', } for afi in peer_config['address_family']: if afi in unsupported_vrf_afi: raise ConfigError( f"VRF is not allowed for address-family '{afi.replace('_', '-')}'" ) # Throw an error if a peer group is not configured for allow range for prefix in dict_search('listen.range', bgp) or []: # we can not use dict_search() here as prefix contains dots ... 
if 'peer_group' not in bgp['listen']['range'][prefix]: raise ConfigError(f'Listen range for prefix "{prefix}" has no peer group configured.') peer_group = bgp['listen']['range'][prefix]['peer_group'] if 'peer_group' not in bgp or peer_group not in bgp['peer_group']: raise ConfigError(f'Peer-group "{peer_group}" for listen range "{prefix}" does not exist!') if not verify_remote_as(bgp['listen']['range'][prefix], bgp): raise ConfigError(f'Peer-group "{peer_group}" requires remote-as to be set!') # Throw an error if the global administrative distance parameters aren't all filled out. if dict_search('parameters.distance.global', bgp) != None: for key in ['external', 'internal', 'local']: if dict_search(f'parameters.distance.global.{key}', bgp) == None: raise ConfigError('Missing mandatory configuration option for '\ f'global administrative distance {key}!') # TCP keepalive requires all three parameters to be set if dict_search('parameters.tcp_keepalive', bgp) != None: if not {'idle', 'interval', 'probes'} <= set(bgp['parameters']['tcp_keepalive']): raise ConfigError('TCP keepalive incomplete - idle, interval and probes must be set') # Address Family specific validation if 'address_family' in bgp: for afi, afi_config in bgp['address_family'].items(): if 'distance' in afi_config: # Throw an error if the address family specific administrative # distance parameters aren't all filled out. for key in ['external', 'internal', 'local']: if key not in afi_config['distance']: raise ConfigError('Missing mandatory configuration option for '\ f'{afi} administrative distance {key}!') if afi in ['ipv4_unicast', 'ipv6_unicast']: vrf_name = bgp['vrf'] if dict_search('vrf', bgp) else 'default' # Verify if current VRF contains rd and route-target options # and does not exist in import list in other VRFs if dict_search(f'rd.vpn.export', afi_config): if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']): raise ConfigError( 'Command "import vrf" conflicts with "rd vpn export" command!') if not dict_search('parameters.router_id', bgp): Warning(f'BGP "router-id" is required when using "rd" and "route-target"!') if dict_search('route_target.vpn.both', afi_config): if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']): raise ConfigError( 'Command "import vrf" conflicts with "route-target vpn both" command!') if dict_search('route_target.vpn.export', afi_config): raise ConfigError( 'Command "route-target vpn export" conflicts '\ 'with "route-target vpn both" command!') if dict_search('route_target.vpn.import', afi_config): raise ConfigError( 'Command "route-target vpn import" conflicts '\ 'with "route-target vpn both" command!') if dict_search('route_target.vpn.import', afi_config): if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']): raise ConfigError( 'Command "import vrf" conflicts with "route-target vpn import" command!') if dict_search('route_target.vpn.export', afi_config): if verify_vrf_as_import(vrf_name, afi, bgp['dependent_vrfs']): raise ConfigError( 'Command "import vrf" conflicts with "route-target vpn export" command!') # Verify if VRFs in import do not contain rd # and route-target options if dict_search('import.vrf', afi_config) is not None: # Verify if VRF with import does not contain rd # and route-target options if verify_vrf_import_options(afi_config): raise ConfigError( 'Please unconfigure "import vrf" commands before using vpn commands in the same VRF!') # Verify if VRFs in import list do not contain rd # and route-target options if verify_vrflist_import(afi, afi_config, 
bgp['dependent_vrfs']): raise ConfigError( 'Please unconfigure import vrf commands before using vpn commands in dependent VRFs!') # FRR error: please unconfigure vpn to vrf commands before # using import vrf commands if 'vpn' in afi_config['import'] or dict_search('export.vpn', afi_config) != None: raise ConfigError('Please unconfigure VPN to VRF commands before '\ 'using "import vrf" commands!') # Verify that the export/import route-maps do exist for export_import in ['export', 'import']: tmp = dict_search(f'route_map.vpn.{export_import}', afi_config) if tmp: verify_route_map(tmp, bgp) # per-vrf sid and per-af sid are mutually exclusive if 'sid' in afi_config and 'sid' in bgp: raise ConfigError('SID per VRF and SID per address-family are mutually exclusive!') # Checks only required for L2VPN EVPN if afi in ['l2vpn_evpn']: if 'vni' in afi_config: for vni, vni_config in afi_config['vni'].items(): if 'rd' in vni_config and 'advertise_all_vni' not in afi_config: raise ConfigError('BGP EVPN "rd" requires "advertise-all-vni" to be set!') if 'route_target' in vni_config and 'advertise_all_vni' not in afi_config: raise ConfigError('BGP EVPN "route-target" requires "advertise-all-vni" to be set!') return None -def generate(bgp): - if not bgp or 'deleted' in bgp: - return None - - bgp['frr_bgpd_config'] = render_to_string('frr/bgpd.frr.j2', bgp) - return None - -def apply(bgp): - if 'deleted' in bgp: - # We need to ensure that the L3VNI is deleted first. - # This is not possible with old config backend - # priority bug - if {'vrf', 'vni'} <= set(bgp): - call('vtysh -c "conf t" -c "vrf {vrf}" -c "no vni {vni}"'.format(**bgp)) - - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # Generate empty helper string which can be ammended to FRR commands, it - # will be either empty (default VRF) or contain the "vrf <name" statement - vrf = '' - if 'vrf' in bgp: - vrf = ' vrf ' + bgp['vrf'] - - frr_cfg.load_configuration(frr.bgp_daemon) - - # Remove interface specific config - for key in ['interface', 'interface_removed']: - if key not in bgp: - continue - for interface in bgp[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - frr_cfg.modify_section(f'^router bgp \d+{vrf}', stop_pattern='^exit', remove_stop_mark=True) - if 'frr_bgpd_config' in bgp: - frr_cfg.add_before(frr.default_add_before, bgp['frr_bgpd_config']) - frr_cfg.commit_configuration(frr.bgp_daemon) +def generate(config_dict): + frrender.generate(config_dict) +def apply(config_dict): + frrender.apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_eigrp.py b/src/conf_mode/protocols_eigrp.py index a948b47da..5d60e6bfd 100755 --- a/src/conf_mode/protocols_eigrp.py +++ b/src/conf_mode/protocols_eigrp.py @@ -1,117 +1,72 @@ #!/usr/bin/env python3 # # Copyright (C) 2022-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from sys import argv from vyos.config import Config -from vyos.configdict import dict_merge +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_vrf -from vyos.template import render_to_string +from vyos.frrender import FRRender from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() +vrf = None +if len(argv) > 1: + vrf = argv[1] def get_config(config=None): if config: conf = config else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] + return get_frrender_dict(conf) - base_path = ['protocols', 'eigrp'] +def verify(config_dict): + global vrf + if not has_frr_protocol_in_dict(config_dict, 'eigrp', vrf): + return None # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'eigrp'] or base_path - eigrp = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, no_tag_node_value_mangle=True) - - # Assign the name of our VRF context. This MUST be done before the return - # statement below, else on deletion we will delete the default instance - # instead of the VRF instance. - if vrf: eigrp.update({'vrf' : vrf}) - - if not conf.exists(base): - eigrp.update({'deleted' : ''}) - if not vrf: - # We are running in the default VRF context, thus we can not delete - # our main EIGRP instance if there are dependent EIGRP VRF instances. - eigrp['dependent_vrfs'] = conf.get_config_dict(['vrf', 'name'], - key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - - return eigrp - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - eigrp = dict_merge(tmp, eigrp) - - return eigrp - -def verify(eigrp): - if not eigrp or 'deleted' in eigrp: - return + eigrp = vrf and config_dict['vrf']['name'][vrf]['protocols']['eigrp'] or config_dict['eigrp'] + eigrp['policy'] = config_dict['policy'] if 'system_as' not in eigrp: raise ConfigError('EIGRP system-as must be defined!') if 'vrf' in eigrp: verify_vrf(eigrp) -def generate(eigrp): - if not eigrp or 'deleted' in eigrp: - return None - - eigrp['frr_eigrpd_config'] = render_to_string('frr/eigrpd.frr.j2', eigrp) - -def apply(eigrp): - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # Generate empty helper string which can be ammended to FRR commands, it - # will be either empty (default VRF) or contain the "vrf <name" statement - vrf = '' - if 'vrf' in eigrp: - vrf = ' vrf ' + eigrp['vrf'] - - frr_cfg.load_configuration(frr.eigrp_daemon) - frr_cfg.modify_section(f'^router eigrp \d+{vrf}', stop_pattern='^exit', remove_stop_mark=True) - if 'frr_eigrpd_config' in eigrp: - frr_cfg.add_before(frr.default_add_before, eigrp['frr_eigrpd_config']) - frr_cfg.commit_configuration(frr.eigrp_daemon) +def generate(config_dict): + frrender.generate(config_dict) +def apply(config_dict): + frrender.apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_isis.py b/src/conf_mode/protocols_isis.py index 9b70c7329..e812770bc 100755 --- a/src/conf_mode/protocols_isis.py +++ b/src/conf_mode/protocols_isis.py @@ -1,310 +1,298 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from sys import argv from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_interface_exists +from vyos.frrender import FRRender from vyos.ifconfig import Interface from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_config -from vyos.template import render_to_string from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() + +vrf = None +if len(argv) > 1: + vrf = argv[1] + def get_config(config=None): if config: conf = config else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] + return get_frrender_dict(conf) - base_path = ['protocols', 'isis'] - # eqivalent of the C foo ? 
'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'isis'] or base_path - isis = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - - # Assign the name of our VRF context. This MUST be done before the return - # statement below, else on deletion we will delete the default instance - # instead of the VRF instance. - if vrf: isis['vrf'] = vrf - - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - isis['interface_removed'] = list(interfaces_removed) - - # Bail out early if configuration tree does no longer exist. this must - # be done after retrieving the list of interfaces to be removed. - if not conf.exists(base): - isis.update({'deleted' : ''}) - return isis - - # merge in default values - isis = conf.merge_defaults(isis, recursive=True) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. - tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - isis = dict_merge(tmp, isis) + # base_path = ['protocols', 'isis'] + + # # eqivalent of the C foo ? 'a' : 'b' statement + # base = vrf and ['vrf', 'name', vrf, 'protocols', 'isis'] or base_path + # isis = conf.get_config_dict(base, key_mangling=('-', '_'), + # get_first_key=True, + # no_tag_node_value_mangle=True) + + # # Assign the name of our VRF context. This MUST be done before the return + # # statement below, else on deletion we will delete the default instance + # # instead of the VRF instance. + # if vrf: isis['vrf'] = vrf + + # # FRR has VRF support for different routing daemons. As interfaces belong + # # to VRFs - or the global VRF, we need to check for changed interfaces so + # # that they will be properly rendered for the FRR config. Also this eases + # # removal of interfaces from the running configuration. + # interfaces_removed = node_changed(conf, base + ['interface']) + # if interfaces_removed: + # isis['interface_removed'] = list(interfaces_removed) + + # # Bail out early if configuration tree does no longer exist. this must + # # be done after retrieving the list of interfaces to be removed. + # if not conf.exists(base): + # isis.update({'deleted' : ''}) + # return isis + + # # merge in default values + # isis = conf.merge_defaults(isis, recursive=True) + + # # We also need some additional information from the config, prefix-lists + # # and route-maps for instance. They will be used in verify(). + # # + # # XXX: one MUST always call this without the key_mangling() option! See + # # vyos.configverify.verify_common_route_maps() for more information. + # tmp = conf.get_config_dict(['policy']) + # # Merge policy dict into "regular" config dict + # isis = dict_merge(tmp, isis) return isis -def verify(isis): - # bail out early - looks like removal from running config - if not isis or 'deleted' in isis: +def verify(config_dict): + global vrf + if not has_frr_protocol_in_dict(config_dict, 'isis', vrf): return None + # eqivalent of the C foo ? 
'a' : 'b' statement + isis = vrf and config_dict['vrf']['name'][vrf]['protocols']['isis'] or config_dict['isis'] + isis['policy'] = config_dict['policy'] + + if 'deleted' in isis: + return None + + if vrf: + isis['vrf'] = vrf + if 'net' not in isis: raise ConfigError('Network entity is mandatory!') # last byte in IS-IS area address must be 0 tmp = isis['net'].split('.') if int(tmp[-1]) != 0: raise ConfigError('Last byte of IS-IS network entity title must always be 0!') verify_common_route_maps(isis) # If interface not set if 'interface' not in isis: raise ConfigError('Interface used for routing updates is mandatory!') for interface in isis['interface']: verify_interface_exists(isis, interface) # Interface MTU must be >= configured lsp-mtu mtu = Interface(interface).get_mtu() area_mtu = isis['lsp_mtu'] # Recommended maximum PDU size = interface MTU - 3 bytes recom_area_mtu = mtu - 3 if mtu < int(area_mtu) or int(area_mtu) > recom_area_mtu: raise ConfigError(f'Interface {interface} has MTU {mtu}, ' \ f'current area MTU is {area_mtu}! \n' \ f'Recommended area lsp-mtu {recom_area_mtu} or less ' \ '(calculated on MTU size).') if 'vrf' in isis: # If interface specific options are set, we must ensure that the # interface is bound to our requesting VRF. Due to the VyOS # priorities the interface is bound to the VRF after creation of # the VRF itself, and before any routing protocol is configured. vrf = isis['vrf'] tmp = get_interface_config(interface) if 'master' not in tmp or tmp['master'] != vrf: raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!') # If md5 and plaintext-password set at the same time for password in ['area_password', 'domain_password']: if password in isis: if {'md5', 'plaintext_password'} <= set(isis[password]): tmp = password.replace('_', '-') raise ConfigError(f'Can use either md5 or plaintext-password for {tmp}!') # If one param from delay set, but not set others if 'spf_delay_ietf' in isis: required_timers = ['holddown', 'init_delay', 'long_delay', 'short_delay', 'time_to_learn'] exist_timers = [] for elm_timer in required_timers: if elm_timer in isis['spf_delay_ietf']: exist_timers.append(elm_timer) exist_timers = set(required_timers).difference(set(exist_timers)) if len(exist_timers) > 0: raise ConfigError('All types of spf-delay must be configured. 
Missing: ' + ', '.join(exist_timers).replace('_', '-')) # If Redistribute set, but level don't set if 'redistribute' in isis: proc_level = isis.get('level','').replace('-','_') for afi in ['ipv4', 'ipv6']: if afi not in isis['redistribute']: continue for proto, proto_config in isis['redistribute'][afi].items(): if 'level_1' not in proto_config and 'level_2' not in proto_config: raise ConfigError(f'Redistribute level-1 or level-2 should be specified in ' \ f'"protocols isis redistribute {afi} {proto}"!') for redistr_level, redistr_config in proto_config.items(): if proc_level and proc_level != 'level_1_2' and proc_level != redistr_level: raise ConfigError(f'"protocols isis redistribute {afi} {proto} {redistr_level}" ' \ f'can not be used with \"protocols isis level {proc_level}\"!') # Segment routing checks if dict_search('segment_routing.global_block', isis): g_high_label_value = dict_search('segment_routing.global_block.high_label_value', isis) g_low_label_value = dict_search('segment_routing.global_block.low_label_value', isis) # If segment routing global block high or low value is blank, throw error if not (g_low_label_value or g_high_label_value): raise ConfigError('Segment routing global-block requires both low and high value!') # If segment routing global block low value is higher than the high value, throw error if int(g_low_label_value) > int(g_high_label_value): raise ConfigError('Segment routing global-block low value must be lower than high value') if dict_search('segment_routing.local_block', isis): if dict_search('segment_routing.global_block', isis) == None: raise ConfigError('Segment routing local-block requires global-block to be configured!') l_high_label_value = dict_search('segment_routing.local_block.high_label_value', isis) l_low_label_value = dict_search('segment_routing.local_block.low_label_value', isis) # If segment routing local-block high or low value is blank, throw error if not (l_low_label_value or l_high_label_value): raise ConfigError('Segment routing local-block requires both high and low value!') # If segment routing local-block low value is higher than the high value, throw error if int(l_low_label_value) > int(l_high_label_value): raise ConfigError('Segment routing local-block low value must be lower than high value') # local-block most live outside global block global_range = range(int(g_low_label_value), int(g_high_label_value) +1) local_range = range(int(l_low_label_value), int(l_high_label_value) +1) # Check for overlapping ranges if list(set(global_range) & set(local_range)): raise ConfigError(f'Segment-Routing Global Block ({g_low_label_value}/{g_high_label_value}) '\ f'conflicts with Local Block ({l_low_label_value}/{l_high_label_value})!') # Check for a blank or invalid value per prefix if dict_search('segment_routing.prefix', isis): for prefix, prefix_config in isis['segment_routing']['prefix'].items(): if 'absolute' in prefix_config: if prefix_config['absolute'].get('value') is None: raise ConfigError(f'Segment routing prefix {prefix} absolute value cannot be blank.') elif 'index' in prefix_config: if prefix_config['index'].get('value') is None: raise ConfigError(f'Segment routing prefix {prefix} index value cannot be blank.') # Check for explicit-null and no-php-flag configured at the same time per prefix if dict_search('segment_routing.prefix', isis): for prefix, prefix_config in isis['segment_routing']['prefix'].items(): if 'absolute' in prefix_config: if ("explicit_null" in prefix_config['absolute']) and ("no_php_flag" in 
prefix_config['absolute']): raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\ f'and no-php-flag configured at the same time.') elif 'index' in prefix_config: if ("explicit_null" in prefix_config['index']) and ("no_php_flag" in prefix_config['index']): raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\ f'and no-php-flag configured at the same time.') # Check for index ranges being larger than the segment routing global block if dict_search('segment_routing.global_block', isis): g_high_label_value = dict_search('segment_routing.global_block.high_label_value', isis) g_low_label_value = dict_search('segment_routing.global_block.low_label_value', isis) g_label_difference = int(g_high_label_value) - int(g_low_label_value) if dict_search('segment_routing.prefix', isis): for prefix, prefix_config in isis['segment_routing']['prefix'].items(): if 'index' in prefix_config: index_size = isis['segment_routing']['prefix'][prefix]['index']['value'] if int(index_size) > int(g_label_difference): raise ConfigError(f'Segment routing prefix {prefix} cannot have an '\ f'index base size larger than the SRGB label base.') # Check for LFA tiebreaker index duplication if dict_search('fast_reroute.lfa.local.tiebreaker', isis): comparison_dictionary = {} for item, item_options in isis['fast_reroute']['lfa']['local']['tiebreaker'].items(): for index, index_options in item_options.items(): for index_value, index_value_options in index_options.items(): if index_value not in comparison_dictionary.keys(): comparison_dictionary[index_value] = [item] else: comparison_dictionary[index_value].append(item) for index, index_length in comparison_dictionary.items(): if int(len(index_length)) > 1: raise ConfigError(f'LFA index {index} cannot have more than one tiebreaker configured.') # Check for LFA priority-limit configured multiple times per level if dict_search('fast_reroute.lfa.local.priority_limit', isis): comparison_dictionary = {} for priority, priority_options in isis['fast_reroute']['lfa']['local']['priority_limit'].items(): for level, level_options in priority_options.items(): if level not in comparison_dictionary.keys(): comparison_dictionary[level] = [priority] else: comparison_dictionary[level].append(priority) for level, level_length in comparison_dictionary.items(): if int(len(level_length)) > 1: raise ConfigError(f'LFA priority-limit on {level.replace("_", "-")} cannot have more than one priority configured.') # Check for LFA remote prefix list configured with more than one list if dict_search('fast_reroute.lfa.remote.prefix_list', isis): if int(len(isis['fast_reroute']['lfa']['remote']['prefix_list'].items())) > 1: raise ConfigError(f'LFA remote prefix-list has more than one configured. 
Cannot have more than one configured.') return None -def generate(isis): - if not isis or 'deleted' in isis: - return None - - isis['frr_isisd_config'] = render_to_string('frr/isisd.frr.j2', isis) - return None - -def apply(isis): - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # Generate empty helper string which can be ammended to FRR commands, it - # will be either empty (default VRF) or contain the "vrf <name" statement - vrf = '' - if 'vrf' in isis: - vrf = ' vrf ' + isis['vrf'] - - frr_cfg.load_configuration(frr.isis_daemon) - frr_cfg.modify_section(f'^router isis VyOS{vrf}', stop_pattern='^exit', remove_stop_mark=True) - - for key in ['interface', 'interface_removed']: - if key not in isis: - continue - for interface in isis[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'frr_isisd_config' in isis: - frr_cfg.add_before(frr.default_add_before, isis['frr_isisd_config']) - - frr_cfg.commit_configuration(frr.isis_daemon) +def generate(config_dict): + frrender.generate(config_dict) +def apply(config_dict): + frrender.apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_mpls.py b/src/conf_mode/protocols_mpls.py index 4a05b8044..2a691f4a4 100755 --- a/src/conf_mode/protocols_mpls.py +++ b/src/conf_mode/protocols_mpls.py @@ -1,146 +1,138 @@ #!/usr/bin/env python3 # -# Copyright (C) 2020-2022 VyOS maintainers and contributors +# Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os from sys import exit from glob import glob from vyos.config import Config -from vyos.template import render_to_string +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender from vyos.utils.dict import dict_search from vyos.utils.file import read_file from vyos.utils.system import sysctl_write from vyos.configverify import verify_interface_exists from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() -config_file = r'/tmp/ldpd.frr' +frrender = FRRender() def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'mpls'] - mpls = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) - return mpls + return get_frrender_dict(conf) -def verify(mpls): - # If no config, then just bail out early. - if not mpls: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'mpls'): return None + mpls = config_dict['mpls'] + if 'interface' in mpls: for interface in mpls['interface']: verify_interface_exists(mpls, interface) # Checks to see if LDP is properly configured if 'ldp' in mpls: # If router ID not defined if 'router_id' not in mpls['ldp']: raise ConfigError('Router ID missing. 
An LDP router id is mandatory!') # If interface not set if 'interface' not in mpls['ldp']: raise ConfigError('LDP interfaces are missing. An LDP interface is mandatory!') # If transport addresses are not set if not dict_search('ldp.discovery.transport_ipv4_address', mpls) and \ not dict_search('ldp.discovery.transport_ipv6_address', mpls): raise ConfigError('LDP transport address missing!') return None -def generate(mpls): - # If there's no MPLS config generated, create dictionary key with no value. - if not mpls or 'deleted' in mpls: - return None +def generate(config_dict): + frrender.generate(config_dict) - mpls['frr_ldpd_config'] = render_to_string('frr/ldpd.frr.j2', mpls) - return None +def apply(config_dict): + frrender.apply() -def apply(mpls): - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - frr_cfg.load_configuration(frr.ldpd_daemon) - frr_cfg.modify_section(f'^mpls ldp', stop_pattern='^exit', remove_stop_mark=True) + if not has_frr_protocol_in_dict(config_dict, 'mpls'): + return None - if 'frr_ldpd_config' in mpls: - frr_cfg.add_before(frr.default_add_before, mpls['frr_ldpd_config']) - frr_cfg.commit_configuration(frr.ldpd_daemon) + mpls = config_dict['mpls'] # Set number of entries in the platform label tables labels = '0' if 'interface' in mpls: labels = '1048575' sysctl_write('net.mpls.platform_labels', labels) # Check for changes in global MPLS options if 'parameters' in mpls: # Choose whether to copy IP TTL to MPLS header TTL if 'no_propagate_ttl' in mpls['parameters']: sysctl_write('net.mpls.ip_ttl_propagate', 0) # Choose whether to limit maximum MPLS header TTL if 'maximum_ttl' in mpls['parameters']: ttl = mpls['parameters']['maximum_ttl'] sysctl_write('net.mpls.default_ttl', ttl) else: # Set default global MPLS options if not defined. sysctl_write('net.mpls.ip_ttl_propagate', 1) sysctl_write('net.mpls.default_ttl', 255) # Enable and disable MPLS processing on interfaces per configuration if 'interface' in mpls: system_interfaces = [] # Populate system interfaces list with local MPLS capable interfaces for interface in glob('/proc/sys/net/mpls/conf/*'): system_interfaces.append(os.path.basename(interface)) # This is where the comparison is done on if an interface needs to be enabled/disabled. 
for system_interface in system_interfaces: interface_state = read_file(f'/proc/sys/net/mpls/conf/{system_interface}/input') if '1' in interface_state: if system_interface not in mpls['interface']: system_interface = system_interface.replace('.', '/') sysctl_write(f'net.mpls.conf.{system_interface}.input', 0) elif '0' in interface_state: if system_interface in mpls['interface']: system_interface = system_interface.replace('.', '/') sysctl_write(f'net.mpls.conf.{system_interface}.input', 1) else: system_interfaces = [] # If MPLS interfaces are not configured, set MPLS processing disabled for interface in glob('/proc/sys/net/mpls/conf/*'): system_interfaces.append(os.path.basename(interface)) for system_interface in system_interfaces: system_interface = system_interface.replace('.', '/') sysctl_write(f'net.mpls.conf.{system_interface}.input', 0) return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_openfabric.py b/src/conf_mode/protocols_openfabric.py index b60117a02..3dc06ee68 100644 --- a/src/conf_mode/protocols_openfabric.py +++ b/src/conf_mode/protocols_openfabric.py @@ -1,143 +1,108 @@ #!/usr/bin/env python3 # # Copyright (C) 2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.base import Warning from vyos.config import Config -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict from vyos.configverify import verify_interface_exists -from vyos.template import render_to_string +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() def get_config(config=None): if config: conf = config else: conf = Config() - base_path = ['protocols', 'openfabric'] + return get_frrender_dict(conf) - openfabric = conf.get_config_dict(base_path, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True) - - # Remove per domain MPLS configuration - get a list of all changed Openfabric domains - # (removed and added) so that they will be properly rendered for the FRR config. 
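Two details of the MPLS sysctl handling above are easy to miss: 1048575 is the top of the 20-bit MPLS label space, so writing it to net.mpls.platform_labels sizes the kernel label table for every encodable label, and the replace('.', '/') call appears to follow the sysctl(8) convention that a literal dot inside an interface name (a VLAN sub-interface such as eth0.100) is written as '/' in dotted key notation. A minimal sketch, assuming only sysctl_write() from vyos.utils.system as used in the diff; the helper name is invented for illustration:

from vyos.utils.system import sysctl_write

def set_mpls_input(ifname: str, enable: bool) -> None:
    # 'eth0.100' -> 'eth0/100': '/' escapes the dot in a VLAN interface name
    key_name = ifname.replace('.', '/')
    sysctl_write(f'net.mpls.conf.{key_name}.input', 1 if enable else 0)

# 2**20 - 1 == 1048575, the largest encodable MPLS label value
sysctl_write('net.mpls.platform_labels', 1048575)
set_mpls_input('eth0.100', True)    # writes net.mpls.conf.eth0/100.input = 1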
- openfabric['domains_all'] = list(conf.list_nodes(' '.join(base_path) + f' domain') + - node_changed(conf, base_path + ['domain'])) - - # Get a list of all interfaces - openfabric['interfaces_all'] = [] - for domain in openfabric['domains_all']: - interfaces_modified = list(node_changed(conf, base_path + ['domain', domain, 'interface']) + - conf.list_nodes(' '.join(base_path) + f' domain {domain} interface')) - openfabric['interfaces_all'].extend(interfaces_modified) - - if not conf.exists(base_path): - openfabric.update({'deleted': ''}) - - return openfabric +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'openfabric'): + return None -def verify(openfabric): - # bail out early - looks like removal from running config - if not openfabric or 'deleted' in openfabric: + openfabric = config_dict['openfabric'] + if 'deleted' in openfabric: return None if 'net' not in openfabric: raise ConfigError('Network entity is mandatory!') # last byte in OpenFabric area address must be 0 tmp = openfabric['net'].split('.') if int(tmp[-1]) != 0: raise ConfigError('Last byte of OpenFabric network entity title must always be 0!') if 'domain' not in openfabric: raise ConfigError('OpenFabric domain name is mandatory!') interfaces_used = [] for domain, domain_config in openfabric['domain'].items(): # If interface not set if 'interface' not in domain_config: raise ConfigError(f'Interface used for routing updates in OpenFabric "{domain}" is mandatory!') for iface, iface_config in domain_config['interface'].items(): verify_interface_exists(openfabric, iface) # interface can be activated only on one OpenFabric instance if iface in interfaces_used: raise ConfigError(f'Interface {iface} is already used in different OpenFabric instance!') if 'address_family' not in iface_config or len(iface_config['address_family']) < 1: raise ConfigError(f'Need to specify address family for the interface "{iface}"!') # If md5 and plaintext-password set at the same time if 'password' in iface_config: if {'md5', 'plaintext_password'} <= set(iface_config['password']): raise ConfigError(f'Can use either md5 or plaintext-password for password for the interface!') if iface == 'lo' and 'passive' not in iface_config: Warning('For loopback interface passive mode is implied!') interfaces_used.append(iface) # If md5 and plaintext-password set at the same time password = 'domain_password' if password in domain_config: if {'md5', 'plaintext_password'} <= set(domain_config[password]): raise ConfigError(f'Can use either md5 or plaintext-password for domain-password!') return None -def generate(openfabric): - if not openfabric or 'deleted' in openfabric: - return None - - openfabric['frr_fabricd_config'] = render_to_string('frr/fabricd.frr.j2', openfabric) - return None - -def apply(openfabric): - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - frr_cfg.load_configuration(frr.openfabric_daemon) - for domain in openfabric['domains_all']: - frr_cfg.modify_section(f'^router openfabric {domain}', stop_pattern='^exit', remove_stop_mark=True) - - for interface in openfabric['interfaces_all']: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'frr_fabricd_config' in openfabric: - frr_cfg.add_before(frr.default_add_before, openfabric['frr_fabricd_config']) - - frr_cfg.commit_configuration(frr.openfabric_daemon) +def generate(config_dict): + frrender.generate(config_dict) +def apply(config_dict): + frrender.apply() return None if 
__name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_ospf.py b/src/conf_mode/protocols_ospf.py index 44817b9b7..32ad5e497 100755 --- a/src/conf_mode/protocols_ospf.py +++ b/src/conf_mode/protocols_ospf.py @@ -1,288 +1,195 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from sys import argv from vyos.config import Config -from vyos.config import config_dict_merge -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_route_map from vyos.configverify import verify_interface_exists from vyos.configverify import verify_access_list -from vyos.template import render_to_string +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_config from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() + +vrf = None +if len(argv) > 1: + vrf = argv[1] def get_config(config=None): if config: conf = config else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] + return get_frrender_dict(conf) - base_path = ['protocols', 'ospf'] +def verify(config_dict): + global vrf + if not has_frr_protocol_in_dict(config_dict, 'ospf', vrf): + return None # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'ospf'] or base_path - ospf = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True) - - # Assign the name of our VRF context. This MUST be done before the return - # statement below, else on deletion we will delete the default instance - # instead of the VRF instance. - if vrf: ospf['vrf'] = vrf - - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - ospf['interface_removed'] = list(interfaces_removed) - - # Bail out early if configuration tree does no longer exist. this must - # be done after retrieving the list of interfaces to be removed. - if not conf.exists(base): - ospf.update({'deleted' : ''}) - return ospf - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. 
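By this point the pattern repeated across eigrp, isis, mpls, openfabric and now ospf is clear: get_config() collapses to a single get_frrender_dict() call, verify() picks its protocol subtree out of that shared dictionary, and generate()/apply() delegate to the module-level FRRender instance. A condensed sketch of that common skeleton, using only names that appear in this patch (the protocol key 'ospf' stands in for any of the converted daemons, and the protocol-specific checks are elided):

from sys import argv
from sys import exit

from vyos.config import Config
from vyos.configdict import get_frrender_dict
from vyos.configverify import has_frr_protocol_in_dict
from vyos.frrender import FRRender
from vyos import ConfigError

frrender = FRRender()

vrf = argv[1] if len(argv) > 1 else None

def get_config(config=None):
    conf = config if config else Config()
    # One call now returns the complete FRR-relevant tree (protocols, VRFs, policy)
    return get_frrender_dict(conf)

def verify(config_dict):
    if not has_frr_protocol_in_dict(config_dict, 'ospf', vrf):
        return None
    # equivalent of the C "foo ? 'a' : 'b'" statement
    ospf = vrf and config_dict['vrf']['name'][vrf]['protocols']['ospf'] or config_dict['ospf']
    ospf['policy'] = config_dict['policy']
    # ... protocol-specific checks unchanged ...
    return None

def generate(config_dict):
    frrender.generate(config_dict)

def apply(config_dict):
    frrender.apply()
    return None

if __name__ == '__main__':
    try:
        c = get_config()
        verify(c)
        generate(c)
        apply(c)
    except ConfigError as e:
        print(e)
        exit(1)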
- default_values = conf.get_config_defaults(**ospf.kwargs, recursive=True) - - # We have to cleanup the default dict, as default values could enable features - # which are not explicitly enabled on the CLI. Example: default-information - # originate comes with a default metric-type of 2, which will enable the - # entire default-information originate tree, even when not set via CLI so we - # need to check this first and probably drop that key. - if dict_search('default_information.originate', ospf) is None: - del default_values['default_information'] - if 'mpls_te' not in ospf: - del default_values['mpls_te'] - if 'graceful_restart' not in ospf: - del default_values['graceful_restart'] - for area_num in default_values.get('area', []): - if dict_search(f'area.{area_num}.area_type.nssa', ospf) is None: - del default_values['area'][area_num]['area_type']['nssa'] - - for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'rip', 'static']: - if dict_search(f'redistribute.{protocol}', ospf) is None: - del default_values['redistribute'][protocol] - if not bool(default_values['redistribute']): - del default_values['redistribute'] - - for interface in ospf.get('interface', []): - # We need to reload the defaults on every pass b/c of - # hello-multiplier dependency on dead-interval - # If hello-multiplier is set, we need to remove the default from - # dead-interval. - if 'hello_multiplier' in ospf['interface'][interface]: - del default_values['interface'][interface]['dead_interval'] - - ospf = config_dict_merge(default_values, ospf) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. - tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - ospf = dict_merge(tmp, ospf) - - return ospf - -def verify(ospf): - if not ospf: - return None + ospf = vrf and config_dict['vrf']['name'][vrf]['protocols']['ospf'] or config_dict['ospf'] + ospf['policy'] = config_dict['policy'] verify_common_route_maps(ospf) # As we can have a default-information route-map, we need to validate it! route_map_name = dict_search('default_information.originate.route_map', ospf) if route_map_name: verify_route_map(route_map_name, ospf) # Validate if configured Access-list exists if 'area' in ospf: networks = [] for area, area_config in ospf['area'].items(): if 'import_list' in area_config: acl_import = area_config['import_list'] if acl_import: verify_access_list(acl_import, ospf) if 'export_list' in area_config: acl_export = area_config['export_list'] if acl_export: verify_access_list(acl_export, ospf) if 'network' in area_config: for network in area_config['network']: if network in networks: raise ConfigError(f'Network "{network}" already defined in different area!') networks.append(network) if 'interface' in ospf: for interface, interface_config in ospf['interface'].items(): verify_interface_exists(ospf, interface) # One can not use dead-interval and hello-multiplier at the same # time. FRR will only activate the last option set via CLI. if {'hello_multiplier', 'dead_interval'} <= set(interface_config): raise ConfigError(f'Can not use hello-multiplier and dead-interval ' \ f'concurrently for {interface}!') # One can not use the "network <prefix> area <id>" command and an # per interface area assignment at the same time. 
FRR will error # out using: "Please remove all network commands first." if 'area' in ospf and 'area' in interface_config: for area, area_config in ospf['area'].items(): if 'network' in area_config: raise ConfigError('Can not use OSPF interface area and area ' \ 'network configuration at the same time!') # If interface specific options are set, we must ensure that the # interface is bound to our requesting VRF. Due to the VyOS # priorities the interface is bound to the VRF after creation of # the VRF itself, and before any routing protocol is configured. - if 'vrf' in ospf: - vrf = ospf['vrf'] + if vrf: tmp = get_interface_config(interface) if 'master' not in tmp or tmp['master'] != vrf: raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!') # Segment routing checks if dict_search('segment_routing.global_block', ospf): g_high_label_value = dict_search('segment_routing.global_block.high_label_value', ospf) g_low_label_value = dict_search('segment_routing.global_block.low_label_value', ospf) # If segment routing global block high or low value is blank, throw error if not (g_low_label_value or g_high_label_value): raise ConfigError('Segment routing global-block requires both low and high value!') # If segment routing global block low value is higher than the high value, throw error if int(g_low_label_value) > int(g_high_label_value): raise ConfigError('Segment routing global-block low value must be lower than high value') if dict_search('segment_routing.local_block', ospf): if dict_search('segment_routing.global_block', ospf) == None: raise ConfigError('Segment routing local-block requires global-block to be configured!') l_high_label_value = dict_search('segment_routing.local_block.high_label_value', ospf) l_low_label_value = dict_search('segment_routing.local_block.low_label_value', ospf) # If segment routing local-block high or low value is blank, throw error if not (l_low_label_value or l_high_label_value): raise ConfigError('Segment routing local-block requires both high and low value!') # If segment routing local-block low value is higher than the high value, throw error if int(l_low_label_value) > int(l_high_label_value): raise ConfigError('Segment routing local-block low value must be lower than high value') # local-block most live outside global block global_range = range(int(g_low_label_value), int(g_high_label_value) +1) local_range = range(int(l_low_label_value), int(l_high_label_value) +1) # Check for overlapping ranges if list(set(global_range) & set(local_range)): raise ConfigError(f'Segment-Routing Global Block ({g_low_label_value}/{g_high_label_value}) '\ f'conflicts with Local Block ({l_low_label_value}/{l_high_label_value})!') # Check for a blank or invalid value per prefix if dict_search('segment_routing.prefix', ospf): for prefix, prefix_config in ospf['segment_routing']['prefix'].items(): if 'index' in prefix_config: if prefix_config['index'].get('value') is None: raise ConfigError(f'Segment routing prefix {prefix} index value cannot be blank.') # Check for explicit-null and no-php-flag configured at the same time per prefix if dict_search('segment_routing.prefix', ospf): for prefix, prefix_config in ospf['segment_routing']['prefix'].items(): if 'index' in prefix_config: if ("explicit_null" in prefix_config['index']) and ("no_php_flag" in prefix_config['index']): raise ConfigError(f'Segment routing prefix {prefix} cannot have both explicit-null '\ f'and no-php-flag configured at the same time.') # Check for index ranges being larger than the segment 
routing global block if dict_search('segment_routing.global_block', ospf): g_high_label_value = dict_search('segment_routing.global_block.high_label_value', ospf) g_low_label_value = dict_search('segment_routing.global_block.low_label_value', ospf) g_label_difference = int(g_high_label_value) - int(g_low_label_value) if dict_search('segment_routing.prefix', ospf): for prefix, prefix_config in ospf['segment_routing']['prefix'].items(): if 'index' in prefix_config: index_size = ospf['segment_routing']['prefix'][prefix]['index']['value'] if int(index_size) > int(g_label_difference): raise ConfigError(f'Segment routing prefix {prefix} cannot have an '\ f'index base size larger than the SRGB label base.') # Check route summarisation if 'summary_address' in ospf: for prefix, prefix_options in ospf['summary_address'].items(): if {'tag', 'no_advertise'} <= set(prefix_options): raise ConfigError(f'Can not set both "tag" and "no-advertise" for Type-5 '\ f'and Type-7 route summarisation of "{prefix}"!') return None -def generate(ospf): - if not ospf or 'deleted' in ospf: - return None - - ospf['frr_ospfd_config'] = render_to_string('frr/ospfd.frr.j2', ospf) - return None - -def apply(ospf): - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # Generate empty helper string which can be ammended to FRR commands, it - # will be either empty (default VRF) or contain the "vrf <name" statement - vrf = '' - if 'vrf' in ospf: - vrf = ' vrf ' + ospf['vrf'] - - frr_cfg.load_configuration(frr.ospf_daemon) - frr_cfg.modify_section(f'^router ospf{vrf}', stop_pattern='^exit', remove_stop_mark=True) - - for key in ['interface', 'interface_removed']: - if key not in ospf: - continue - for interface in ospf[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'frr_ospfd_config' in ospf: - frr_cfg.add_before(frr.default_add_before, ospf['frr_ospfd_config']) - - frr_cfg.commit_configuration(frr.ospf_daemon) +def generate(config_dict): + frrender.generate(config_dict) +def apply(config_dict): + frrender.apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_ospfv3.py b/src/conf_mode/protocols_ospfv3.py index 7bdab3017..038fcd2b4 100755 --- a/src/conf_mode/protocols_ospfv3.py +++ b/src/conf_mode/protocols_ospfv3.py @@ -1,189 +1,107 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
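The ISIS and OSPF segment-routing checks above detect SRGB/SRLB overlap by materialising both label ranges as sets and intersecting them, which can allocate up to roughly a million elements for a full 20-bit block. The same condition can be tested in constant time; a small sketch of the equivalent arithmetic, illustrative only and not a change made by this patch:

def blocks_overlap(g_low: int, g_high: int, l_low: int, l_high: int) -> bool:
    # Two inclusive ranges [g_low, g_high] and [l_low, l_high] overlap exactly
    # when each one starts no later than the other one ends.
    return g_low <= l_high and l_low <= g_high

# Same verdict as list(set(range(g_low, g_high + 1)) & set(range(l_low, l_high + 1)))
assert blocks_overlap(16000, 23999, 15000, 15999) is False
assert blocks_overlap(16000, 23999, 20000, 28999) is True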
from sys import exit from sys import argv from vyos.config import Config -from vyos.config import config_dict_merge -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_route_map from vyos.configverify import verify_interface_exists -from vyos.template import render_to_string +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender from vyos.ifconfig import Interface from vyos.utils.dict import dict_search from vyos.utils.network import get_interface_config from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() + +vrf = None +if len(argv) > 1: + vrf = argv[1] + def get_config(config=None): if config: conf = config else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] + return get_frrender_dict(conf) - base_path = ['protocols', 'ospfv3'] +def verify(config_dict): + global vrf + if not has_frr_protocol_in_dict(config_dict, 'ospfv3', vrf): + return None # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'ospfv3'] or base_path - ospfv3 = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) - - # Assign the name of our VRF context. This MUST be done before the return - # statement below, else on deletion we will delete the default instance - # instead of the VRF instance. - if vrf: ospfv3['vrf'] = vrf - - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. - interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - ospfv3['interface_removed'] = list(interfaces_removed) - - # Bail out early if configuration tree does no longer exist. this must - # be done after retrieving the list of interfaces to be removed. - if not conf.exists(base): - ospfv3.update({'deleted' : ''}) - return ospfv3 - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. - default_values = conf.get_config_defaults(**ospfv3.kwargs, - recursive=True) - - # We have to cleanup the default dict, as default values could enable features - # which are not explicitly enabled on the CLI. Example: default-information - # originate comes with a default metric-type of 2, which will enable the - # entire default-information originate tree, even when not set via CLI so we - # need to check this first and probably drop that key. - if dict_search('default_information.originate', ospfv3) is None: - del default_values['default_information'] - if 'graceful_restart' not in ospfv3: - del default_values['graceful_restart'] - - for protocol in ['babel', 'bgp', 'connected', 'isis', 'kernel', 'ripng', 'static']: - if dict_search(f'redistribute.{protocol}', ospfv3) is None: - del default_values['redistribute'][protocol] - if not bool(default_values['redistribute']): - del default_values['redistribute'] - - default_values.pop('interface', {}) - - # merge in remaining default values - ospfv3 = config_dict_merge(default_values, ospfv3) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. 
They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. - tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - ospfv3 = dict_merge(tmp, ospfv3) - - return ospfv3 - -def verify(ospfv3): - if not ospfv3: - return None + ospfv3 = vrf and config_dict['vrf']['name'][vrf]['protocols']['ospfv3'] or config_dict['ospfv3'] + ospfv3['policy'] = config_dict['policy'] verify_common_route_maps(ospfv3) # As we can have a default-information route-map, we need to validate it! route_map_name = dict_search('default_information.originate.route_map', ospfv3) if route_map_name: verify_route_map(route_map_name, ospfv3) if 'area' in ospfv3: for area, area_config in ospfv3['area'].items(): if 'area_type' in area_config: if len(area_config['area_type']) > 1: raise ConfigError(f'Can only configure one area-type for OSPFv3 area "{area}"!') if 'range' in area_config: for range, range_config in area_config['range'].items(): if {'not_advertise', 'advertise'} <= range_config.keys(): raise ConfigError(f'"not-advertise" and "advertise" for "range {range}" cannot be both configured at the same time!') if 'interface' in ospfv3: for interface, interface_config in ospfv3['interface'].items(): verify_interface_exists(ospfv3, interface) if 'ifmtu' in interface_config: mtu = Interface(interface).get_mtu() if int(interface_config['ifmtu']) > int(mtu): raise ConfigError(f'OSPFv3 ifmtu can not exceed physical MTU of "{mtu}"') # If interface specific options are set, we must ensure that the # interface is bound to our requesting VRF. Due to the VyOS # priorities the interface is bound to the VRF after creation of # the VRF itself, and before any routing protocol is configured. 
- if 'vrf' in ospfv3: - vrf = ospfv3['vrf'] + if vrf: tmp = get_interface_config(interface) if 'master' not in tmp or tmp['master'] != vrf: raise ConfigError(f'Interface "{interface}" is not a member of VRF "{vrf}"!') return None -def generate(ospfv3): - if not ospfv3 or 'deleted' in ospfv3: - return None - - ospfv3['new_frr_config'] = render_to_string('frr/ospf6d.frr.j2', ospfv3) - return None - -def apply(ospfv3): - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # Generate empty helper string which can be ammended to FRR commands, it - # will be either empty (default VRF) or contain the "vrf <name" statement - vrf = '' - if 'vrf' in ospfv3: - vrf = ' vrf ' + ospfv3['vrf'] - - frr_cfg.load_configuration(frr.ospf6_daemon) - frr_cfg.modify_section(f'^router ospf6{vrf}', stop_pattern='^exit', remove_stop_mark=True) - - for key in ['interface', 'interface_removed']: - if key not in ospfv3: - continue - for interface in ospfv3[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'new_frr_config' in ospfv3: - frr_cfg.add_before(frr.default_add_before, ospfv3['new_frr_config']) - - frr_cfg.commit_configuration(frr.ospf6_daemon) +def generate(config_dict): + frrender.generate(config_dict) +def apply(config_dict): + frrender.apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_rip.py b/src/conf_mode/protocols_rip.py index 994007137..3862530a2 100755 --- a/src/conf_mode/protocols_rip.py +++ b/src/conf_mode/protocols_rip.py @@ -1,133 +1,87 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.configdict import node_changed +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_access_list from vyos.configverify import verify_prefix_list +from vyos.frrender import FRRender from vyos.utils.dict import dict_search -from vyos.template import render_to_string from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() + def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'rip'] - rip = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) - - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. 
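The VRF-membership test that ISIS, OSPF and OSPFv3 now share (the `if vrf:` branch above) hinges on the 'master' attribute reported by get_interface_config() for an enslaved interface. A minimal sketch of that check in isolation; the helper name is invented for illustration and the 'master' semantics are taken from its use in the diff:

from vyos.utils.network import get_interface_config
from vyos import ConfigError

def interface_bound_to_vrf(interface: str, vrf: str) -> bool:
    # get_interface_config() returns the kernel's view of the link; an interface
    # enslaved to a VRF device carries that VRF name in its 'master' attribute.
    tmp = get_interface_config(interface)
    return bool(tmp) and tmp.get('master') == vrf

if not interface_bound_to_vrf('eth1', 'red'):
    raise ConfigError('Interface "eth1" is not a member of VRF "red"!')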
- interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - rip['interface_removed'] = list(interfaces_removed) - - # Bail out early if configuration tree does not exist - if not conf.exists(base): - rip.update({'deleted' : ''}) - return rip - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. - rip = conf.merge_defaults(rip, recursive=True) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. - tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - rip = dict_merge(tmp, rip) - - return rip - -def verify(rip): - if not rip: + + return get_frrender_dict(conf) + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'rip'): return None + rip = config_dict['rip'] + rip['policy'] = config_dict['policy'] + verify_common_route_maps(rip) acl_in = dict_search('distribute_list.access_list.in', rip) if acl_in: verify_access_list(acl_in, rip) acl_out = dict_search('distribute_list.access_list.out', rip) if acl_out: verify_access_list(acl_out, rip) prefix_list_in = dict_search('distribute_list.prefix-list.in', rip) if prefix_list_in: verify_prefix_list(prefix_list_in, rip) prefix_list_out = dict_search('distribute_list.prefix_list.out', rip) if prefix_list_out: verify_prefix_list(prefix_list_out, rip) if 'interface' in rip: for interface, interface_options in rip['interface'].items(): if 'authentication' in interface_options: if {'md5', 'plaintext_password'} <= set(interface_options['authentication']): raise ConfigError('Can not use both md5 and plaintext-password at the same time!') if 'split_horizon' in interface_options: if {'disable', 'poison_reverse'} <= set(interface_options['split_horizon']): raise ConfigError(f'You can not have "split-horizon poison-reverse" enabled ' \ f'with "split-horizon disable" for "{interface}"!') -def generate(rip): - if not rip or 'deleted' in rip: - return None - - rip['new_frr_config'] = render_to_string('frr/ripd.frr.j2', rip) - return None +def generate(frr_dict): + frrender.generate(frr_dict) def apply(rip): - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the mgmt daemon as of FRR 10.1 - frr_cfg.load_configuration(frr.mgmt_daemon) - frr_cfg.modify_section('^ip protocol rip route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') - frr_cfg.modify_section('^key chain \S+', stop_pattern='^exit', remove_stop_mark=True) - frr_cfg.modify_section('^router rip', stop_pattern='^exit', remove_stop_mark=True) - - for key in ['interface', 'interface_removed']: - if key not in rip: - continue - for interface in rip[key]: - frr_cfg.modify_section(f'^interface {interface}', stop_pattern='^exit', remove_stop_mark=True) - - if 'new_frr_config' in rip: - frr_cfg.add_before(frr.default_add_before, rip['new_frr_config']) - frr_cfg.commit_configuration() - + frrender.apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_ripng.py b/src/conf_mode/protocols_ripng.py index 9d4447d1f..327a0d239 100755 --- a/src/conf_mode/protocols_ripng.py +++ 
b/src/conf_mode/protocols_ripng.py @@ -1,115 +1,88 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.config import Config -from vyos.configdict import dict_merge +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_access_list from vyos.configverify import verify_prefix_list +from vyos.frrender import FRRender from vyos.utils.dict import dict_search -from vyos.template import render_to_string from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() + def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'ripng'] - ripng = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True) - - # Bail out early if configuration tree does not exist - if not conf.exists(base): - return ripng - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. - ripng = conf.merge_defaults(ripng, recursive=True) - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
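A note on an idiom the verify() functions use repeatedly (RIP and RIPng here, ISIS, OpenFabric and OSPFv3 earlier): {'md5', 'plaintext_password'} <= set(d) is Python's subset test, so it fires only when both conflicting options are present at the same time. A short illustration with a hypothetical config fragment:

from vyos import ConfigError

authentication = {'md5': {}, 'plaintext_password': {}}   # hypothetical CLI dict
if {'md5', 'plaintext_password'} <= set(authentication):
    raise ConfigError('Can not use both md5 and plaintext-password at the same time!')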
- tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - ripng = dict_merge(tmp, ripng) - - return ripng - -def verify(ripng): - if not ripng: + + return get_frrender_dict(conf) + +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'ripng'): return None + ripng = config_dict['ripng'] + ripng['policy'] = config_dict['policy'] + verify_common_route_maps(ripng) acl_in = dict_search('distribute_list.access_list.in', ripng) if acl_in: verify_access_list(acl_in, ripng, version='6') acl_out = dict_search('distribute_list.access_list.out', ripng) if acl_out: verify_access_list(acl_out, ripng, version='6') prefix_list_in = dict_search('distribute_list.prefix_list.in', ripng) if prefix_list_in: verify_prefix_list(prefix_list_in, ripng, version='6') prefix_list_out = dict_search('distribute_list.prefix_list.out', ripng) if prefix_list_out: verify_prefix_list(prefix_list_out, ripng, version='6') if 'interface' in ripng: for interface, interface_options in ripng['interface'].items(): if 'authentication' in interface_options: if {'md5', 'plaintext_password'} <= set(interface_options['authentication']): raise ConfigError('Can not use both md5 and plaintext-password at the same time!') if 'split_horizon' in interface_options: if {'disable', 'poison_reverse'} <= set(interface_options['split_horizon']): raise ConfigError(f'You can not have "split-horizon poison-reverse" enabled ' \ f'with "split-horizon disable" for "{interface}"!') -def generate(ripng): - if not ripng: - return None - ripng['new_frr_config'] = render_to_string('frr/ripngd.frr.j2', ripng) - -def apply(ripng): - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - - # The route-map used for the FIB (zebra) is part of the zebra daemon - frr_cfg.load_configuration(frr.mgmt_daemon) - frr_cfg.modify_section('^ipv6 protocol ripng route-map [-a-zA-Z0-9.]+', stop_pattern='(\s|!)') - frr_cfg.modify_section('key chain \S+', stop_pattern='^exit', remove_stop_mark=True) - frr_cfg.modify_section('interface \S+', stop_pattern='^exit', remove_stop_mark=True) - frr_cfg.modify_section('^router ripng', stop_pattern='^exit', remove_stop_mark=True) - if 'new_frr_config' in ripng: - frr_cfg.add_before(frr.default_add_before, ripng['new_frr_config']) - frr_cfg.commit_configuration() +def generate(config_dict): + frrender.generate(config_dict) + return None +def apply(config_dict): + frrender.apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_rpki.py b/src/conf_mode/protocols_rpki.py index bec0cda91..c75f95860 100755 --- a/src/conf_mode/protocols_rpki.py +++ b/src/conf_mode/protocols_rpki.py @@ -1,128 +1,113 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. 
import os from glob import glob from sys import exit from vyos.config import Config +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender from vyos.pki import wrap_openssh_public_key from vyos.pki import wrap_openssh_private_key -from vyos.template import render_to_string from vyos.utils.dict import dict_search_args from vyos.utils.file import write_file from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() rpki_ssh_key_base = '/run/frr/id_rpki' def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'rpki'] + return get_frrender_dict(conf) - rpki = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, with_pki=True) - # Bail out early if configuration tree does not exist - if not conf.exists(base): - rpki.update({'deleted' : ''}) - return rpki - - # We have gathered the dict representation of the CLI, but there are default - # options which we need to update into the dictionary retrived. - rpki = conf.merge_defaults(rpki, recursive=True) - - return rpki - -def verify(rpki): - if not rpki: +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'rpki'): return None + rpki = config_dict['rpki'] + if 'cache' in rpki: preferences = [] for peer, peer_config in rpki['cache'].items(): for mandatory in ['port', 'preference']: if mandatory not in peer_config: raise ConfigError(f'RPKI cache "{peer}" {mandatory} must be defined!') if 'preference' in peer_config: preference = peer_config['preference'] if preference in preferences: raise ConfigError(f'RPKI cache with preference {preference} already configured!') preferences.append(preference) if 'ssh' in peer_config: if 'username' not in peer_config['ssh']: raise ConfigError('RPKI+SSH requires username to be defined!') if 'key' not in peer_config['ssh'] or 'openssh' not in rpki['pki']: raise ConfigError('RPKI+SSH requires key to be defined!') if peer_config['ssh']['key'] not in rpki['pki']['openssh']: raise ConfigError('RPKI+SSH key not found on PKI subsystem!') return None -def generate(rpki): +def generate(config_dict): for key in glob(f'{rpki_ssh_key_base}*'): os.unlink(key) - if not rpki: - return + if not has_frr_protocol_in_dict(config_dict, 'rpki'): + return None + + rpki = config_dict['rpki'] if 'cache' in rpki: for cache, cache_config in rpki['cache'].items(): if 'ssh' in cache_config: key_name = cache_config['ssh']['key'] public_key_data = dict_search_args(rpki['pki'], 'openssh', key_name, 'public', 'key') public_key_type = dict_search_args(rpki['pki'], 'openssh', key_name, 'public', 'type') private_key_data = dict_search_args(rpki['pki'], 'openssh', key_name, 'private', 'key') cache_config['ssh']['public_key_file'] = f'{rpki_ssh_key_base}_{cache}.pub' cache_config['ssh']['private_key_file'] = f'{rpki_ssh_key_base}_{cache}' write_file(cache_config['ssh']['public_key_file'], wrap_openssh_public_key(public_key_data, public_key_type)) write_file(cache_config['ssh']['private_key_file'], wrap_openssh_private_key(private_key_data)) - rpki['new_frr_config'] = render_to_string('frr/rpki.frr.j2', rpki) - + frrender.generate(config_dict) return None -def apply(rpki): - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(frr.bgp_daemon) - frr_cfg.modify_section('^rpki', stop_pattern='^exit', remove_stop_mark=True) - if 'new_frr_config' in rpki: - 
frr_cfg.add_before(frr.default_add_before, rpki['new_frr_config']) - - frr_cfg.commit_configuration(frr.bgp_daemon) +def apply(config_dict): + frrender.apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_segment-routing.py b/src/conf_mode/protocols_segment-routing.py index 67f8005ef..0fad968e1 100755 --- a/src/conf_mode/protocols_segment-routing.py +++ b/src/conf_mode/protocols_segment-routing.py @@ -1,114 +1,102 @@ #!/usr/bin/env python3 # # Copyright (C) 2023-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from vyos.config import Config -from vyos.configdict import node_changed -from vyos.template import render_to_string +from vyos.configdict import get_frrender_dict +from vyos.configdict import list_diff +from vyos.configverify import has_frr_protocol_in_dict +from vyos.frrender import FRRender +from vyos.ifconfig import Section from vyos.utils.dict import dict_search from vyos.utils.system import sysctl_write from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() def get_config(config=None): if config: conf = config else: conf = Config() - base = ['protocols', 'segment-routing'] - sr = conf.get_config_dict(base, key_mangling=('-', '_'), - get_first_key=True, - no_tag_node_value_mangle=True, - with_recursive_defaults=True) + return get_frrender_dict(conf) - # FRR has VRF support for different routing daemons. As interfaces belong - # to VRFs - or the global VRF, we need to check for changed interfaces so - # that they will be properly rendered for the FRR config. Also this eases - # removal of interfaces from the running configuration. 
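The rewritten segment-routing apply() just below stops tracking interface_removed explicitly and instead derives the interfaces to switch off by diffing the live interface list (Section.interfaces()) against the configured one with list_diff(). A small sketch of the intended semantics; the assumption, consistent with how it is used below, is that list_diff(a, b) returns the items of a that are absent from b:

from vyos.configdict import list_diff
from vyos.utils.system import sysctl_write

current_interfaces = ['eth0', 'eth1', 'lo']   # stand-in for Section.interfaces()
sr_interfaces = ['eth0']                      # interfaces still configured for segment routing

for interface in list_diff(current_interfaces, sr_interfaces):
    # eth1 and lo are no longer SR-enabled, so IPv6-SR processing is switched off
    sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0')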
- interfaces_removed = node_changed(conf, base + ['interface']) - if interfaces_removed: - sr['interface_removed'] = list(interfaces_removed) +def verify(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'segment_routing'): + return None - import pprint - pprint.pprint(sr) - return sr + sr = config_dict['segment_routing'] -def verify(sr): if 'srv6' in sr: srv6_enable = False if 'interface' in sr: for interface, interface_config in sr['interface'].items(): if 'srv6' in interface_config: srv6_enable = True break if not srv6_enable: raise ConfigError('SRv6 should be enabled on at least one interface!') return None -def generate(sr): - if not sr: +def generate(config_dict): + frrender.generate(config_dict) + return None + +def apply(config_dict): + if not has_frr_protocol_in_dict(config_dict, 'segment_routing'): return None - sr['new_frr_config'] = render_to_string('frr/zebra.segment_routing.frr.j2', sr) - return None + sr = config_dict['segment_routing'] -def apply(sr): - if 'interface_removed' in sr: - for interface in sr['interface_removed']: - # Disable processing of IPv6-SR packets - sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0') + current_interfaces = Section.interfaces() + sr_interfaces = list(sr.get('interface', {}).keys()) - if 'interface' in sr: - for interface, interface_config in sr['interface'].items(): - # Accept or drop SR-enabled IPv6 packets on this interface - if 'srv6' in interface_config: - sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '1') - # Define HMAC policy for ingress SR-enabled packets on this interface - # It's a redundant check as HMAC has a default value - but better safe - # then sorry - tmp = dict_search('srv6.hmac', interface_config) - if tmp == 'accept': - sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '0') - elif tmp == 'drop': - sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '1') - elif tmp == 'ignore': - sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '-1') - else: - sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0') + for interface in list_diff(current_interfaces, sr_interfaces): + # Disable processing of IPv6-SR packets + sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0') - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(frr.zebra_daemon) - frr_cfg.modify_section(r'^segment-routing') - if 'new_frr_config' in sr: - frr_cfg.add_before(frr.default_add_before, sr['new_frr_config']) - frr_cfg.commit_configuration(frr.zebra_daemon) + for interface, interface_config in sr.get('interface', {}).items(): + # Accept or drop SR-enabled IPv6 packets on this interface + if 'srv6' in interface_config: + sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '1') + # Define HMAC policy for ingress SR-enabled packets on this interface + # It's a redundant check as HMAC has a default value - but better safe + # then sorry + tmp = dict_search('srv6.hmac', interface_config) + if tmp == 'accept': + sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '0') + elif tmp == 'drop': + sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '1') + elif tmp == 'ignore': + sysctl_write(f'net.ipv6.conf.{interface}.seg6_require_hmac', '-1') + else: + sysctl_write(f'net.ipv6.conf.{interface}.seg6_enabled', '0') + frrender.apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git 
a/src/conf_mode/protocols_static.py b/src/conf_mode/protocols_static.py index c5dc77fd2..1e498b256 100755 --- a/src/conf_mode/protocols_static.py +++ b/src/conf_mode/protocols_static.py @@ -1,130 +1,110 @@ #!/usr/bin/env python3 # # Copyright (C) 2021-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. +from ipaddress import IPv4Network from sys import exit from sys import argv from vyos.config import Config -from vyos.configdict import dict_merge -from vyos.configdict import get_dhcp_interfaces -from vyos.configdict import get_pppoe_interfaces +from vyos.configdict import get_frrender_dict +from vyos.configverify import has_frr_protocol_in_dict from vyos.configverify import verify_common_route_maps from vyos.configverify import verify_vrf +from vyos.frrender import FRRender from vyos.template import render -from vyos.template import render_to_string from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() + +vrf = None +if len(argv) > 1: + vrf = argv[1] config_file = '/etc/iproute2/rt_tables.d/vyos-static.conf' def get_config(config=None): if config: conf = config else: conf = Config() - vrf = None - if len(argv) > 1: - vrf = argv[1] + return get_frrender_dict(conf) + +def verify(config_dict): + global vrf + if not has_frr_protocol_in_dict(config_dict, 'static', vrf): + return None - base_path = ['protocols', 'static'] # eqivalent of the C foo ? 'a' : 'b' statement - base = vrf and ['vrf', 'name', vrf, 'protocols', 'static'] or base_path - static = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) - - # Assign the name of our VRF context - if vrf: static['vrf'] = vrf - - # We also need some additional information from the config, prefix-lists - # and route-maps for instance. They will be used in verify(). - # - # XXX: one MUST always call this without the key_mangling() option! See - # vyos.configverify.verify_common_route_maps() for more information. 
- tmp = conf.get_config_dict(['policy']) - # Merge policy dict into "regular" config dict - static = dict_merge(tmp, static) - - # T3680 - get a list of all interfaces currently configured to use DHCP - tmp = get_dhcp_interfaces(conf, vrf) - if tmp: static.update({'dhcp' : tmp}) - tmp = get_pppoe_interfaces(conf, vrf) - if tmp: static.update({'pppoe' : tmp}) - - return static - -def verify(static): + static = vrf and config_dict['vrf']['name'][vrf]['protocols']['static'] or config_dict['static'] + static['policy'] = config_dict['policy'] + verify_common_route_maps(static) for route in ['route', 'route6']: # if there is no route(6) key in the dictionary we can immediately # bail out early if route not in static: continue # When leaking routes to other VRFs we must ensure that the destination # VRF exists for prefix, prefix_options in static[route].items(): # both the interface and next-hop CLI node can have a VRF subnode, # thus we check this using a for loop for type in ['interface', 'next_hop']: if type in prefix_options: for interface, interface_config in prefix_options[type].items(): verify_vrf(interface_config) if {'blackhole', 'reject'} <= set(prefix_options): raise ConfigError(f'Can not use both blackhole and reject for '\ f'prefix "{prefix}"!') + if 'multicast' in static and 'route' in static['multicast']: + for prefix, prefix_options in static['multicast']['route'].items(): + if not IPv4Network(prefix).is_multicast: + raise ConfigError(f'{prefix} is not a multicast network!') + return None -def generate(static): - if not static: +def generate(config_dict): + global vrf + if not has_frr_protocol_in_dict(config_dict, 'static', vrf): return None + # eqivalent of the C foo ? 'a' : 'b' statement + static = vrf and config_dict['vrf']['name'][vrf]['protocols']['static'] or config_dict['static'] + # Put routing table names in /etc/iproute2/rt_tables render(config_file, 'iproute2/static.conf.j2', static) - static['new_frr_config'] = render_to_string('frr/staticd.frr.j2', static) + frrender.generate(config_dict) return None def apply(static): - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(frr.mgmt_daemon) - - if 'vrf' in static: - vrf = static['vrf'] - frr_cfg.modify_section(f'^vrf {vrf}', stop_pattern='^exit-vrf', remove_stop_mark=True) - else: - frr_cfg.modify_section(r'^ip route .*') - frr_cfg.modify_section(r'^ipv6 route .*') - - if 'new_frr_config' in static: - frr_cfg.add_before(frr.default_add_before, static['new_frr_config']) - frr_cfg.commit_configuration() - + frrender.apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/protocols_static_multicast.py b/src/conf_mode/protocols_static_multicast.py deleted file mode 100755 index 4393f3ed3..000000000 --- a/src/conf_mode/protocols_static_multicast.py +++ /dev/null @@ -1,133 +0,0 @@ -#!/usr/bin/env python3 -# -# Copyright (C) 2020-2024 VyOS maintainers and contributors -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License version 2 or later as -# published by the Free Software Foundation. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
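Worth calling out in the protocols_static.py hunk above: verify() now selects the per-VRF subtree with the Python and/or ternary idiom (with the usual caveat that a falsy middle operand falls through to the right-hand side), and the multicast route validation that used to live in the now-deleted protocols_static_multicast.py is reduced to a stdlib ipaddress check. A small, self-contained sketch using made-up values (the VRF name 'red' and the sample prefixes are purely illustrative):

from ipaddress import IPv4Network

vrf = 'red'   # would normally come from argv[1]
static_cfg = {'multicast': {'route': {'239.1.1.0/24': {}, '10.0.0.0/8': {}}}}
config_dict = {
    'vrf': {'name': {'red': {'protocols': {'static': static_cfg}}}},
    'static': {},
}

# equivalent of the C "foo ? 'a' : 'b'" statement used in the script
static = vrf and config_dict['vrf']['name'][vrf]['protocols']['static'] or config_dict['static']

# IPv4Network already knows the multicast range (224.0.0.0/4), so no manual
# comparison against IPv4Address('224.0.0.0') is needed any more
for prefix in static.get('multicast', {}).get('route', {}):
    if not IPv4Network(prefix).is_multicast:
        print(f'{prefix} is not a multicast network!')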
-# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. - - -from ipaddress import IPv4Address -from sys import exit - -from vyos import ConfigError -from vyos import frr -from vyos.config import Config -from vyos.template import render_to_string - -from vyos import airbag -airbag.enable() - -config_file = r'/tmp/static_mcast.frr' - -# Get configuration for static multicast route -def get_config(config=None): - if config: - conf = config - else: - conf = Config() - mroute = { - 'old_mroute' : {}, - 'mroute' : {} - } - - base_path = "protocols static multicast" - - if not (conf.exists(base_path) or conf.exists_effective(base_path)): - return None - - conf.set_level(base_path) - - # Get multicast effective routes - for route in conf.list_effective_nodes('route'): - mroute['old_mroute'][route] = {} - for next_hop in conf.list_effective_nodes('route {0} next-hop'.format(route)): - mroute['old_mroute'][route].update({ - next_hop : conf.return_value('route {0} next-hop {1} distance'.format(route, next_hop)) - }) - - # Get multicast effective interface-routes - for route in conf.list_effective_nodes('interface-route'): - if not route in mroute['old_mroute']: - mroute['old_mroute'][route] = {} - for next_hop in conf.list_effective_nodes('interface-route {0} next-hop-interface'.format(route)): - mroute['old_mroute'][route].update({ - next_hop : conf.return_value('interface-route {0} next-hop-interface {1} distance'.format(route, next_hop)) - }) - - # Get multicast routes - for route in conf.list_nodes('route'): - mroute['mroute'][route] = {} - for next_hop in conf.list_nodes('route {0} next-hop'.format(route)): - mroute['mroute'][route].update({ - next_hop : conf.return_value('route {0} next-hop {1} distance'.format(route, next_hop)) - }) - - # Get multicast interface-routes - for route in conf.list_nodes('interface-route'): - if not route in mroute['mroute']: - mroute['mroute'][route] = {} - for next_hop in conf.list_nodes('interface-route {0} next-hop-interface'.format(route)): - mroute['mroute'][route].update({ - next_hop : conf.return_value('interface-route {0} next-hop-interface {1} distance'.format(route, next_hop)) - }) - - return mroute - -def verify(mroute): - if mroute is None: - return None - - for mcast_route in mroute['mroute']: - route = mcast_route.split('/') - if IPv4Address(route[0]) < IPv4Address('224.0.0.0'): - raise ConfigError(f'{mcast_route} not a multicast network') - - -def generate(mroute): - if mroute is None: - return None - - mroute['new_frr_config'] = render_to_string('frr/static_mcast.frr.j2', mroute) - return None - - -def apply(mroute): - if mroute is None: - return None - - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(frr.mgmt_daemon) - - if 'old_mroute' in mroute: - for route_gr in mroute['old_mroute']: - for nh in mroute['old_mroute'][route_gr]: - if mroute['old_mroute'][route_gr][nh]: - frr_cfg.modify_section(f'^ip mroute {route_gr} {nh} {mroute["old_mroute"][route_gr][nh]}') - else: - frr_cfg.modify_section(f'^ip mroute {route_gr} {nh}') - - if 'new_frr_config' in mroute: - frr_cfg.add_before(frr.default_add_before, mroute['new_frr_config']) - - frr_cfg.commit_configuration() - return None - - -if __name__ == '__main__': - try: - c = get_config() - verify(c) - generate(c) - apply(c) - except ConfigError as e: - print(e) - exit(1) diff --git a/src/conf_mode/service_snmp.py b/src/conf_mode/service_snmp.py index 134662f85..1174b1238 100755 --- 
a/src/conf_mode/service_snmp.py +++ b/src/conf_mode/service_snmp.py @@ -1,284 +1,273 @@ #!/usr/bin/env python3 # # Copyright (C) 2018-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os from sys import exit from vyos.base import Warning from vyos.config import Config from vyos.configdict import dict_merge from vyos.configverify import verify_vrf from vyos.snmpv3_hashgen import plaintext_to_md5 from vyos.snmpv3_hashgen import plaintext_to_sha1 from vyos.snmpv3_hashgen import random from vyos.template import render from vyos.utils.configfs import delete_cli_node from vyos.utils.configfs import add_cli_node from vyos.utils.dict import dict_search from vyos.utils.network import is_addr_assigned from vyos.utils.process import call from vyos.utils.permission import chmod_755 from vyos.version import get_version_data from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() config_file_client = r'/etc/snmp/snmp.conf' config_file_daemon = r'/etc/snmp/snmpd.conf' config_file_access = r'/usr/share/snmp/snmpd.conf' config_file_user = r'/var/lib/snmp/snmpd.conf' default_script_dir = r'/config/user-data/' systemd_override = r'/run/systemd/system/snmpd.service.d/override.conf' systemd_service = 'snmpd.service' def get_config(config=None): if config: conf = config else: conf = Config() base = ['service', 'snmp'] snmp = conf.get_config_dict(base, key_mangling=('-', '_'), get_first_key=True, no_tag_node_value_mangle=True) if not conf.exists(base): snmp.update({'deleted' : ''}) if conf.exists(['service', 'lldp', 'snmp']): snmp.update({'lldp_snmp' : ''}) if 'deleted' in snmp: return snmp version_data = get_version_data() snmp['version'] = version_data['version'] # create an internal snmpv3 user of the form 'vyosxxxxxxxxxxxxxxxx' snmp['vyos_user'] = 'vyos' + random(8) snmp['vyos_user_pass'] = random(16) # We have gathered the dict representation of the CLI, but there are default # options which we need to update into the dictionary retrived. snmp = conf.merge_defaults(snmp, recursive=True) if 'listen_address' in snmp: # Always listen on localhost if an explicit address has been configured # This is a safety measure to not end up with invalid listen addresses # that are not configured on this system. 
See https://vyos.dev/T850 if '127.0.0.1' not in snmp['listen_address']: tmp = {'127.0.0.1': {'port': '161'}} snmp['listen_address'] = dict_merge(tmp, snmp['listen_address']) if '::1' not in snmp['listen_address']: tmp = {'::1': {'port': '161'}} snmp['listen_address'] = dict_merge(tmp, snmp['listen_address']) if 'script_extensions' in snmp and 'extension_name' in snmp['script_extensions']: for key, val in snmp['script_extensions']['extension_name'].items(): if 'script' not in val: continue script_path = val['script'] # if script has not absolute path, use pre configured path if not os.path.isabs(script_path): script_path = os.path.join(default_script_dir, script_path) snmp['script_extensions']['extension_name'][key]['script'] = script_path return snmp def verify(snmp): if 'deleted' in snmp: return None if {'deleted', 'lldp_snmp'} <= set(snmp): raise ConfigError('Can not delete SNMP service, as LLDP still uses SNMP!') ### check if the configured script actually exist if 'script_extensions' in snmp and 'extension_name' in snmp['script_extensions']: for extension, extension_opt in snmp['script_extensions']['extension_name'].items(): if 'script' not in extension_opt: raise ConfigError(f'Script extension "{extension}" requires an actual script to be configured!') tmp = extension_opt['script'] if not os.path.isfile(tmp): Warning(f'script "{tmp}" does not exist!') else: chmod_755(extension_opt['script']) if 'listen_address' in snmp: for address in snmp['listen_address']: # We only wan't to configure addresses that exist on the system. # Hint the user if they don't exist if 'vrf' in snmp: vrf_name = snmp['vrf'] if not is_addr_assigned(address, vrf_name) and address not in ['::1','127.0.0.1']: raise ConfigError(f'SNMP listen address "{address}" not configured in vrf "{vrf_name}"!') elif not is_addr_assigned(address): raise ConfigError(f'SNMP listen address "{address}" not configured in default vrf!') if 'trap_target' in snmp: for trap, trap_config in snmp['trap_target'].items(): if 'community' not in trap_config: raise ConfigError(f'Trap target "{trap}" requires a community to be set!') if 'oid_enable' in snmp: Warning(f'Custom OIDs are enabled and may lead to system instability and high resource consumption') verify_vrf(snmp) # bail out early if SNMP v3 is not configured if 'v3' not in snmp: return None if 'user' in snmp['v3']: for user, user_config in snmp['v3']['user'].items(): if 'group' not in user_config: raise ConfigError(f'Group membership required for user "{user}"!') if 'plaintext_password' not in user_config['auth'] and 'encrypted_password' not in user_config['auth']: raise ConfigError(f'Must specify authentication encrypted-password or plaintext-password for user "{user}"!') if 'plaintext_password' not in user_config['privacy'] and 'encrypted_password' not in user_config['privacy']: raise ConfigError(f'Must specify privacy encrypted-password or plaintext-password for user "{user}"!') if 'group' in snmp['v3']: for group, group_config in snmp['v3']['group'].items(): if 'seclevel' not in group_config: raise ConfigError(f'Must configure "seclevel" for group "{group}"!') if 'view' not in group_config: raise ConfigError(f'Must configure "view" for group "{group}"!') # Check if 'view' exists view = group_config['view'] if 'view' not in snmp['v3'] or view not in snmp['v3']['view']: raise ConfigError(f'You must create view "{view}" first!') if 'view' in snmp['v3']: for view, view_config in snmp['v3']['view'].items(): if 'oid' not in view_config: raise ConfigError(f'Must configure an "oid" for 
view "{view}"!') if 'trap_target' in snmp['v3']: for trap, trap_config in snmp['v3']['trap_target'].items(): if 'plaintext_password' not in trap_config['auth'] and 'encrypted_password' not in trap_config['auth']: raise ConfigError(f'Must specify one of authentication encrypted-password or plaintext-password for trap "{trap}"!') if {'plaintext_password', 'encrypted_password'} <= set(trap_config['auth']): raise ConfigError(f'Can not specify both authentication encrypted-password and plaintext-password for trap "{trap}"!') if 'plaintext_password' not in trap_config['privacy'] and 'encrypted_password' not in trap_config['privacy']: raise ConfigError(f'Must specify one of privacy encrypted-password or plaintext-password for trap "{trap}"!') if {'plaintext_password', 'encrypted_password'} <= set(trap_config['privacy']): raise ConfigError(f'Can not specify both privacy encrypted-password and plaintext-password for trap "{trap}"!') if 'type' not in trap_config: raise ConfigError('SNMP v3 trap "type" must be specified!') return None def generate(snmp): # As we are manipulating the snmpd user database we have to stop it first! # This is even save if service is going to be removed call(f'systemctl stop {systemd_service}') # Clean config files config_files = [config_file_client, config_file_daemon, config_file_access, config_file_user, systemd_override] for file in config_files: if os.path.isfile(file): os.unlink(file) if 'deleted' in snmp: return None if 'v3' in snmp: # SNMPv3 uses a hashed password. If CLI defines a plaintext password, # we will hash it in the background and replace the CLI node! if 'user' in snmp['v3']: for user, user_config in snmp['v3']['user'].items(): if dict_search('auth.type', user_config) == 'sha': hash = plaintext_to_sha1 else: hash = plaintext_to_md5 if dict_search('auth.plaintext_password', user_config) is not None: tmp = hash(dict_search('auth.plaintext_password', user_config), dict_search('v3.engineid', snmp)) snmp['v3']['user'][user]['auth']['encrypted_password'] = tmp del snmp['v3']['user'][user]['auth']['plaintext_password'] cli_base = ['service', 'snmp', 'v3', 'user', user, 'auth'] delete_cli_node(cli_base + ['plaintext-password']) add_cli_node(cli_base + ['encrypted-password'], value=tmp) if dict_search('privacy.plaintext_password', user_config) is not None: tmp = hash(dict_search('privacy.plaintext_password', user_config), dict_search('v3.engineid', snmp)) snmp['v3']['user'][user]['privacy']['encrypted_password'] = tmp del snmp['v3']['user'][user]['privacy']['plaintext_password'] cli_base = ['service', 'snmp', 'v3', 'user', user, 'privacy'] delete_cli_node(cli_base + ['plaintext-password']) add_cli_node(cli_base + ['encrypted-password'], value=tmp) # Write client config file render(config_file_client, 'snmp/etc.snmp.conf.j2', snmp) # Write server config file render(config_file_daemon, 'snmp/etc.snmpd.conf.j2', snmp) # Write access rights config file render(config_file_access, 'snmp/usr.snmpd.conf.j2', snmp) # Write access rights config file render(config_file_user, 'snmp/var.snmpd.conf.j2', snmp) # Write daemon configuration file render(systemd_override, 'snmp/override.conf.j2', snmp) return None def apply(snmp): # Always reload systemd manager configuration call('systemctl daemon-reload') if 'deleted' in snmp: return None # start SNMP daemon call(f'systemctl reload-or-restart {systemd_service}') - - # Enable AgentX in FRR - # This should be done for each daemon individually because common command - # works only if all the daemons started with SNMP support - # 
Following daemons from FRR 9.0/stable have SNMP module compiled in VyOS - frr_daemons_list = [frr.zebra_daemon, frr.bgp_daemon, frr.ospf_daemon, frr.ospf6_daemon, - frr.rip_daemon, frr.isis_daemon, frr.ldpd_daemon] - for frr_daemon in frr_daemons_list: - call(f'vtysh -c "configure terminal" -d {frr_daemon} -c "agentx" >/dev/null') - return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1) diff --git a/src/conf_mode/vrf.py b/src/conf_mode/vrf.py index 3d3845fdf..a13bb8b1e 100755 --- a/src/conf_mode/vrf.py +++ b/src/conf_mode/vrf.py @@ -1,368 +1,369 @@ #!/usr/bin/env python3 # # Copyright (C) 2020-2024 VyOS maintainers and contributors # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 or later as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from sys import exit from jmespath import search from json import loads from vyos.config import Config +from vyos.configdict import get_frrender_dict from vyos.configdict import dict_merge from vyos.configdict import node_changed from vyos.configverify import verify_route_map from vyos.firewall import conntrack_required +from vyos.frrender import FRRender from vyos.ifconfig import Interface from vyos.template import render -from vyos.template import render_to_string from vyos.utils.dict import dict_search from vyos.utils.network import get_vrf_tableid from vyos.utils.network import get_vrf_members from vyos.utils.network import interface_exists from vyos.utils.process import call from vyos.utils.process import cmd from vyos.utils.process import popen from vyos.utils.system import sysctl_write from vyos import ConfigError -from vyos import frr from vyos import airbag airbag.enable() +frrender = FRRender() config_file = '/etc/iproute2/rt_tables.d/vyos-vrf.conf' k_mod = ['vrf'] nftables_table = 'inet vrf_zones' nftables_rules = { 'vrf_zones_ct_in': 'counter ct original zone set iifname map @ct_iface_map', 'vrf_zones_ct_out': 'counter ct original zone set oifname map @ct_iface_map' } def has_rule(af : str, priority : int, table : str=None): """ Check if a given ip rule exists $ ip --json -4 rule show [{'l3mdev': None, 'priority': 1000, 'src': 'all'}, {'action': 'unreachable', 'l3mdev': None, 'priority': 2000, 'src': 'all'}, {'priority': 32765, 'src': 'all', 'table': 'local'}, {'priority': 32766, 'src': 'all', 'table': 'main'}, {'priority': 32767, 'src': 'all', 'table': 'default'}] """ if af not in ['-4', '-6']: raise ValueError() command = f'ip --detail --json {af} rule show' for tmp in loads(cmd(command)): if 'priority' in tmp and 'table' in tmp: if tmp['priority'] == priority and tmp['table'] == table: return True elif 'priority' in tmp and table in tmp: # l3mdev table has a different layout if tmp['priority'] == priority: return True return False def is_nft_vrf_zone_rule_setup() -> bool: """ Check if an nftables connection tracking rule already exists """ tmp = loads(cmd('sudo nft -j list table inet vrf_zones')) num_rules = len(search("nftables[].rule[].chain", tmp)) return bool(num_rules) def vrf_interfaces(c, 
match): matched = [] old_level = c.get_level() c.set_level(['interfaces']) section = c.get_config_dict([], get_first_key=True) for type in section: interfaces = section[type] for name in interfaces: interface = interfaces[name] if 'vrf' in interface: v = interface.get('vrf', '') if v == match: matched.append(name) c.set_level(old_level) return matched def vrf_routing(c, match): matched = [] old_level = c.get_level() c.set_level(['protocols', 'vrf']) if match in c.list_nodes([]): matched.append(match) c.set_level(old_level) return matched def get_config(config=None): if config: conf = config else: conf = Config() base = ['vrf'] vrf = conf.get_config_dict(base, key_mangling=('-', '_'), no_tag_node_value_mangle=True, get_first_key=True) # determine which VRF has been removed for name in node_changed(conf, base + ['name']): if 'vrf_remove' not in vrf: vrf.update({'vrf_remove' : {}}) vrf['vrf_remove'][name] = {} # get VRF bound interfaces interfaces = vrf_interfaces(conf, name) if interfaces: vrf['vrf_remove'][name]['interface'] = interfaces # get VRF bound routing instances routes = vrf_routing(conf, name) if routes: vrf['vrf_remove'][name]['route'] = routes if 'name' in vrf: vrf['conntrack'] = conntrack_required(conf) + # We need to merge the FRR rendering dict into the VRF dict + # this is required to get the route-map information to FRR + vrf.update({'frrender' : get_frrender_dict(conf)}) + # We also need the route-map information from the config # # XXX: one MUST always call this without the key_mangling() option! See # vyos.configverify.verify_common_route_maps() for more information. tmp = {'policy' : {'route-map' : conf.get_config_dict(['policy', 'route-map'], get_first_key=True)}} # Merge policy dict into "regular" config dict vrf = dict_merge(tmp, vrf) return vrf def verify(vrf): # ensure VRF is not assigned to any interface if 'vrf_remove' in vrf: for name, config in vrf['vrf_remove'].items(): if 'interface' in config: raise ConfigError(f'Can not remove VRF "{name}", it still has '\ f'member interfaces!') if 'route' in config: raise ConfigError(f'Can not remove VRF "{name}", it still has '\ f'static routes installed!') if 'name' in vrf: reserved_names = ["add", "all", "broadcast", "default", "delete", "dev", "get", "inet", "mtu", "link", "type", "vrf"] table_ids = [] vnis = [] for name, vrf_config in vrf['name'].items(): # Reserved VRF names if name in reserved_names: raise ConfigError(f'VRF name "{name}" is reserved and connot be used!') # table id is mandatory if 'table' not in vrf_config: raise ConfigError(f'VRF "{name}" table id is mandatory!') # routing table id can't be changed - OS restriction if interface_exists(name): tmp = get_vrf_tableid(name) if tmp and tmp != int(vrf_config['table']): raise ConfigError(f'VRF "{name}" table id modification not possible!') # VRF routing table ID must be unique on the system if 'table' in vrf_config and vrf_config['table'] in table_ids: raise ConfigError(f'VRF "{name}" table id is not unique!') table_ids.append(vrf_config['table']) # VRF VNIs must be unique on the system if 'vni' in vrf_config: vni = vrf_config['vni'] if vni in vnis: raise ConfigError(f'VRF "{name}" VNI "{vni}" is not unique!') vnis.append(vni) tmp = dict_search('ip.protocol', vrf_config) if tmp != None: for protocol, protocol_options in tmp.items(): if 'route_map' in protocol_options: verify_route_map(protocol_options['route_map'], vrf) tmp = dict_search('ipv6.protocol', vrf_config) if tmp != None: for protocol, protocol_options in tmp.items(): if 'route_map' in 
protocol_options: verify_route_map(protocol_options['route_map'], vrf) return None def generate(vrf): # Render iproute2 VR helper names render(config_file, 'iproute2/vrf.conf.j2', vrf) - # Render VRF Kernel/Zebra route-map filters - vrf['frr_zebra_config'] = render_to_string('frr/zebra.vrf.route-map.frr.j2', vrf) + + if 'frrender' in vrf: + frrender.generate(vrf['frrender']) return None def apply(vrf): # Documentation # # - https://github.com/torvalds/linux/blob/master/Documentation/networking/vrf.txt # - https://github.com/Mellanox/mlxsw/wiki/Virtual-Routing-and-Forwarding-(VRF) # - https://github.com/Mellanox/mlxsw/wiki/L3-Tunneling # - https://netdevconf.info/1.1/proceedings/slides/ahern-vrf-tutorial.pdf # - https://netdevconf.info/1.2/slides/oct6/02_ahern_what_is_l3mdev_slides.pdf # set the default VRF global behaviour bind_all = '0' if 'bind_to_all' in vrf: bind_all = '1' sysctl_write('net.ipv4.tcp_l3mdev_accept', bind_all) sysctl_write('net.ipv4.udp_l3mdev_accept', bind_all) for tmp in (dict_search('vrf_remove', vrf) or []): if interface_exists(tmp): # T5492: deleting a VRF instance may leafe processes running # (e.g. dhclient) as there is a depedency ordering issue in the CLI. # We need to ensure that we stop the dhclient processes first so # a proper DHCLP RELEASE message is sent for interface in get_vrf_members(tmp): vrf_iface = Interface(interface) vrf_iface.set_dhcp(False) vrf_iface.set_dhcpv6(False) # Remove nftables conntrack zone map item nft_del_element = f'delete element inet vrf_zones ct_iface_map {{ "{tmp}" }}' # Check if deleting is possible first to avoid raising errors _, err = popen(f'nft --check {nft_del_element}') if not err: # Remove map element cmd(f'nft {nft_del_element}') # Delete the VRF Kernel interface call(f'ip link delete dev {tmp}') if 'name' in vrf: # Linux routing uses rules to find tables - routing targets are then # looked up in those tables. If the lookup got a matching route, the # process ends. # # TL;DR; first table with a matching entry wins! # # You can see your routing table lookup rules using "ip rule", sadly the # local lookup is hit before any VRF lookup. Pinging an addresses from the # VRF will usually find a hit in the local table, and never reach the VRF # routing table - this is usually not what you want. Thus we will # re-arrange the tables and move the local lookup further down once VRFs # are enabled. # # Thanks to https://stbuehler.de/blog/article/2020/02/29/using_vrf__virtual_routing_and_forwarding__on_linux.html for afi in ['-4', '-6']: # move lookup local to pref 32765 (from 0) if not has_rule(afi, 32765, 'local'): call(f'ip {afi} rule add pref 32765 table local') if has_rule(afi, 0, 'local'): call(f'ip {afi} rule del pref 0') # make sure that in VRFs after failed lookup in the VRF specific table # nothing else is reached if not has_rule(afi, 1000, 'l3mdev'): # this should be added by the kernel when a VRF is created # add it here for completeness call(f'ip {afi} rule add pref 1000 l3mdev protocol kernel') # add another rule with an unreachable target which only triggers in VRF context # if a route could not be reached if not has_rule(afi, 2000, 'l3mdev'): call(f'ip {afi} rule add pref 2000 l3mdev unreachable') nft_vrf_zone_rule_setup = False for name, config in vrf['name'].items(): table = config['table'] if not interface_exists(name): # For each VRF apart from your default context create a VRF # interface with a separate routing table call(f'ip link add {name} type vrf table {table}') # set VRF description for e.g. 
SNMP monitoring vrf_if = Interface(name) # We also should add proper loopback IP addresses to the newly added # VRF for services bound to the loopback address (SNMP, NTP) vrf_if.add_addr('127.0.0.1/8') vrf_if.add_addr('::1/128') # add VRF description if available vrf_if.set_alias(config.get('description', '')) # Enable/Disable IPv4 forwarding tmp = dict_search('ip.disable_forwarding', config) value = '0' if (tmp != None) else '1' vrf_if.set_ipv4_forwarding(value) # Enable/Disable IPv6 forwarding tmp = dict_search('ipv6.disable_forwarding', config) value = '0' if (tmp != None) else '1' vrf_if.set_ipv6_forwarding(value) # Enable/Disable of an interface must always be done at the end of the # derived class to make use of the ref-counting set_admin_state() # function. We will only enable the interface if 'up' was called as # often as 'down'. This is required by some interface implementations # as certain parameters can only be changed when the interface is # in admin-down state. This ensures the link does not flap during # reconfiguration. state = 'down' if 'disable' in config else 'up' vrf_if.set_admin_state(state) # Add nftables conntrack zone map item nft_add_element = f'add element inet vrf_zones ct_iface_map {{ "{name}" : {table} }}' cmd(f'nft {nft_add_element}') # Only call into nftables as long as there is nothing setup to avoid wasting # CPU time and thus lenghten the commit process if not nft_vrf_zone_rule_setup: nft_vrf_zone_rule_setup = is_nft_vrf_zone_rule_setup() # Install nftables conntrack rules only once if vrf['conntrack'] and not nft_vrf_zone_rule_setup: for chain, rule in nftables_rules.items(): cmd(f'nft add rule inet vrf_zones {chain} {rule}') if 'name' not in vrf or not vrf['conntrack']: for chain, rule in nftables_rules.items(): cmd(f'nft flush chain inet vrf_zones {chain}') # Return default ip rule values if 'name' not in vrf: for afi in ['-4', '-6']: # move lookup local to pref 0 (from 32765) if not has_rule(afi, 0, 'local'): call(f'ip {afi} rule add pref 0 from all lookup local') if has_rule(afi, 32765, 'local'): call(f'ip {afi} rule del pref 32765 table local') if has_rule(afi, 1000, 'l3mdev'): call(f'ip {afi} rule del pref 1000 l3mdev protocol kernel') if has_rule(afi, 2000, 'l3mdev'): call(f'ip {afi} rule del pref 2000 l3mdev unreachable') - # Save original configuration prior to starting any commit actions - frr_cfg = frr.FRRConfig() - frr_cfg.load_configuration(frr.mgmt_daemon) - frr_cfg.modify_section(f'^vrf .+', stop_pattern='^exit-vrf', remove_stop_mark=True) - if 'frr_zebra_config' in vrf: - frr_cfg.add_before(frr.default_add_before, vrf['frr_zebra_config']) - frr_cfg.commit_configuration() + if 'frrender' in vrf: + frrender.apply() return None if __name__ == '__main__': try: c = get_config() verify(c) generate(c) apply(c) except ConfigError as e: print(e) exit(1)
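vrf.py takes a slightly different route than the protocol scripts: its config dict is built around the VRF CLI tree, so get_config() attaches the FRR rendering dict under a dedicated 'frrender' key, and generate()/apply() only touch FRR when that key is present. A minimal sketch of this calling convention, with the non-FRR parts of the script (iproute2 rendering, nftables zones, ip rule shuffling) elided:

from vyos.config import Config
from vyos.configdict import get_frrender_dict
from vyos.frrender import FRRender

frrender = FRRender()

def get_config(config=None):
    conf = config if config else Config()
    vrf = conf.get_config_dict(['vrf'], key_mangling=('-', '_'),
                               get_first_key=True,
                               no_tag_node_value_mangle=True)
    # attach the FRR rendering dict so the VRF route-map configuration
    # can be handed to FRR in generate()/apply()
    vrf.update({'frrender': get_frrender_dict(conf)})
    return vrf

def generate(vrf):
    if 'frrender' in vrf:
        frrender.generate(vrf['frrender'])
    return None

def apply(vrf):
    if 'frrender' in vrf:
        frrender.apply()
    return None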